VirtualBox

Changeset 44252 in vbox for trunk/src/VBox/Storage/VMDK.cpp


Timestamp: Jan 8, 2013 1:23:54 PM
Author: vboxsync
svn:sync-xref-src-repo-rev: 83113
Message: Storage/Backends: async/sync I/O unification, remove separate entries for sync and async I/O callbacks, remove unused code
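
The shape of that unification is visible throughout the diff below: helpers such as vmdkFlushImage and vmdkWriteDescriptor lose their separate *Async twins and instead take a PVDIOCTX parameter, with synchronous callers passing NULL (e.g. vmdkFlushImage(pImage, NULL)). The following is only a minimal illustrative sketch of that pattern, not code from this changeset; IoCtx and flushImage are hypothetical stand-ins for PVDIOCTX and the real backend helpers.

    // Hypothetical sketch of the sync/async unification pattern (not VirtualBox code).
    #include <cstdio>

    struct IoCtx;                  // stand-in for PVDIOCTX; a NULL pointer means "run synchronously"

    static int flushImage(void *pImage, IoCtx *pIoCtx)
    {
        (void)pImage;
        if (!pIoCtx)
        {
            // Synchronous caller (image close, metadata rewrite): do the work and wait for it.
            std::printf("flush: synchronous path\n");
        }
        else
        {
            // Asynchronous caller: queue the writes on the I/O context and return;
            // completion is reported later through a callback such as vmdkAllocGrainComplete.
            std::printf("flush: asynchronous path via I/O context\n");
        }
        return 0;
    }

    int main()
    {
        return flushImage(nullptr, nullptr);   // sync path, mirroring vmdkFlushImage(pImage, NULL)
    }

In the diff this is why calls that stay synchronous, such as vmdkWriteMetaSparseExtent(pImage, &pImage->pExtents[0], 0, NULL), simply gain a trailing NULL context argument.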

File: 1 edited

  • trunk/src/VBox/Storage/VMDK.cpp

    r44233 → r44252
    233233    /** File open flags for consistency checking. */
    234234    unsigned         fOpen;
    235     /** Flag whether this file has been opened for async I/O. */
    236     bool             fAsyncIO;
    237235    /** Handle for sync/async file abstraction.*/
    238236    PVDIOSTORAGE     pStorage;
     
    530528
    531529static int vmdkCreateExtents(PVMDKIMAGE pImage, unsigned cExtents);
    532 static int vmdkFlushImage(PVMDKIMAGE pImage);
     530static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx);
    533531static int vmdkSetImageComment(PVMDKIMAGE pImage, const char *pszComment);
    534532static int vmdkFreeImage(PVMDKIMAGE pImage, bool fDelete);
    535533
    536 static int vmdkAllocGrainAsyncComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq);
     534static int vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx,
     535                                  void *pvUser, int rcReq);
    537536
    538537/**
     
    541540 */
    542541static int vmdkFileOpen(PVMDKIMAGE pImage, PVMDKFILE *ppVmdkFile,
    543                         const char *pszFilename, uint32_t fOpen, bool fAsyncIO)
     542                        const char *pszFilename, uint32_t fOpen)
    544543{
    545544    int rc = VINF_SUCCESS;
     
    577576    }
    578577    pVmdkFile->fOpen = fOpen;
    579     pVmdkFile->fAsyncIO = fAsyncIO;
    580578
    581579    rc = vdIfIoIntFileOpen(pImage->pIfIo, pszFilename, fOpen,
     
    680678                                    uint64_t *puLBA, uint32_t *pcbMarkerData)
    681679{
    682     if (pExtent->pFile->fAsyncIO)
    683     {
    684         AssertMsgFailed(("TODO\n"));
    685         return VERR_NOT_SUPPORTED;
    686     }
    687     else
    688     {
    689         int rc;
    690         PRTZIPDECOMP pZip = NULL;
    691         VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
    692         size_t cbCompSize, cbActuallyRead;
    693 
    694         if (!pcvMarker)
    695         {
    696             rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
    697                                        uOffset, pMarker, RT_OFFSETOF(VMDKMARKER, uType));
    698             if (RT_FAILURE(rc))
    699                 return rc;
    700         }
    701         else
    702         {
    703             memcpy(pMarker, pcvMarker, RT_OFFSETOF(VMDKMARKER, uType));
    704             /* pcvMarker endianness has already been partially transformed, fix it */
    705             pMarker->uSector = RT_H2LE_U64(pMarker->uSector);
    706             pMarker->cbSize = RT_H2LE_U32(pMarker->cbSize);
    707         }
    708 
    709         cbCompSize = RT_LE2H_U32(pMarker->cbSize);
    710         if (cbCompSize == 0)
    711         {
    712             AssertMsgFailed(("VMDK: corrupted marker\n"));
    713             return VERR_VD_VMDK_INVALID_FORMAT;
    714         }
    715 
    716         /* Sanity check - the expansion ratio should be much less than 2. */
    717         Assert(cbCompSize < 2 * cbToRead);
    718         if (cbCompSize >= 2 * cbToRead)
    719             return VERR_VD_VMDK_INVALID_FORMAT;
    720 
    721         /* Compressed grain marker. Data follows immediately. */
     680    int rc;
     681    PRTZIPDECOMP pZip = NULL;
     682    VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
     683    size_t cbCompSize, cbActuallyRead;
     684
     685    if (!pcvMarker)
     686    {
    722687        rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
    723                                    uOffset + RT_OFFSETOF(VMDKMARKER, uType),
    724                                     (uint8_t *)pExtent->pvCompGrain
    725                                   + RT_OFFSETOF(VMDKMARKER, uType),
    726                                    RT_ALIGN_Z(  cbCompSize
    727                                               + RT_OFFSETOF(VMDKMARKER, uType),
    728                                               512)
    729                                    - RT_OFFSETOF(VMDKMARKER, uType));
    730 
    731         if (puLBA)
    732             *puLBA = RT_LE2H_U64(pMarker->uSector);
    733         if (pcbMarkerData)
    734             *pcbMarkerData = RT_ALIGN(  cbCompSize
    735                                       + RT_OFFSETOF(VMDKMARKER, uType),
    736                                       512);
    737 
    738         VMDKCOMPRESSIO InflateState;
    739         InflateState.pImage = pImage;
    740         InflateState.iOffset = -1;
    741         InflateState.cbCompGrain = cbCompSize + RT_OFFSETOF(VMDKMARKER, uType);
    742         InflateState.pvCompGrain = pExtent->pvCompGrain;
    743 
    744         rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
     688                                   uOffset, pMarker, RT_OFFSETOF(VMDKMARKER, uType));
    745689        if (RT_FAILURE(rc))
    746690            return rc;
    747         rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
    748         RTZipDecompDestroy(pZip);
    749         if (RT_FAILURE(rc))
    750         {
    751             if (rc == VERR_ZIP_CORRUPTED)
    752                 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Compressed image is corrupted '%s'"), pExtent->pszFullname);
    753             return rc;
    754         }
    755         if (cbActuallyRead != cbToRead)
    756             rc = VERR_VD_VMDK_INVALID_FORMAT;
     691    }
     692    else
     693    {
     694        memcpy(pMarker, pcvMarker, RT_OFFSETOF(VMDKMARKER, uType));
     695        /* pcvMarker endianness has already been partially transformed, fix it */
     696        pMarker->uSector = RT_H2LE_U64(pMarker->uSector);
     697        pMarker->cbSize = RT_H2LE_U32(pMarker->cbSize);
     698    }
     699
     700    cbCompSize = RT_LE2H_U32(pMarker->cbSize);
     701    if (cbCompSize == 0)
     702    {
     703        AssertMsgFailed(("VMDK: corrupted marker\n"));
     704        return VERR_VD_VMDK_INVALID_FORMAT;
     705    }
     706
     707    /* Sanity check - the expansion ratio should be much less than 2. */
     708    Assert(cbCompSize < 2 * cbToRead);
     709    if (cbCompSize >= 2 * cbToRead)
     710        return VERR_VD_VMDK_INVALID_FORMAT;
     711
     712    /* Compressed grain marker. Data follows immediately. */
     713    rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
     714                               uOffset + RT_OFFSETOF(VMDKMARKER, uType),
     715                                (uint8_t *)pExtent->pvCompGrain
     716                              + RT_OFFSETOF(VMDKMARKER, uType),
     717                               RT_ALIGN_Z(  cbCompSize
     718                                          + RT_OFFSETOF(VMDKMARKER, uType),
     719                                          512)
     720                               - RT_OFFSETOF(VMDKMARKER, uType));
     721
     722    if (puLBA)
     723        *puLBA = RT_LE2H_U64(pMarker->uSector);
     724    if (pcbMarkerData)
     725        *pcbMarkerData = RT_ALIGN(  cbCompSize
     726                                  + RT_OFFSETOF(VMDKMARKER, uType),
     727                                  512);
     728
     729    VMDKCOMPRESSIO InflateState;
     730    InflateState.pImage = pImage;
     731    InflateState.iOffset = -1;
     732    InflateState.cbCompGrain = cbCompSize + RT_OFFSETOF(VMDKMARKER, uType);
     733    InflateState.pvCompGrain = pExtent->pvCompGrain;
     734
     735    rc = RTZipDecompCreate(&pZip, &InflateState, vmdkFileInflateHelper);
     736    if (RT_FAILURE(rc))
    757737        return rc;
    758     }
     738    rc = RTZipDecompress(pZip, pvBuf, cbToRead, &cbActuallyRead);
     739    RTZipDecompDestroy(pZip);
     740    if (RT_FAILURE(rc))
     741    {
     742        if (rc == VERR_ZIP_CORRUPTED)
     743            rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: Compressed image is corrupted '%s'"), pExtent->pszFullname);
     744        return rc;
     745    }
     746    if (cbActuallyRead != cbToRead)
     747        rc = VERR_VD_VMDK_INVALID_FORMAT;
     748    return rc;
    759749}
    760750
     
    789779                                    uint32_t *pcbMarkerData)
    790780{
    791     if (pExtent->pFile->fAsyncIO)
    792     {
    793         AssertMsgFailed(("TODO\n"));
    794         return VERR_NOT_SUPPORTED;
    795     }
    796     else
    797     {
    798         int rc;
    799         PRTZIPCOMP pZip = NULL;
    800         VMDKCOMPRESSIO DeflateState;
    801 
    802         DeflateState.pImage = pImage;
    803         DeflateState.iOffset = -1;
    804         DeflateState.cbCompGrain = pExtent->cbCompGrain;
    805         DeflateState.pvCompGrain = pExtent->pvCompGrain;
    806 
    807         rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper,
    808                              RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
     781    int rc;
     782    PRTZIPCOMP pZip = NULL;
     783    VMDKCOMPRESSIO DeflateState;
     784
     785    DeflateState.pImage = pImage;
     786    DeflateState.iOffset = -1;
     787    DeflateState.cbCompGrain = pExtent->cbCompGrain;
     788    DeflateState.pvCompGrain = pExtent->pvCompGrain;
     789
     790    rc = RTZipCompCreate(&pZip, &DeflateState, vmdkFileDeflateHelper,
     791                         RTZIPTYPE_ZLIB, RTZIPLEVEL_DEFAULT);
     792    if (RT_FAILURE(rc))
     793        return rc;
     794    rc = RTZipCompress(pZip, pvBuf, cbToWrite);
     795    if (RT_SUCCESS(rc))
     796        rc = RTZipCompFinish(pZip);
     797    RTZipCompDestroy(pZip);
     798    if (RT_SUCCESS(rc))
     799    {
     800        Assert(   DeflateState.iOffset > 0
     801               && (size_t)DeflateState.iOffset <= DeflateState.cbCompGrain);
     802
     803        /* pad with zeroes to get to a full sector size */
     804        uint32_t uSize = DeflateState.iOffset;
     805        if (uSize % 512)
     806        {
     807            uint32_t uSizeAlign = RT_ALIGN(uSize, 512);
     808            memset((uint8_t *)pExtent->pvCompGrain + uSize, '\0',
     809                   uSizeAlign - uSize);
     810            uSize = uSizeAlign;
     811        }
     812
     813        if (pcbMarkerData)
     814            *pcbMarkerData = uSize;
     815
     816        /* Compressed grain marker. Data follows immediately. */
     817        VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
     818        pMarker->uSector = RT_H2LE_U64(uLBA);
     819        pMarker->cbSize = RT_H2LE_U32(  DeflateState.iOffset
     820                                      - RT_OFFSETOF(VMDKMARKER, uType));
     821        rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
     822                                    uOffset, pMarker, uSize);
    809823        if (RT_FAILURE(rc))
    810824            return rc;
    811         rc = RTZipCompress(pZip, pvBuf, cbToWrite);
    812         if (RT_SUCCESS(rc))
    813             rc = RTZipCompFinish(pZip);
    814         RTZipCompDestroy(pZip);
    815         if (RT_SUCCESS(rc))
    816         {
    817             Assert(   DeflateState.iOffset > 0
    818                    && (size_t)DeflateState.iOffset <= DeflateState.cbCompGrain);
    819 
    820             /* pad with zeroes to get to a full sector size */
    821             uint32_t uSize = DeflateState.iOffset;
    822             if (uSize % 512)
    823             {
    824                 uint32_t uSizeAlign = RT_ALIGN(uSize, 512);
    825                 memset((uint8_t *)pExtent->pvCompGrain + uSize, '\0',
    826                        uSizeAlign - uSize);
    827                 uSize = uSizeAlign;
    828             }
    829 
    830             if (pcbMarkerData)
    831                 *pcbMarkerData = uSize;
    832 
    833             /* Compressed grain marker. Data follows immediately. */
    834             VMDKMARKER *pMarker = (VMDKMARKER *)pExtent->pvCompGrain;
    835             pMarker->uSector = RT_H2LE_U64(uLBA);
    836             pMarker->cbSize = RT_H2LE_U32(  DeflateState.iOffset
    837                                           - RT_OFFSETOF(VMDKMARKER, uType));
    838             rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
    839                                         uOffset, pMarker, uSize);
    840             if (RT_FAILURE(rc))
    841                 return rc;
    842         }
    843         return rc;
    844     }
     825    }
     826    return rc;
    845827}
    846828
     
    24302412 * Internal: write/update the descriptor part of the image.
    24312413 */
    2432 static int vmdkWriteDescriptor(PVMDKIMAGE pImage)
     2414static int vmdkWriteDescriptor(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
    24332415{
    24342416    int rc = VINF_SUCCESS;
     
    24362418    uint64_t uOffset;
    24372419    PVMDKFILE pDescFile;
    2438     void *pvDescriptor;
    2439     size_t cbDescriptor;
    2440 
    2441     if (pImage->pDescData)
    2442     {
    2443         /* Separate descriptor file. */
    2444         uOffset = 0;
    2445         cbLimit = 0;
    2446         pDescFile = pImage->pFile;
    2447     }
    2448     else
    2449     {
    2450         /* Embedded descriptor file. */
    2451         uOffset = VMDK_SECTOR2BYTE(pImage->pExtents[0].uDescriptorSector);
    2452         cbLimit = VMDK_SECTOR2BYTE(pImage->pExtents[0].cDescriptorSectors);
    2453         pDescFile = pImage->pExtents[0].pFile;
    2454     }
    2455     /* Bail out if there is no file to write to. */
    2456     if (pDescFile == NULL)
    2457         return VERR_INVALID_PARAMETER;
    2458 
    2459     rc = vmdkDescriptorPrepare(pImage, cbLimit, &pvDescriptor, &cbDescriptor);
    2460     if (RT_SUCCESS(rc))
    2461     {
    2462         rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pDescFile->pStorage, uOffset,
    2463                                     pvDescriptor, cbLimit ? cbLimit : cbDescriptor);
    2464         if (RT_FAILURE(rc))
    2465             rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing descriptor in '%s'"), pImage->pszFilename);
    2466 
    2467         if (RT_SUCCESS(rc) && !cbLimit)
    2468         {
    2469             rc = vdIfIoIntFileSetSize(pImage->pIfIo, pDescFile->pStorage, cbDescriptor);
    2470             if (RT_FAILURE(rc))
    2471                 rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error truncating descriptor in '%s'"), pImage->pszFilename);
    2472         }
    2473 
    2474         if (RT_SUCCESS(rc))
    2475             pImage->Descriptor.fDirty = false;
    2476 
    2477         RTMemFree(pvDescriptor);
    2478     }
    2479 
    2480     return rc;
    2481 }
    2482 
    2483 /**
    2484  * Internal: write/update the descriptor part of the image - async version.
    2485  */
    2486 static int vmdkWriteDescriptorAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
    2487 {
    2488     int rc = VINF_SUCCESS;
    2489     uint64_t cbLimit;
    2490     uint64_t uOffset;
    2491     PVMDKFILE pDescFile;
    2492     void *pvDescriptor;
     2420    void *pvDescriptor = NULL;
    24932421    size_t cbDescriptor;
    24942422
     
    25332461        pImage->Descriptor.fDirty = false;
    25342462
    2535     RTMemFree(pvDescriptor);
     2463    if (pvDescriptor)
     2464        RTMemFree(pvDescriptor);
    25362465    return rc;
    25372466
     
    27812710 */
    27822711static int vmdkWriteMetaSparseExtent(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
    2783                                      uint64_t uOffset)
     2712                                     uint64_t uOffset, PVDIOCTX pIoCtx)
    27842713{
    27852714    SparseExtentHeader Header;
     
    28322761    Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
    28332762
    2834     int rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
    2835                                     uOffset, &Header, sizeof(Header));
    2836     AssertRC(rc);
    2837     if (RT_FAILURE(rc))
    2838         rc = vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: error writing extent header in '%s'"), pExtent->pszFullname);
    2839     return rc;
    2840 }
    2841 
    2842 /**
    2843  * Internal: write/update the metadata for a sparse extent - async version.
    2844  */
    2845 static int vmdkWriteMetaSparseExtentAsync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
    2846                                           uint64_t uOffset, PVDIOCTX pIoCtx)
    2847 {
    2848     SparseExtentHeader Header;
    2849 
    2850     memset(&Header, '\0', sizeof(Header));
    2851     Header.magicNumber = RT_H2LE_U32(VMDK_SPARSE_MAGICNUMBER);
    2852     Header.version = RT_H2LE_U32(pExtent->uVersion);
    2853     Header.flags = RT_H2LE_U32(RT_BIT(0));
    2854     if (pExtent->pRGD)
    2855         Header.flags |= RT_H2LE_U32(RT_BIT(1));
    2856     if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
    2857         Header.flags |= RT_H2LE_U32(RT_BIT(16) | RT_BIT(17));
    2858     Header.capacity = RT_H2LE_U64(pExtent->cSectors);
    2859     Header.grainSize = RT_H2LE_U64(pExtent->cSectorsPerGrain);
    2860     Header.descriptorOffset = RT_H2LE_U64(pExtent->uDescriptorSector);
    2861     Header.descriptorSize = RT_H2LE_U64(pExtent->cDescriptorSectors);
    2862     Header.numGTEsPerGT = RT_H2LE_U32(pExtent->cGTEntries);
    2863     if (pExtent->fFooter && uOffset == 0)
    2864     {
    2865         if (pExtent->pRGD)
    2866         {
    2867             Assert(pExtent->uSectorRGD);
    2868             Header.rgdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
    2869             Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
    2870         }
    2871         else
    2872         {
    2873             Header.gdOffset = RT_H2LE_U64(VMDK_GD_AT_END);
    2874         }
    2875     }
    2876     else
    2877     {
    2878         if (pExtent->pRGD)
    2879         {
    2880             Assert(pExtent->uSectorRGD);
    2881             Header.rgdOffset = RT_H2LE_U64(pExtent->uSectorRGD);
    2882             Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
    2883         }
    2884         else
    2885         {
    2886             Header.gdOffset = RT_H2LE_U64(pExtent->uSectorGD);
    2887         }
    2888     }
    2889     Header.overHead = RT_H2LE_U64(pExtent->cOverheadSectors);
    2890     Header.uncleanShutdown = pExtent->fUncleanShutdown;
    2891     Header.singleEndLineChar = '\n';
    2892     Header.nonEndLineChar = ' ';
    2893     Header.doubleEndLineChar1 = '\r';
    2894     Header.doubleEndLineChar2 = '\n';
    2895     Header.compressAlgorithm = RT_H2LE_U16(pExtent->uCompression);
    2896 
    28972763    int rc = vdIfIoIntFileWriteMeta(pImage->pIfIo, pExtent->pFile->pStorage,
    28982764                                    uOffset, &Header, sizeof(Header),
     
    31162982
    31172983    rc = vmdkFileOpen(pImage, &pFile, pImage->pszFilename,
    3118                       VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */),
    3119                       false /* fAsyncIO */);
     2984                      VDOpenFlagsToFileOpenFlags(uOpenFlags, false /* fCreate */));
    31202985    if (RT_FAILURE(rc))
    31212986    {
     
    33573222                    rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
    33583223                                      VDOpenFlagsToFileOpenFlags(uOpenFlags,
    3359                                                                  false /* fCreate */),
    3360                                       false /* fAsyncIO */);
     3224                                                                 false /* fCreate */));
    33613225                    if (RT_FAILURE(rc))
    33623226                    {
     
    33853249                    rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
    33863250                                      VDOpenFlagsToFileOpenFlags(uOpenFlags,
    3387                                                                  false /* fCreate */),
    3388                                       true /* fAsyncIO */);
     3251                                                                 false /* fCreate */));
    33893252                    if (RT_FAILURE(rc))
    33903253                    {
     
    34233286
    34243287    /* Update the image metadata now in case has changed. */
    3425     rc = vmdkFlushImage(pImage);
     3288    rc = vmdkFlushImage(pImage, NULL);
    34263289    if (RT_FAILURE(rc))
    34273290        goto out;
     
    34883351        rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
    34893352                          VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
    3490                                                      true /* fCreate */),
    3491                           false /* fAsyncIO */);
     3353                                                     true /* fCreate */));
    34923354        if (RT_FAILURE(rc))
    34933355            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
     
    35133375        rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
    35143376                          VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags & ~VD_OPEN_FLAGS_READONLY,
    3515                                                      false /* fCreate */),
    3516                           false /* fAsyncIO */);
     3377                                                     false /* fCreate */));
    35173378        if (RT_FAILURE(rc))
    35183379            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw disk file '%s'"), pExtent->pszFullname);
     
    35513412        rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
    35523413                          VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
    3553                                                      true /* fCreate */),
    3554                           false /* fAsyncIO */);
     3414                                                     true /* fCreate */));
    35553415        if (RT_FAILURE(rc))
    35563416            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pImage->pszFilename);
     
    36233483                rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
    36243484                                  VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
    3625                                                              true /* fCreate */),
    3626                                   false /* fAsyncIO */);
     3485                                                             true /* fCreate */));
    36273486                if (RT_FAILURE(rc))
    36283487                    return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new partition data file '%s'"), pExtent->pszFullname);
     
    36593518                    rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
    36603519                                      VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags & ~VD_OPEN_FLAGS_READONLY,
    3661                                                                  false /* fCreate */),
    3662                                       false /* fAsyncIO */);
     3520                                                                 false /* fCreate */));
    36633521                    if (RT_FAILURE(rc))
    36643522                        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not open raw partition file '%s'"), pExtent->pszFullname);
     
    37333591        rc = vmdkFileOpen(pImage, &pImage->pFile, pImage->pszFilename,
    37343592                          VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
    3735                                                      true /* fCreate */),
    3736                           false /* fAsyncIO */);
     3593                                                     true /* fCreate */));
    37373594        if (RT_FAILURE(rc))
    37383595            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new sparse descriptor file '%s'"), pImage->pszFilename);
     
    38043661        rc = vmdkFileOpen(pImage, &pExtent->pFile, pExtent->pszFullname,
    38053662                          VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
    3806                                                      true /* fCreate */),
    3807                           false /* fAsyncIO */);
     3663                                                     true /* fCreate */));
    38083664        if (RT_FAILURE(rc))
    38093665            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
     
    40033859                        VDOpenFlagsToFileOpenFlags(pImage->uOpenFlags,
    40043860                                                   true /* fCreate */)
    4005                       & ~RTFILE_O_READ,
    4006                       false /* fAsyncIO */);
     3861                      & ~RTFILE_O_READ);
    40073862    if (RT_FAILURE(rc))
    40083863        return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: could not create new file '%s'"), pExtent->pszFullname);
     
    42094064        pImage->pExtents[0].cDescriptorSectors = VMDK_BYTE2SECTOR(RT_ALIGN_64(  pImage->Descriptor.aLines[pImage->Descriptor.cLines]
    42104065                                                                              - pImage->Descriptor.aLines[0], 512));
    4211         rc = vmdkWriteMetaSparseExtent(pImage, &pImage->pExtents[0], 0);
     4066        rc = vmdkWriteMetaSparseExtent(pImage, &pImage->pExtents[0], 0, NULL);
    42124067        if (RT_FAILURE(rc))
    42134068        {
     
    42164071        }
    42174072
    4218         rc = vmdkWriteDescriptor(pImage);
     4073        rc = vmdkWriteDescriptor(pImage, NULL);
    42194074        if (RT_FAILURE(rc))
    42204075        {
     
    42244079    }
    42254080    else
    4226         rc = vmdkFlushImage(pImage);
     4081        rc = vmdkFlushImage(pImage, NULL);
    42274082
    42284083out:
     
    44494304
    44504305                uFileOffset += 512;
    4451                 rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset);
     4306                rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset, NULL);
    44524307                AssertRC(rc);
    44534308
     
    44614316        }
    44624317        else
    4463             vmdkFlushImage(pImage);
     4318            vmdkFlushImage(pImage, NULL);
    44644319
    44654320        if (pImage->pExtents != NULL)
     
    44944349 * Internal. Flush image data (and metadata) to disk.
    44954350 */
    4496 static int vmdkFlushImage(PVMDKIMAGE pImage)
     4351static int vmdkFlushImage(PVMDKIMAGE pImage, PVDIOCTX pIoCtx)
    44974352{
    44984353    PVMDKEXTENT pExtent;
     
    45024357    if (pImage->Descriptor.fDirty)
    45034358    {
    4504         rc = vmdkWriteDescriptor(pImage);
     4359        rc = vmdkWriteDescriptor(pImage, pIoCtx);
    45054360        if (RT_FAILURE(rc))
    45064361            goto out;
     
    45174372                    if (!pExtent->fFooter)
    45184373                    {
    4519                         rc = vmdkWriteMetaSparseExtent(pImage, pExtent, 0);
     4374                        rc = vmdkWriteMetaSparseExtent(pImage, pExtent, 0, pIoCtx);
    45204375                        if (RT_FAILURE(rc))
    45214376                            goto out;
     
    45304385                        uFileOffset = RT_ALIGN_64(uFileOffset, 512);
    45314386                        rc = vmdkWriteMetaSparseExtent(pImage, pExtent,
    4532                                                        uFileOffset);
     4387                                                       uFileOffset, pIoCtx);
    45334388                        if (RT_FAILURE(rc))
    45344389                            goto out;
     
    45634418                    && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
    45644419                    && !(pExtent->pszBasename[0] == RTPATH_SLASH))
    4565                     rc = vdIfIoIntFileFlushSync(pImage->pIfIo, pExtent->pFile->pStorage);
     4420                    rc = vdIfIoIntFileFlush(pImage->pIfIo, pExtent->pFile->pStorage, pIoCtx,
     4421                                            NULL, NULL);
    45664422                break;
    45674423            case VMDKETYPE_ZERO:
     
    46214477 * number in the extent.
    46224478 */
    4623 static int vmdkGetSector(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
    4624                          uint64_t uSector, uint64_t *puExtentSector)
     4479static int vmdkGetSector(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
     4480                         PVMDKEXTENT pExtent, uint64_t uSector,
     4481                         uint64_t *puExtentSector)
    46254482{
    46264483    PVMDKGTCACHE pCache = pImage->pGTCache;
     
    46424499        return VINF_SUCCESS;
    46434500    }
    4644 
    4645     uGDIndex = uSector / pExtent->cSectorsPerGDE;
    4646     if (uGDIndex >= pExtent->cGDEntries)
    4647         return VERR_OUT_OF_RANGE;
    4648     uGTSector = pExtent->pGD[uGDIndex];
    4649     if (!uGTSector)
    4650     {
    4651         /* There is no grain table referenced by this grain directory
    4652          * entry. So there is absolutely no data in this area. */
    4653         *puExtentSector = 0;
    4654         return VINF_SUCCESS;
    4655     }
    4656 
    4657     uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
    4658     uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
    4659     pGTCacheEntry = &pCache->aGTCache[uGTHash];
    4660     if (    pGTCacheEntry->uExtent != pExtent->uExtent
    4661         ||  pGTCacheEntry->uGTBlock != uGTBlock)
    4662     {
    4663         /* Cache miss, fetch data from disk. */
    4664         rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
    4665                                    VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
    4666                                    aGTDataTmp, sizeof(aGTDataTmp));
    4667         if (RT_FAILURE(rc))
    4668             return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read grain table entry in '%s'"), pExtent->pszFullname);
    4669         pGTCacheEntry->uExtent = pExtent->uExtent;
    4670         pGTCacheEntry->uGTBlock = uGTBlock;
    4671         for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
    4672             pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
    4673     }
    4674     uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
    4675     uint32_t uGrainSector = pGTCacheEntry->aGTData[uGTBlockIndex];
    4676     if (uGrainSector)
    4677         *puExtentSector = uGrainSector + uSector % pExtent->cSectorsPerGrain;
    4678     else
    4679         *puExtentSector = 0;
    4680     return VINF_SUCCESS;
    4681 }
    4682 
    4683 /**
    4684  * Internal. Get sector number in the extent file from the relative sector
    4685  * number in the extent - version for async access.
    4686  */
    4687 static int vmdkGetSectorAsync(PVMDKIMAGE pImage, PVDIOCTX pIoCtx,
    4688                               PVMDKEXTENT pExtent, uint64_t uSector,
    4689                               uint64_t *puExtentSector)
    4690 {
    4691     PVMDKGTCACHE pCache = pImage->pGTCache;
    4692     uint64_t uGDIndex, uGTSector, uGTBlock;
    4693     uint32_t uGTHash, uGTBlockIndex;
    4694     PVMDKGTCACHEENTRY pGTCacheEntry;
    4695     uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
    4696     int rc;
    46974501
    46984502    uGDIndex = uSector / pExtent->cSectorsPerGDE;
     
    47384542
    47394543/**
    4740  * Internal. Allocates a new grain table (if necessary), writes the grain
    4741  * and updates the grain table. The cache is also updated by this operation.
    4742  * This is separate from vmdkGetSector, because that should be as fast as
    4743  * possible. Most code from vmdkGetSector also appears here.
    4744  */
    4745 static int vmdkAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
    4746                           uint64_t uSector, const void *pvBuf,
    4747                           uint64_t cbWrite)
    4748 {
    4749     PVMDKGTCACHE pCache = pImage->pGTCache;
    4750     uint64_t uGDIndex, uGTSector, uRGTSector, uGTBlock;
    4751     uint64_t uFileOffset;
    4752     uint32_t uGTHash, uGTBlockIndex;
    4753     PVMDKGTCACHEENTRY pGTCacheEntry;
    4754     uint32_t aGTDataTmp[VMDK_GT_CACHELINE_SIZE];
    4755     int rc;
    4756 
    4757     uGDIndex = uSector / pExtent->cSectorsPerGDE;
    4758     if (uGDIndex >= pExtent->cGDEntries)
    4759         return VERR_OUT_OF_RANGE;
    4760     uGTSector = pExtent->pGD[uGDIndex];
    4761     if (pExtent->pRGD)
    4762         uRGTSector = pExtent->pRGD[uGDIndex];
    4763     else
    4764         uRGTSector = 0; /**< avoid compiler warning */
    4765     if (!uGTSector)
    4766     {
    4767         /* There is no grain table referenced by this grain directory
    4768          * entry. So there is absolutely no data in this area. Allocate
    4769          * a new grain table and put the reference to it in the GDs. */
    4770         uFileOffset = pExtent->uAppendPosition;
    4771         if (!uFileOffset)
    4772             return VERR_INTERNAL_ERROR;
    4773         Assert(!(uFileOffset % 512));
    4774         uFileOffset = RT_ALIGN_64(uFileOffset, 512);
    4775         uGTSector = VMDK_BYTE2SECTOR(uFileOffset);
    4776 
    4777         pExtent->uAppendPosition += pExtent->cGTEntries * sizeof(uint32_t);
    4778 
    4779         /* Normally the grain table is preallocated for hosted sparse extents
    4780          * that support more than 32 bit sector numbers. So this shouldn't
    4781          * ever happen on a valid extent. */
    4782         if (uGTSector > UINT32_MAX)
    4783             return VERR_VD_VMDK_INVALID_HEADER;
    4784 
    4785         /* Write grain table by writing the required number of grain table
    4786          * cache chunks. Avoids dynamic memory allocation, but is a bit
    4787          * slower. But as this is a pretty infrequently occurring case it
    4788          * should be acceptable. */
    4789         memset(aGTDataTmp, '\0', sizeof(aGTDataTmp));
    4790         for (unsigned i = 0;
    4791              i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
    4792              i++)
    4793         {
    4794             rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
    4795                                         VMDK_SECTOR2BYTE(uGTSector) + i * sizeof(aGTDataTmp),
    4796                                         aGTDataTmp, sizeof(aGTDataTmp));
    4797             if (RT_FAILURE(rc))
    4798                 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain table allocation in '%s'"), pExtent->pszFullname);
    4799         }
    4800         pExtent->uAppendPosition = RT_ALIGN_64(  pExtent->uAppendPosition
    4801                                                + pExtent->cGTEntries * sizeof(uint32_t),
    4802                                                512);
    4803 
    4804         if (pExtent->pRGD)
    4805         {
    4806             AssertReturn(!uRGTSector, VERR_VD_VMDK_INVALID_HEADER);
    4807             uFileOffset = pExtent->uAppendPosition;
    4808             if (!uFileOffset)
    4809                 return VERR_INTERNAL_ERROR;
    4810             Assert(!(uFileOffset % 512));
    4811             uRGTSector = VMDK_BYTE2SECTOR(uFileOffset);
    4812 
    4813             pExtent->uAppendPosition += pExtent->cGTEntries * sizeof(uint32_t);
    4814 
    4815             /* Normally the redundant grain table is preallocated for hosted
    4816              * sparse extents that support more than 32 bit sector numbers. So
    4817              * this shouldn't ever happen on a valid extent. */
    4818             if (uRGTSector > UINT32_MAX)
    4819                 return VERR_VD_VMDK_INVALID_HEADER;
    4820 
    4821             /* Write backup grain table by writing the required number of grain
    4822              * table cache chunks. Avoids dynamic memory allocation, but is a
    4823              * bit slower. But as this is a pretty infrequently occurring case
    4824              * it should be acceptable. */
    4825             for (unsigned i = 0;
    4826                  i < pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE;
    4827                  i++)
    4828             {
    4829                 rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
    4830                                             VMDK_SECTOR2BYTE(uRGTSector) + i * sizeof(aGTDataTmp),
    4831                                             aGTDataTmp, sizeof(aGTDataTmp));
    4832                 if (RT_FAILURE(rc))
    4833                     return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain table allocation in '%s'"), pExtent->pszFullname);
    4834             }
    4835 
    4836             pExtent->uAppendPosition =   pExtent->uAppendPosition
    4837                                        + pExtent->cGTEntries * sizeof(uint32_t);
    4838         }
    4839 
    4840         /* Update the grain directory on disk (doing it before writing the
    4841          * grain table will result in a garbled extent if the operation is
    4842          * aborted for some reason. Otherwise the worst that can happen is
    4843          * some unused sectors in the extent. */
    4844         uint32_t uGTSectorLE = RT_H2LE_U64(uGTSector);
    4845         rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
    4846                                     VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
    4847                                     &uGTSectorLE, sizeof(uGTSectorLE));
    4848         if (RT_FAILURE(rc))
    4849             return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write grain directory entry in '%s'"), pExtent->pszFullname);
    4850         if (pExtent->pRGD)
    4851         {
    4852             uint32_t uRGTSectorLE = RT_H2LE_U64(uRGTSector);
    4853             rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
    4854                                         VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uRGTSectorLE),
    4855                                         &uRGTSectorLE, sizeof(uRGTSectorLE));
    4856             if (RT_FAILURE(rc))
    4857                 return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write backup grain directory entry in '%s'"), pExtent->pszFullname);
    4858         }
    4859 
    4860         /* As the final step update the in-memory copy of the GDs. */
    4861         pExtent->pGD[uGDIndex] = uGTSector;
    4862         if (pExtent->pRGD)
    4863             pExtent->pRGD[uGDIndex] = uRGTSector;
    4864     }
    4865 
    4866     uFileOffset = pExtent->uAppendPosition;
    4867     if (!uFileOffset)
    4868         return VERR_INTERNAL_ERROR;
    4869     Assert(!(uFileOffset % 512));
    4870 
    4871     /* Write the data. Always a full grain, or we're in big trouble. */
    4872     if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
    4873     {
    4874         if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
    4875             return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname);
    4876 
    4877         /* Invalidate cache, just in case some code incorrectly allows mixing
    4878          * of reads and writes. Normally shouldn't be needed. */
    4879         pExtent->uGrainSectorAbs = 0;
    4880 
    4881         /* Write compressed data block and the markers. */
    4882         uint32_t cbGrain = 0;
    4883         rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset,
    4884                                  pvBuf, cbWrite, uSector, &cbGrain);
    4885         if (RT_FAILURE(rc))
    4886         {
    4887             AssertRC(rc);
    4888             return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
    4889         }
    4890         pExtent->uLastGrainAccess = uSector / pExtent->cSectorsPerGrain;
    4891         pExtent->uAppendPosition += cbGrain;
    4892     }
    4893     else
    4894     {
    4895         rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
    4896                                     uFileOffset, pvBuf, cbWrite);
    4897         if (RT_FAILURE(rc))
    4898             return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
    4899         pExtent->uAppendPosition += cbWrite;
    4900     }
    4901 
    4902     /* Update the grain table (and the cache). */
    4903     uGTBlock = uSector / (pExtent->cSectorsPerGrain * VMDK_GT_CACHELINE_SIZE);
    4904     uGTHash = vmdkGTCacheHash(pCache, uGTBlock, pExtent->uExtent);
    4905     pGTCacheEntry = &pCache->aGTCache[uGTHash];
    4906     if (    pGTCacheEntry->uExtent != pExtent->uExtent
    4907         ||  pGTCacheEntry->uGTBlock != uGTBlock)
    4908     {
    4909         /* Cache miss, fetch data from disk. */
    4910         rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
    4911                                    VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
    4912                                    aGTDataTmp, sizeof(aGTDataTmp));
    4913         if (RT_FAILURE(rc))
    4914             return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot read allocated grain table entry in '%s'"), pExtent->pszFullname);
    4915         pGTCacheEntry->uExtent = pExtent->uExtent;
    4916         pGTCacheEntry->uGTBlock = uGTBlock;
    4917         for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
    4918             pGTCacheEntry->aGTData[i] = RT_LE2H_U32(aGTDataTmp[i]);
    4919     }
    4920     else
    4921     {
    4922         /* Cache hit. Convert grain table block back to disk format, otherwise
    4923          * the code below will write garbage for all but the updated entry. */
    4924         for (unsigned i = 0; i < VMDK_GT_CACHELINE_SIZE; i++)
    4925             aGTDataTmp[i] = RT_H2LE_U32(pGTCacheEntry->aGTData[i]);
    4926     }
    4927     uGTBlockIndex = (uSector / pExtent->cSectorsPerGrain) % VMDK_GT_CACHELINE_SIZE;
    4928     aGTDataTmp[uGTBlockIndex] = RT_H2LE_U32(VMDK_BYTE2SECTOR(uFileOffset));
    4929     pGTCacheEntry->aGTData[uGTBlockIndex] = VMDK_BYTE2SECTOR(uFileOffset);
    4930     /* Update grain table on disk. */
    4931     rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
    4932                                 VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
    4933                                 aGTDataTmp, sizeof(aGTDataTmp));
    4934     if (RT_FAILURE(rc))
    4935         return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated grain table in '%s'"), pExtent->pszFullname);
    4936     if (pExtent->pRGD)
    4937     {
    4938         /* Update backup grain table on disk. */
    4939         rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
    4940                                     VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
    4941                                     aGTDataTmp, sizeof(aGTDataTmp));
    4942         if (RT_FAILURE(rc))
    4943             return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write updated backup grain table in '%s'"), pExtent->pszFullname);
    4944     }
    4945 #ifdef VBOX_WITH_VMDK_ESX
    4946     if (RT_SUCCESS(rc) && pExtent->enmType == VMDKETYPE_ESX_SPARSE)
    4947     {
    4948         pExtent->uFreeSector = uGTSector + VMDK_BYTE2SECTOR(cbWrite);
    4949         pExtent->fMetaDirty = true;
    4950     }
    4951 #endif /* VBOX_WITH_VMDK_ESX */
    4952     return rc;
    4953 }
    4954 
    4955 /**
    49564544 * Internal. Writes the grain and also if necessary the grain tables.
    49574545 * Uses the grain table cache as a true grain table.
    49584546 */
    49594547static int vmdkStreamAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
    4960                                 uint64_t uSector, const void *pvBuf,
     4548                                uint64_t uSector, PVDIOCTX pIoCtx,
    49614549                                uint64_t cbWrite)
    49624550{
     
    49654553    uint32_t cbGrain = 0;
    49664554    uint32_t uCacheLine, uCacheEntry;
    4967     const void *pData = pvBuf;
     4555    const void *pData;
    49684556    int rc;
    49694557
     
    49964584     * to allocate something, we also need to detect the situation ourself. */
    49974585    if (   !(pImage->uOpenFlags & VD_OPEN_FLAGS_HONOR_ZEROES)
    4998         && ASMBitFirstSet((volatile void *)pvBuf, (uint32_t)cbWrite * 8) == -1)
     4586        && vdIfIoIntIoCtxIsZero(pImage->pIfIo, pIoCtx, cbWrite, true /* fAdvance */))
    49994587        return VINF_SUCCESS;
    50004588
     
    50334621    if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
    50344622    {
    5035         memcpy(pExtent->pvGrain, pvBuf, cbWrite);
     4623        vdIfIoIntIoCtxCopyFrom(pImage->pIfIo, pIoCtx, pExtent->pvGrain, cbWrite);
    50364624        memset((char *)pExtent->pvGrain + cbWrite, '\0',
    50374625               VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite);
    50384626        pData = pExtent->pvGrain;
    50394627    }
     4628    else
     4629    {
     4630        RTSGSEG Segment;
     4631        unsigned cSegments = 1;
     4632        size_t cbSeg = 0;
     4633
     4634        cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
     4635                                             &cSegments, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
     4636        Assert(cbSeg == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain));
     4637        pData = Segment.pvSeg;
     4638    }
    50404639    rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset, pData,
    50414640                             VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain),
     
    50544653
    50554654/**
    5056  * Internal: Updates the grain table during a async grain allocation.
     4655 * Internal: Updates the grain table during grain allocation.
    50574656 */
    5058 static int vmdkAllocGrainAsyncGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
    5059                                        PVDIOCTX pIoCtx,
    5060                                        PVMDKGRAINALLOCASYNC pGrainAlloc)
     4657static int vmdkAllocGrainGTUpdate(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
     4658                                  PVMDKGRAINALLOCASYNC pGrainAlloc)
    50614659{
    50624660    int rc = VINF_SUCCESS;
     
    50884686                                   VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
    50894687                                   aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
    5090                                    &pMetaXfer, vmdkAllocGrainAsyncComplete, pGrainAlloc);
     4688                                   &pMetaXfer, vmdkAllocGrainComplete, pGrainAlloc);
    50914689        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
    50924690        {
     
    51204718                                VMDK_SECTOR2BYTE(uGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
    51214719                                aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
    5122                                 vmdkAllocGrainAsyncComplete, pGrainAlloc);
     4720                                vmdkAllocGrainComplete, pGrainAlloc);
    51234721    if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
    51244722        pGrainAlloc->cIoXfersPending++;
     
    51314729                                    VMDK_SECTOR2BYTE(uRGTSector) + (uGTBlock % (pExtent->cGTEntries / VMDK_GT_CACHELINE_SIZE)) * sizeof(aGTDataTmp),
    51324730                                    aGTDataTmp, sizeof(aGTDataTmp), pIoCtx,
    5133                                     vmdkAllocGrainAsyncComplete, pGrainAlloc);
     4731                                    vmdkAllocGrainComplete, pGrainAlloc);
    51344732        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
    51354733            pGrainAlloc->cIoXfersPending++;
     
    51534751 * Internal - complete the grain allocation by updating disk grain table if required.
    51544752 */
    5155 static int vmdkAllocGrainAsyncComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
     4753static int vmdkAllocGrainComplete(void *pBackendData, PVDIOCTX pIoCtx, void *pvUser, int rcReq)
    51564754{
    51574755    int rc = VINF_SUCCESS;
     
    51654763    pGrainAlloc->cIoXfersPending--;
    51664764    if (!pGrainAlloc->cIoXfersPending && pGrainAlloc->fGTUpdateNeeded)
    5167         rc = vmdkAllocGrainAsyncGTUpdate(pImage, pGrainAlloc->pExtent,
    5168                                          pIoCtx, pGrainAlloc);
     4765        rc = vmdkAllocGrainGTUpdate(pImage, pGrainAlloc->pExtent, pIoCtx, pGrainAlloc);
    51694766
    51704767    if (!pGrainAlloc->cIoXfersPending)
     
    51794776
    51804777/**
    5181  * Internal. Allocates a new grain table (if necessary) - async version.
     4778 * Internal. Allocates a new grain table (if necessary).
    51824779 */
    5183 static int vmdkAllocGrainAsync(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
    5184                                PVDIOCTX pIoCtx, uint64_t uSector,
    5185                                uint64_t cbWrite)
     4780static int vmdkAllocGrain(PVMDKIMAGE pImage, PVMDKEXTENT pExtent, PVDIOCTX pIoCtx,
     4781                          uint64_t uSector, uint64_t cbWrite)
    51864782{
    51874783    PVMDKGTCACHE pCache = pImage->pGTCache;
     
    51934789    LogFlowFunc(("pCache=%#p pExtent=%#p pIoCtx=%#p uSector=%llu cbWrite=%llu\n",
    51944790                 pCache, pExtent, pIoCtx, uSector, cbWrite));
    5195 
    5196     AssertReturn(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED), VERR_NOT_SUPPORTED);
    51974791
    51984792    pGrainAlloc = (PVMDKGRAINALLOCASYNC)RTMemAllocZ(sizeof(VMDKGRAINALLOCASYNC));
     
    52484842                                    VMDK_SECTOR2BYTE(uGTSector),
    52494843                                    paGTDataTmp, cbGTDataTmp, pIoCtx,
    5250                                     vmdkAllocGrainAsyncComplete, pGrainAlloc);
     4844                                    vmdkAllocGrainComplete, pGrainAlloc);
    52514845        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
    52524846            pGrainAlloc->cIoXfersPending++;
     
    52834877                                        VMDK_SECTOR2BYTE(uRGTSector),
    52844878                                        paGTDataTmp, cbGTDataTmp, pIoCtx,
    5285                                         vmdkAllocGrainAsyncComplete, pGrainAlloc);
     4879                                        vmdkAllocGrainComplete, pGrainAlloc);
    52864880            if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
    52874881                pGrainAlloc->cIoXfersPending++;
     
    53054899                                    VMDK_SECTOR2BYTE(pExtent->uSectorGD) + uGDIndex * sizeof(uGTSectorLE),
    53064900                                    &uGTSectorLE, sizeof(uGTSectorLE), pIoCtx,
    5307                                     vmdkAllocGrainAsyncComplete, pGrainAlloc);
     4901                                    vmdkAllocGrainComplete, pGrainAlloc);
    53084902        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
    53094903            pGrainAlloc->cIoXfersPending++;
     
    53164910                                        VMDK_SECTOR2BYTE(pExtent->uSectorRGD) + uGDIndex * sizeof(uGTSectorLE),
    53174911                                        &uRGTSectorLE, sizeof(uRGTSectorLE), pIoCtx,
    5318                                         vmdkAllocGrainAsyncComplete, pGrainAlloc);
     4912                                        vmdkAllocGrainComplete, pGrainAlloc);
    53194913            if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
    53204914                pGrainAlloc->cIoXfersPending++;
     
    53404934    pGrainAlloc->uGrainOffset = uFileOffset;
    53414935
    5342     /* Write the data. Always a full grain, or we're in big trouble. */
    5343     rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
    5344                                 uFileOffset, pIoCtx, cbWrite,
    5345                                 vmdkAllocGrainAsyncComplete, pGrainAlloc);
    5346     if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
    5347         pGrainAlloc->cIoXfersPending++;
    5348     else if (RT_FAILURE(rc))
    5349         return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
    5350 
    5351     pExtent->uAppendPosition += cbWrite;
    5352 
    5353     rc = vmdkAllocGrainAsyncGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc);
     4936    if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
     4937    {
     4938        AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
     4939                        ("Accesses to stream optimized images must be synchronous\n"),
     4940                        VERR_INVALID_STATE);
     4941
     4942        if (cbWrite != VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
     4943            return vdIfError(pImage->pIfError, VERR_INTERNAL_ERROR, RT_SRC_POS, N_("VMDK: not enough data for a compressed data block in '%s'"), pExtent->pszFullname);
     4944
     4945        /* Invalidate cache, just in case some code incorrectly allows mixing
     4946         * of reads and writes. Normally shouldn't be needed. */
     4947        pExtent->uGrainSectorAbs = 0;
     4948
     4949        /* Write compressed data block and the markers. */
     4950        uint32_t cbGrain = 0;
     4951        size_t cbSeg = 0;
     4952        RTSGSEG Segment;
     4953        unsigned cSegments = 1;
     4954
     4955        cbSeg = vdIfIoIntIoCtxSegArrayCreate(pImage->pIfIo, pIoCtx, &Segment,
     4956                                             &cSegments, cbWrite);
     4957        Assert(cbSeg == cbWrite);
     4958
     4959        rc = vmdkFileDeflateSync(pImage, pExtent, uFileOffset,
     4960                                 Segment.pvSeg, cbWrite, uSector, &cbGrain);
     4961        if (RT_FAILURE(rc))
     4962        {
     4963            AssertRC(rc);
     4964            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated compressed data block in '%s'"), pExtent->pszFullname);
     4965        }
     4966        pExtent->uLastGrainAccess = uSector / pExtent->cSectorsPerGrain;
     4967        pExtent->uAppendPosition += cbGrain;
     4968    }
     4969    else
     4970    {
     4971        /* Write the data. Always a full grain, or we're in big trouble. */
     4972        rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
     4973                                    uFileOffset, pIoCtx, cbWrite,
     4974                                    vmdkAllocGrainComplete, pGrainAlloc);
     4975        if (rc == VERR_VD_ASYNC_IO_IN_PROGRESS)
     4976            pGrainAlloc->cIoXfersPending++;
     4977        else if (RT_FAILURE(rc))
     4978            return vdIfError(pImage->pIfError, rc, RT_SRC_POS, N_("VMDK: cannot write allocated data block in '%s'"), pExtent->pszFullname);
     4979
     4980        pExtent->uAppendPosition += cbWrite;
     4981    }
     4982
     4983    rc = vmdkAllocGrainGTUpdate(pImage, pExtent, pIoCtx, pGrainAlloc);
    53544984
    53554985    if (!pGrainAlloc->cIoXfersPending)
     
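The hunk above keeps only the unified allocation path: the grain data write goes through vdIfIoIntFileWriteUser with vmdkAllocGrainComplete as the completion callback, and a VERR_VD_ASYNC_IO_IN_PROGRESS status bumps cIoXfersPending instead of being treated as an error. As a minimal, self-contained sketch of that counting pattern (MYGRAINALLOC, modelFileWrite and MY_ERR_IO_IN_PROGRESS are invented names for illustration, not the VBox VD interfaces):

/* Hypothetical model of the pending-transfer counting used above; the real
 * code goes through the VBox VD I/O interfaces, which are not reproduced here. */
#include <stdio.h>

#define MY_ERR_IO_IN_PROGRESS  (-1)   /* stands in for VERR_VD_ASYNC_IO_IN_PROGRESS */

typedef struct MYGRAINALLOC
{
    unsigned cIoXfersPending;   /* outstanding transfers for this allocation */
    int      rcReq;             /* first error seen, if any */
} MYGRAINALLOC;

/* Stand-in for an async write: reports "in progress" to simulate async completion. */
static int modelFileWrite(void *pvUser, void (*pfnComplete)(void *, int))
{
    (void)pvUser; (void)pfnComplete; /* a real backend would store the callback and invoke it later */
    return MY_ERR_IO_IN_PROGRESS;
}

/* Completion callback: decrement the counter, finish the allocation when it hits 0. */
static void modelAllocComplete(void *pvUser, int rc)
{
    MYGRAINALLOC *pAlloc = (MYGRAINALLOC *)pvUser;
    if (rc < 0 && pAlloc->rcReq >= 0)
        pAlloc->rcReq = rc;
    if (--pAlloc->cIoXfersPending == 0)
        printf("grain allocation finished, rc=%d\n", pAlloc->rcReq);
}

int main(void)
{
    MYGRAINALLOC Alloc = { 0, 0 };
    int rc = modelFileWrite(&Alloc, modelAllocComplete);
    if (rc == MY_ERR_IO_IN_PROGRESS)
        Alloc.cIoXfersPending++;          /* same pattern as cIoXfersPending++ above */
    /* ... grain table updates would bump the counter the same way ... */
    if (!Alloc.cIoXfersPending)
        printf("all I/O completed synchronously\n");
    else
        modelAllocComplete(&Alloc, 0);    /* simulate the deferred completion */
    return 0;
}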
    53694999 */
    53705000static int vmdkStreamReadSequential(PVMDKIMAGE pImage, PVMDKEXTENT pExtent,
    5371                                     uint64_t uSector, void *pvBuf,
     5001                                    uint64_t uSector, PVDIOCTX pIoCtx,
    53725002                                    uint64_t cbRead)
    53735003{
    53745004    int rc;
    53755005
    5376     LogFlowFunc(("pImage=%#p pExtent=%#p uSector=%llu pvBuf=%#p cbRead=%llu\n",
    5377                  pImage, pExtent, uSector, pvBuf, cbRead));
     5006    LogFlowFunc(("pImage=%#p pExtent=%#p uSector=%llu pIoCtx=%#p cbRead=%llu\n",
     5007                 pImage, pExtent, uSector, pIoCtx, cbRead));
     5008
     5009    AssertMsgReturn(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
     5010                    ("Async I/O not supported for sequential stream optimized images\n"),
     5011                    VERR_INVALID_STATE);
    53785012
    53795013    /* Do not allow to go back. */
     
    55095143
    55105144    uint32_t uSectorInGrain = uSector % pExtent->cSectorsPerGrain;
    5511     memcpy(pvBuf,
    5512            (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain),
    5513            cbRead);
     5145    vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
     5146                         (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain),
     5147                         cbRead);
    55145148    LogFlowFunc(("returns VINF_SUCCESS\n"));
    55155149    return VINF_SUCCESS;
     
    58715505    pImage->Descriptor.fDirty = true;
    58725506    /* Flush the descriptor now, in case it is embedded. */
    5873     vmdkFlushImage(pImage);
     5507    vmdkFlushImage(pImage, NULL);
    58745508
    58755509    /* Close and rename/move extents. */
     
    59445578        rrc = vmdkFileOpen(pImage, &pFile, pszOldDescName,
    59455579                           VDOpenFlagsToFileOpenFlags(VD_OPEN_FLAGS_NORMAL,
    5946                                                       false /* fCreate */),
    5947                            false /* fAsyncIO */);
     5580                                                      false /* fCreate */));
    59485581        AssertRC(rrc);
    59495582        if (fEmbeddedDesc)
     
    59615594        }
    59625595        pImage->Descriptor = DescriptorCopy;
    5963         vmdkWriteDescriptor(pImage);
     5596        vmdkWriteDescriptor(pImage, NULL);
    59645597        vmdkFileClose(pImage, &pFile, false);
    59655598        /* Get rid of the stuff we implanted. */
     
    60275660
    60285661/** @copydoc VBOXHDDBACKEND::pfnRead */
    6029 static int vmdkRead(void *pBackendData, uint64_t uOffset, void *pvBuf,
    6030                     size_t cbToRead, size_t *pcbActuallyRead)
    6031 {
    6032     LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToRead=%zu pcbActuallyRead=%#p\n", pBackendData, uOffset, pvBuf, cbToRead, pcbActuallyRead));
     5662static int vmdkRead(void *pBackendData, uint64_t uOffset, size_t cbToRead,
     5663                    PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
     5664{
     5665    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
     5666                 pBackendData, uOffset, pIoCtx, cbToRead, pcbActuallyRead));
    60335667    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    60345668    PVMDKEXTENT pExtent;
     
    60705704        case VMDKETYPE_ESX_SPARSE:
    60715705#endif /* VBOX_WITH_VMDK_ESX */
    6072             rc = vmdkGetSector(pImage, pExtent, uSectorExtentRel,
    6073                                &uSectorExtentAbs);
     5706            rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
    60745707            if (RT_FAILURE(rc))
    60755708                goto out;
     
    60865719                    rc = vmdkStreamReadSequential(pImage, pExtent,
    60875720                                                  uSectorExtentRel,
    6088                                                   pvBuf, cbToRead);
     5721                                                  pIoCtx, cbToRead);
    60895722            }
    60905723            else
     
    60925725                if (pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED)
    60935726                {
     5727                    AssertMsg(vdIfIoIntIoCtxIsSynchronous(pImage->pIfIo, pIoCtx),
     5728                              ("Async I/O is not supported for stream optimized VMDK's\n"));
     5729
    60945730                    uint32_t uSectorInGrain = uSectorExtentRel % pExtent->cSectorsPerGrain;
    60955731                    uSectorExtentAbs -= uSectorInGrain;
     
    61125748                        Assert(uLBA == uSectorExtentRel);
    61135749                    }
    6114                     memcpy(pvBuf, (uint8_t *)pExtent->pvGrain + VMDK_SECTOR2BYTE(uSectorInGrain), cbToRead);
     5750                    vdIfIoIntIoCtxCopyTo(pImage->pIfIo, pIoCtx,
     5751                                           (uint8_t *)pExtent->pvGrain
     5752                                         + VMDK_SECTOR2BYTE(uSectorInGrain),
     5753                                         cbToRead);
    61155754                }
    61165755                else
    6117                 {
    6118                     rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
     5756                    rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
    61195757                                               VMDK_SECTOR2BYTE(uSectorExtentAbs),
    6120                                                pvBuf, cbToRead);
    6121                 }
     5758                                               pIoCtx, cbToRead);
    61225759            }
    61235760            break;
    61245761        case VMDKETYPE_VMFS:
    61255762        case VMDKETYPE_FLAT:
    6126             rc = vdIfIoIntFileReadSync(pImage->pIfIo, pExtent->pFile->pStorage,
     5763            rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
    61275764                                       VMDK_SECTOR2BYTE(uSectorExtentRel),
    6128                                        pvBuf, cbToRead);
     5765                                       pIoCtx, cbToRead);
    61295766            break;
    61305767        case VMDKETYPE_ZERO:
    6131             memset(pvBuf, '\0', cbToRead);
     5768            size_t cbSet;
     5769
     5770            cbSet = vdIfIoIntIoCtxSet(pImage->pIfIo, pIoCtx, 0, cbToRead);
     5771            Assert(cbSet == cbToRead);
     5772
     5773            rc = VINF_SUCCESS;
    61325774            break;
    61335775    }
     
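In the rewritten vmdkRead above, the caller no longer supplies a flat pvBuf; data is delivered through the VD I/O context, so sparse grains are copied with vdIfIoIntIoCtxCopyTo and ZERO extents are filled with vdIfIoIntIoCtxSet. A toy model of such a buffer-less read target (MYIOCTX, ioCtxCopyTo and ioCtxSet are invented names, not the VD interface) could look like this:

/* Hypothetical I/O-context model: the read path writes into the context
 * instead of a flat caller buffer, mirroring the pIoCtx-based vmdkRead above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct MYIOCTX
{
    uint8_t *pbBuf;     /* destination of the current request */
    size_t   cbLeft;    /* bytes still expected by the caller */
    size_t   offBuf;    /* current fill position */
} MYIOCTX;

/* Counterpart of vdIfIoIntIoCtxCopyTo: copy backend data into the context. */
static size_t ioCtxCopyTo(MYIOCTX *pCtx, const void *pvSrc, size_t cb)
{
    size_t cbCopy = cb < pCtx->cbLeft ? cb : pCtx->cbLeft;
    memcpy(pCtx->pbBuf + pCtx->offBuf, pvSrc, cbCopy);
    pCtx->offBuf += cbCopy;
    pCtx->cbLeft -= cbCopy;
    return cbCopy;
}

/* Counterpart of vdIfIoIntIoCtxSet: fill the context, as for ZERO extents. */
static size_t ioCtxSet(MYIOCTX *pCtx, int ch, size_t cb)
{
    size_t cbSet = cb < pCtx->cbLeft ? cb : pCtx->cbLeft;
    memset(pCtx->pbBuf + pCtx->offBuf, ch, cbSet);
    pCtx->offBuf += cbSet;
    pCtx->cbLeft -= cbSet;
    return cbSet;
}

int main(void)
{
    uint8_t abUser[8];
    MYIOCTX Ctx = { abUser, sizeof(abUser), 0 };
    ioCtxCopyTo(&Ctx, "grain", 5);   /* e.g. decompressed grain data */
    ioCtxSet(&Ctx, 0, 3);            /* e.g. a ZERO extent tail */
    printf("%zu bytes still outstanding\n", Ctx.cbLeft);
    return 0;
}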
    61415783
    61425784/** @copydoc VBOXHDDBACKEND::pfnWrite */
    6143 static int vmdkWrite(void *pBackendData, uint64_t uOffset, const void *pvBuf,
    6144                      size_t cbToWrite, size_t *pcbWriteProcess,
    6145                      size_t *pcbPreRead, size_t *pcbPostRead, unsigned fWrite)
    6146 {
    6147     LogFlowFunc(("pBackendData=%#p uOffset=%llu pvBuf=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n", pBackendData, uOffset, pvBuf, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
     5785static int vmdkWrite(void *pBackendData, uint64_t uOffset, size_t cbToWrite,
     5786                     PVDIOCTX pIoCtx, size_t *pcbWriteProcess, size_t *pcbPreRead,
     5787                     size_t *pcbPostRead, unsigned fWrite)
     5788{
     5789    LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
     5790                 pBackendData, uOffset, pIoCtx, cbToWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    61485791    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    61495792    PVMDKEXTENT pExtent;
     
    61975840        case VMDKETYPE_ESX_SPARSE:
    61985841#endif /* VBOX_WITH_VMDK_ESX */
    6199             rc = vmdkGetSector(pImage, pExtent, uSectorExtentRel,
    6200                                &uSectorExtentAbs);
     5842            rc = vmdkGetSector(pImage, pIoCtx, pExtent, uSectorExtentRel, &uSectorExtentAbs);
    62015843            if (RT_FAILURE(rc))
    62025844                goto out;
     
    62165858                    {
    62175859                        /* Full block write to a previously unallocated block.
    6218                          * Check if the caller wants feedback. */
     5860                         * Check if the caller wants to avoid the automatic alloc. */
    62195861                        if (!(fWrite & VD_WRITE_NO_ALLOC))
    62205862                        {
    6221                             /* Allocate GT and store the grain. */
    6222                             rc = vmdkAllocGrain(pImage, pExtent,
    6223                                                 uSectorExtentRel,
    6224                                                 pvBuf, cbToWrite);
     5863                            /* Allocate GT and find out where to store the grain. */
     5864                            rc = vmdkAllocGrain(pImage, pExtent, pIoCtx,
     5865                                                uSectorExtentRel, cbToWrite);
    62255866                        }
    62265867                        else
     
    62425883                    rc = vmdkStreamAllocGrain(pImage, pExtent,
    62435884                                              uSectorExtentRel,
    6244                                               pvBuf, cbToWrite);
     5885                                              pIoCtx, cbToWrite);
    62455886                }
    62465887            }
     
    62585899                else
    62595900                {
    6260                     rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
     5901                    Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
     5902                    rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
    62615903                                                VMDK_SECTOR2BYTE(uSectorExtentAbs),
    6262                                                 pvBuf, cbToWrite);
     5904                                                pIoCtx, cbToWrite, NULL, NULL);
    62635905                }
    62645906            }
     
    62685910            /* Clip write range to remain in this extent. */
    62695911            cbToWrite = RT_MIN(cbToWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
    6270             rc = vdIfIoIntFileWriteSync(pImage->pIfIo, pExtent->pFile->pStorage,
     5912            rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
    62715913                                        VMDK_SECTOR2BYTE(uSectorExtentRel),
    6272                                         pvBuf, cbToWrite);
     5914                                        pIoCtx, cbToWrite, NULL, NULL);
    62735915            break;
    62745916        case VMDKETYPE_ZERO:
     
    62875929
    62885930/** @copydoc VBOXHDDBACKEND::pfnFlush */
    6289 static int vmdkFlush(void *pBackendData)
    6290 {
    6291     LogFlowFunc(("pBackendData=%#p\n", pBackendData));
     5931static int vmdkFlush(void *pBackendData, PVDIOCTX pIoCtx)
     5932{
    62925933    PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    6293     int rc = VINF_SUCCESS;
    6294 
    6295     AssertPtr(pImage);
    6296 
    6297     if (!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED))
    6298         rc = vmdkFlushImage(pImage);
    6299 
    6300     LogFlowFunc(("returns %Rrc\n", rc));
    6301     return rc;
     5934
     5935    return vmdkFlushImage(pImage, pIoCtx);
    63025936}
    63035937
     
    68766510}
    68776511
    6878 /** @copydoc VBOXHDDBACKEND::pfnAsyncRead */
    6879 static int vmdkAsyncRead(void *pBackendData, uint64_t uOffset, size_t cbRead,
    6880                          PVDIOCTX pIoCtx, size_t *pcbActuallyRead)
    6881 {
    6882     LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToRead=%zu pcbActuallyRead=%#p\n",
    6883                  pBackendData, uOffset, pIoCtx, cbRead, pcbActuallyRead));
    6884     PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    6885     PVMDKEXTENT pExtent;
    6886     uint64_t uSectorExtentRel;
    6887     uint64_t uSectorExtentAbs;
    6888     int rc;
    6889 
    6890     AssertPtr(pImage);
    6891     Assert(uOffset % 512 == 0);
    6892     Assert(cbRead % 512 == 0);
    6893 
    6894     if (   uOffset + cbRead > pImage->cbSize
    6895         || cbRead == 0)
    6896     {
    6897         rc = VERR_INVALID_PARAMETER;
    6898         goto out;
    6899     }
    6900 
    6901     rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
    6902                         &pExtent, &uSectorExtentRel);
    6903     if (RT_FAILURE(rc))
    6904         goto out;
    6905 
    6906     /* Check access permissions as defined in the extent descriptor. */
    6907     if (pExtent->enmAccess == VMDKACCESS_NOACCESS)
    6908     {
    6909         rc = VERR_VD_VMDK_INVALID_STATE;
    6910         goto out;
    6911     }
    6912 
    6913     /* Clip read range to remain in this extent. */
    6914     cbRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
    6915 
    6916     /* Handle the read according to the current extent type. */
    6917     switch (pExtent->enmType)
    6918     {
    6919         case VMDKETYPE_HOSTED_SPARSE:
    6920 #ifdef VBOX_WITH_VMDK_ESX
    6921         case VMDKETYPE_ESX_SPARSE:
    6922 #endif /* VBOX_WITH_VMDK_ESX */
    6923             rc = vmdkGetSectorAsync(pImage, pIoCtx, pExtent,
    6924                                     uSectorExtentRel, &uSectorExtentAbs);
    6925             if (RT_FAILURE(rc))
    6926                 goto out;
    6927             /* Clip read range to at most the rest of the grain. */
    6928             cbRead = RT_MIN(cbRead, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
    6929             Assert(!(cbRead % 512));
    6930             if (uSectorExtentAbs == 0)
    6931                 rc = VERR_VD_BLOCK_FREE;
    6932             else
    6933             {
    6934                 AssertMsg(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED), ("Async I/O is not supported for stream optimized VMDK's\n"));
    6935                 rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
    6936                                            VMDK_SECTOR2BYTE(uSectorExtentAbs),
    6937                                            pIoCtx, cbRead);
    6938             }
    6939             break;
    6940         case VMDKETYPE_VMFS:
    6941         case VMDKETYPE_FLAT:
    6942             rc = vdIfIoIntFileReadUser(pImage->pIfIo, pExtent->pFile->pStorage,
    6943                                        VMDK_SECTOR2BYTE(uSectorExtentRel),
    6944                                        pIoCtx, cbRead);
    6945             break;
    6946         case VMDKETYPE_ZERO:
    6947             size_t cbSet;
    6948 
    6949             cbSet = vdIfIoIntIoCtxSet(pImage->pIfIo, pIoCtx, 0, cbRead);
    6950             Assert(cbSet == cbRead);
    6951 
    6952             rc = VINF_SUCCESS;
    6953             break;
    6954     }
    6955     if (pcbActuallyRead)
    6956         *pcbActuallyRead = cbRead;
    6957 
    6958 out:
    6959     LogFlowFunc(("returns %Rrc\n", rc));
    6960     return rc;
    6961 }
    6962 
    6963 /** @copydoc VBOXHDDBACKEND::pfnAsyncWrite */
    6964 static int vmdkAsyncWrite(void *pBackendData, uint64_t uOffset, size_t cbWrite,
    6965                           PVDIOCTX pIoCtx,
    6966                           size_t *pcbWriteProcess, size_t *pcbPreRead,
    6967                           size_t *pcbPostRead, unsigned fWrite)
    6968 {
    6969     LogFlowFunc(("pBackendData=%#p uOffset=%llu pIoCtx=%#p cbToWrite=%zu pcbWriteProcess=%#p pcbPreRead=%#p pcbPostRead=%#p\n",
    6970                  pBackendData, uOffset, pIoCtx, cbWrite, pcbWriteProcess, pcbPreRead, pcbPostRead));
    6971     PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    6972     PVMDKEXTENT pExtent;
    6973     uint64_t uSectorExtentRel;
    6974     uint64_t uSectorExtentAbs;
    6975     int rc;
    6976 
    6977     AssertPtr(pImage);
    6978     Assert(uOffset % 512 == 0);
    6979     Assert(cbWrite % 512 == 0);
    6980 
    6981     if (pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY)
    6982     {
    6983         rc = VERR_VD_IMAGE_READ_ONLY;
    6984         goto out;
    6985     }
    6986 
    6987     if (cbWrite == 0)
    6988     {
    6989         rc = VERR_INVALID_PARAMETER;
    6990         goto out;
    6991     }
    6992 
    6993     /* No size check here, will do that later when the extent is located.
    6994      * There are sparse images out there which according to the spec are
    6995      * invalid, because the total size is not a multiple of the grain size.
    6996      * Also for sparse images which are stitched together in odd ways (not at
    6997      * grain boundaries, and with the nominal size not being a multiple of the
    6998      * grain size), this would prevent writing to the last grain. */
    6999 
    7000     rc = vmdkFindExtent(pImage, VMDK_BYTE2SECTOR(uOffset),
    7001                         &pExtent, &uSectorExtentRel);
    7002     if (RT_FAILURE(rc))
    7003         goto out;
    7004 
    7005     /* Check access permissions as defined in the extent descriptor. */
    7006     if (pExtent->enmAccess != VMDKACCESS_READWRITE)
    7007     {
    7008         rc = VERR_VD_VMDK_INVALID_STATE;
    7009         goto out;
    7010     }
    7011 
    7012     /* Handle the write according to the current extent type. */
    7013     switch (pExtent->enmType)
    7014     {
    7015         case VMDKETYPE_HOSTED_SPARSE:
    7016 #ifdef VBOX_WITH_VMDK_ESX
    7017         case VMDKETYPE_ESX_SPARSE:
    7018 #endif /* VBOX_WITH_VMDK_ESX */
    7019             rc = vmdkGetSectorAsync(pImage, pIoCtx, pExtent, uSectorExtentRel,
    7020                                     &uSectorExtentAbs);
    7021             if (RT_FAILURE(rc))
    7022                 goto out;
    7023             /* Clip write range to at most the rest of the grain. */
    7024             cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain - uSectorExtentRel % pExtent->cSectorsPerGrain));
    7025             if (    pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED
    7026                 &&  uSectorExtentRel < (uint64_t)pExtent->uLastGrainAccess * pExtent->cSectorsPerGrain)
    7027             {
    7028                 rc = VERR_VD_VMDK_INVALID_WRITE;
    7029                 goto out;
    7030             }
    7031             if (uSectorExtentAbs == 0)
    7032             {
    7033                 if (cbWrite == VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain))
    7034                 {
    7035                     /* Full block write to a previously unallocated block.
    7036                      * Check if the caller wants to avoid the automatic alloc. */
    7037                     if (!(fWrite & VD_WRITE_NO_ALLOC))
    7038                     {
    7039                         /* Allocate GT and find out where to store the grain. */
    7040                         rc = vmdkAllocGrainAsync(pImage, pExtent, pIoCtx,
    7041                                                  uSectorExtentRel, cbWrite);
    7042                     }
    7043                     else
    7044                         rc = VERR_VD_BLOCK_FREE;
    7045                     *pcbPreRead = 0;
    7046                     *pcbPostRead = 0;
    7047                 }
    7048                 else
    7049                 {
    7050                     /* Clip write range to remain in this extent. */
    7051                     cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
    7052                     *pcbPreRead = VMDK_SECTOR2BYTE(uSectorExtentRel % pExtent->cSectorsPerGrain);
    7053                     *pcbPostRead = VMDK_SECTOR2BYTE(pExtent->cSectorsPerGrain) - cbWrite - *pcbPreRead;
    7054                     rc = VERR_VD_BLOCK_FREE;
    7055                 }
    7056             }
    7057             else
    7058             {
    7059                 Assert(!(pImage->uImageFlags & VD_VMDK_IMAGE_FLAGS_STREAM_OPTIMIZED));
    7060                 rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
    7061                                             VMDK_SECTOR2BYTE(uSectorExtentAbs),
    7062                                             pIoCtx, cbWrite, NULL, NULL);
    7063             }
    7064             break;
    7065         case VMDKETYPE_VMFS:
    7066         case VMDKETYPE_FLAT:
    7067             /* Clip write range to remain in this extent. */
    7068             cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
    7069             rc = vdIfIoIntFileWriteUser(pImage->pIfIo, pExtent->pFile->pStorage,
    7070                                         VMDK_SECTOR2BYTE(uSectorExtentRel),
    7071                                         pIoCtx, cbWrite, NULL, NULL);
    7072             break;
    7073         case VMDKETYPE_ZERO:
    7074             /* Clip write range to remain in this extent. */
    7075             cbWrite = RT_MIN(cbWrite, VMDK_SECTOR2BYTE(pExtent->uSectorOffset + pExtent->cNominalSectors - uSectorExtentRel));
    7076             break;
    7077     }
    7078 
    7079     if (pcbWriteProcess)
    7080         *pcbWriteProcess = cbWrite;
    7081 
    7082 out:
    7083     LogFlowFunc(("returns %Rrc\n", rc));
    7084     return rc;
    7085 }
    7086 
    7087 /** @copydoc VBOXHDDBACKEND::pfnAsyncFlush */
    7088 static int vmdkAsyncFlush(void *pBackendData, PVDIOCTX pIoCtx)
    7089 {
    7090     PVMDKIMAGE pImage = (PVMDKIMAGE)pBackendData;
    7091     PVMDKEXTENT pExtent;
    7092     int rc = VINF_SUCCESS;
    7093 
    7094     /* Update descriptor if changed. */
    7095     /** @todo: The descriptor is never updated because
    7096      * it remains unchanged during normal operation (only vmdkRename updates it).
    7097      * So this part is actually not tested so far and requires testing as soon
    7098      * as the descriptor might change during async I/O.
    7099      */
    7100     if (pImage->Descriptor.fDirty)
    7101     {
    7102         rc = vmdkWriteDescriptorAsync(pImage, pIoCtx);
    7103         if (   RT_FAILURE(rc)
    7104             && rc != VERR_VD_ASYNC_IO_IN_PROGRESS)
    7105             goto out;
    7106     }
    7107 
    7108     for (unsigned i = 0; i < pImage->cExtents; i++)
    7109     {
    7110         pExtent = &pImage->pExtents[i];
    7111         if (pExtent->pFile != NULL && pExtent->fMetaDirty)
    7112         {
    7113             switch (pExtent->enmType)
    7114             {
    7115                 case VMDKETYPE_HOSTED_SPARSE:
    7116 #ifdef VBOX_WITH_VMDK_ESX
    7117                 case VMDKETYPE_ESX_SPARSE:
    7118 #endif /* VBOX_WITH_VMDK_ESX */
    7119                     rc = vmdkWriteMetaSparseExtentAsync(pImage, pExtent, 0, pIoCtx);
    7120                     if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
    7121                         goto out;
    7122                     if (pExtent->fFooter)
    7123                     {
    7124                         uint64_t uFileOffset = pExtent->uAppendPosition;
    7125                         if (!uFileOffset)
    7126                         {
    7127                             rc = VERR_INTERNAL_ERROR;
    7128                             goto out;
    7129                         }
    7130                         uFileOffset = RT_ALIGN_64(uFileOffset, 512);
    7131                         rc = vmdkWriteMetaSparseExtent(pImage, pExtent, uFileOffset);
    7132                         if (RT_FAILURE(rc) && (rc != VERR_VD_ASYNC_IO_IN_PROGRESS))
    7133                             goto out;
    7134                     }
    7135                     break;
    7136                 case VMDKETYPE_VMFS:
    7137                 case VMDKETYPE_FLAT:
    7138                     /* Nothing to do. */
    7139                     break;
    7140                 case VMDKETYPE_ZERO:
    7141                 default:
    7142                     AssertMsgFailed(("extent with type %d marked as dirty\n",
    7143                                      pExtent->enmType));
    7144                     break;
    7145             }
    7146         }
    7147         switch (pExtent->enmType)
    7148         {
    7149             case VMDKETYPE_HOSTED_SPARSE:
    7150 #ifdef VBOX_WITH_VMDK_ESX
    7151             case VMDKETYPE_ESX_SPARSE:
    7152 #endif /* VBOX_WITH_VMDK_ESX */
    7153             case VMDKETYPE_VMFS:
    7154             case VMDKETYPE_FLAT:
    7155                 /*
    7156                  * Don't ignore block devices like in the sync case
    7157                  * (they have an absolute path).
    7158                  * We might have unwritten data in the writeback cache and
    7159                  * the async I/O manager will handle these requests properly
    7160                  * even if the block device doesn't support these requests.
    7161                  */
    7162                 if (   pExtent->pFile != NULL
    7163                     && !(pImage->uOpenFlags & VD_OPEN_FLAGS_READONLY))
    7164                     rc = vdIfIoIntFileFlush(pImage->pIfIo, pExtent->pFile->pStorage,
    7165                                             pIoCtx, NULL, NULL);
    7166                 break;
    7167             case VMDKETYPE_ZERO:
    7168                 /* No need to do anything for this extent. */
    7169                 break;
    7170             default:
    7171                 AssertMsgFailed(("unknown extent type %d\n", pExtent->enmType));
    7172                 break;
    7173         }
    7174     }
    7175 
    7176 out:
    7177     return rc;
    7178 }
    71796512
    71806513
     
    72116544    /* pfnFlush */
    72126545    vmdkFlush,
     6546    /* pfnDiscard */
     6547    NULL,
    72136548    /* pfnGetVersion */
    72146549    vmdkGetVersion,
     
    72636598    /* pfnSetParentFilename */
    72646599    NULL,
    7265     /* pfnAsyncRead */
    7266     vmdkAsyncRead,
    7267     /* pfnAsyncWrite */
    7268     vmdkAsyncWrite,
    7269     /* pfnAsyncFlush */
    7270     vmdkAsyncFlush,
    72716600    /* pfnComposeLocation */
    72726601    genericFileComposeLocation,
     
    72776606    /* pfnResize */
    72786607    NULL,
    7279     /* pfnDiscard */
    7280     NULL,
    7281     /* pfnAsyncDiscard */
    7282     NULL,
    72836608    /* pfnRepair */
    72846609    NULL
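The table diff above drops the separate pfnAsyncRead/pfnAsyncWrite/pfnAsyncFlush slots, leaving one callback per operation whose synchronous or asynchronous behaviour is decided by the I/O context it receives. A heavily reduced, hypothetical model of such a unified callback table (MYBACKEND, MYIOCTX and the toy* functions are illustrative; the real VBOXHDDBACKEND has many more members and different signatures):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct MYIOCTX { unsigned fSync; } MYIOCTX;  /* toy context: sync/async flag */

/* One callback per operation; the I/O context, not a separate entry point,
 * decides how the request is served. */
typedef struct MYBACKEND
{
    const char *pszName;
    int (*pfnRead) (void *pvBackend, uint64_t off, size_t cb, MYIOCTX *pCtx);
    int (*pfnFlush)(void *pvBackend, MYIOCTX *pCtx);
} MYBACKEND;

static int toyRead(void *pvBackend, uint64_t off, size_t cb, MYIOCTX *pCtx)
{
    (void)pvBackend;
    printf("read %zu bytes at %llu (%s)\n", cb,
           (unsigned long long)off, pCtx->fSync ? "sync" : "async");
    return 0;
}

static int toyFlush(void *pvBackend, MYIOCTX *pCtx)
{
    (void)pvBackend; (void)pCtx;
    return 0;
}

static const MYBACKEND g_ToyBackend = { "toy", toyRead, toyFlush };

int main(void)
{
    MYIOCTX Ctx = { 1 /* synchronous */ };
    g_ToyBackend.pfnRead((void *)0, 0, 512, &Ctx);
    g_ToyBackend.pfnFlush((void *)0, &Ctx);
    return 0;
}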