VirtualBox

Changeset 77873 in vbox for trunk/src/VBox


Ignore:
Timestamp:
Mar 26, 2019 1:36:36 AM (6 years ago)
Author:
vboxsync
Message:

linux/vboxsf: Kicked out generic_file_aio_read and generic_file_aio_write for 2.6.23-2.6.31 as they broke mmap coherency, using the read_iter/write_iter code with added glue instead and setting .aio_read/write for 2.6.19-3.16 (when they changed name to _iter). Also did some dir entry cache TTL tweaking. bugref:9172

Location:
trunk/src/VBox/Additions/linux/sharedfolders
Files:
4 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/Additions/linux/sharedfolders/dirops.c

    r77866 r77873  
    118118                 */
    119119                /** @todo do more to invalidate dentry and inode here. */
    120                 vbsf_dentry_set_update_jiffies(dentry, jiffies + INT_MAX / 2);
     120                vbsf_dentry_invalidate_ttl(dentry);
    121121                sf_i->force_restat = true;
    122122                rc = -ENOENT;
     
    564564    .read           = generic_read_dir,
    565565#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
    566     .llseek       = generic_file_llseek
     566    .llseek         = generic_file_llseek
    567567#endif
    568568};
     
    11241124                rc = 0;
    11251125            } else if (rc == VERR_FILE_NOT_FOUND || rc == VERR_PATH_NOT_FOUND) {
     1126                /* Probably deleted on the host while the guest had it cached, so don't complain: */
    11261127                LogFunc(("(%d): VbglR0SfRemove(%s) failed rc=%Rrc; calling d_drop on %p\n",
    11271128                         fDirectory, path->String.ach, rc, dentry));
     1129                sf_parent_i->force_restat = true;
    11281130                d_drop(dentry);
    1129                 rc = 0; /** @todo ??? */
     1131                rc = 0;
    11301132            } else {
    11311133                LogFunc(("(%d): VbglR0SfRemove(%s) failed rc=%Rrc\n", fDirectory, path->String.ach, rc));
     
    12411243                                   pOldPath->String.ach, pNewPath->String.ach, fRename, rc));
    12421244                        if (rc == VERR_IS_A_DIRECTORY || rc == VERR_IS_A_FILE)
    1243                             vbsf_dentry_set_update_jiffies(old_dentry, jiffies + INT_MAX / 2);
     1245                            vbsf_dentry_invalidate_ttl(old_dentry);
    12441246                        rc = -RTErrConvertToErrno(rc);
    12451247                    }
     
    13231325                        SFLOGFLOW(("vbsf_inode_symlink: Successfully created '%s' -> '%s'\n", pPath->String.ach, pTarget->String.ach));
    13241326                        pPath = NULL; /* consumed by inode */
     1327                        vbsf_dentry_chain_increase_ttl(dentry);
    13251328                    } else {
    13261329                        SFLOGFLOW(("vbsf_inode_symlink: Failed to create inode for '%s': %d\n", pPath->String.ach, rc));
     1330                        vbsf_dentry_chain_increase_parent_ttl(dentry);
     1331                        vbsf_dentry_invalidate_ttl(dentry);
    13271332                    }
    13281333                } else {
  • trunk/src/VBox/Additions/linux/sharedfolders/lnkops.c

    r77859 r77873  
    128128                rc = vbsf_symlink_nls_convert(sf_g, pszTarget, PAGE_SIZE);
    129129                if (rc == 0) {
     130                    vbsf_dentry_chain_increase_ttl(dentry);
    130131                    set_delayed_call(done, kfree_link, pszTarget);
    131132                    return pszTarget;
     
    164165            SFLOGFLOW(("vbsf_readlink: %s -> %*s\n", sf_i->path->String.ach, pszTarget));
    165166            rc = vbsf_symlink_nls_convert(sf_g, pszTarget, PAGE_SIZE);
    166             if (rc == 0)
     167            if (rc == 0) {
     168                vbsf_dentry_chain_increase_ttl(dentry);
    167169                rc = vfs_readlink(dentry, buffer, len, pszTarget);
     170            }
    168171        } else {
    169172            SFLOGFLOW(("vbsf_readlink: VbglR0SfHostReqReadLinkContigSimple failed on '%s': %Rrc\n", sf_i->path->String.ach, rc));
     
    209212                 * using the buffer we pass it here.
    210213                 */
     214                vbsf_dentry_chain_increase_ttl(dentry);
    211215# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
    212216                *cookie = pszTarget;
  • trunk/src/VBox/Additions/linux/sharedfolders/regops.c

    r77853 r77873  
    5858#endif
    5959
    60 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)
     60#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
     61# define iter_is_iovec(a_pIter) ( !((a_pIter)->type & ITER_KVEC) )
     62#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)
    6163# define iter_is_iovec(a_pIter) ( !((a_pIter)->type & (ITER_KVEC | ITER_BVEC)) )
    6264#endif
     
    7476*   Structures and Typedefs                                                                                                      *
    7577*********************************************************************************************************************************/
    76 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
     78#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
     79struct vbsf_iov_iter {
     80    unsigned int        type;
     81    unsigned int        v_write : 1;
     82    size_t              iov_offset;
     83    size_t              nr_segs;
     84    struct iovec const *iov;
     85# ifdef VBOX_STRICT
     86    struct iovec const * const iov_org;
     87    size_t              const nr_segs_org;
     88# endif
     89};
     90# ifdef VBOX_STRICT
     91#  define VBSF_IOV_ITER_INITIALIZER(a_cSegs, a_pIov, a_fWrite) { 0, a_fWrite, 0, a_cSegs, a_pIov, a_pIov, a_cSegs }
     92# else
     93#  define VBSF_IOV_ITER_INITIALIZER(a_cSegs, a_pIov, a_fWrite) { 0, a_fWrite, 0, a_cSegs, a_pIov }
     94# endif
     95# define ITER_KVEC 1
     96# define iov_iter vbsf_iov_iter
     97#endif
     98
     99#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
    77100/** Used by vbsf_iter_lock_pages() to keep the first page of the next segment. */
    78101struct vbsf_iter_stash {
     
    94117
    95118
     119/*********************************************************************************************************************************
     120*   Internal Functions                                                                                                           *
     121*********************************************************************************************************************************/
     122DECLINLINE(void) vbsf_unlock_user_pages(struct page **papPages, size_t cPages, bool fSetDirty, bool fLockPgHack);
     123
     124
     125/*********************************************************************************************************************************
     126*   Provide more recent uio.h functionality to older kernels.                                                                    *
     127*********************************************************************************************************************************/
     128#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
     129
     130# undef  iov_iter_count
     131# define iov_iter_count(a_pIter)                vbsf_iov_iter_count(a_pIter)
     132static size_t vbsf_iov_iter_count(struct vbsf_iov_iter const *iter)
     133{
     134    size_t              cbRet = 0;
     135    size_t              cLeft = iter->nr_segs;
     136    struct iovec const *iov   = iter->iov;
     137    while (cLeft-- > 0) {
     138        cbRet += iov->iov_len;
     139        iov++;
     140    }
     141    return cbRet - iter->iov_offset;
     142}
     143
     144
     145# undef  iov_iter_single_seg_count
     146# define iov_iter_single_seg_count(a_pIter)     vbsf_iov_iter_single_seg_count(a_pIter)
     147static size_t vbsf_iov_iter_single_seg_count(struct vbsf_iov_iter const *iter)
     148{
     149    if (iter->nr_segs > 0)
     150        return iter->iov->iov_len - iter->iov_offset;
     151    return 0;
     152}
     153
     154
     155# undef  iov_iter_advance
     156# define iov_iter_advance(a_pIter, a_cbSkip)    vbsf_iov_iter_advance(a_pIter, a_cbSkip)
     157static void vbsf_iov_iter_advance(struct vbsf_iov_iter *iter, size_t cbSkip)
     158{
     159    SFLOG2(("vbsf_iov_iter_advance: cbSkip=%#zx\n", cbSkip));
     160    if (iter->nr_segs > 0) {
     161        size_t const cbLeftCur = iter->iov->iov_len - iter->iov_offset;
     162        Assert(iter->iov_offset <= iter->iov->iov_len);
     163        if (cbLeftCur > cbSkip) {
     164            iter->iov_offset += cbSkip;
     165        } else {
     166            cbSkip -= cbLeftCur;
     167            iter->iov_offset = 0;
     168            iter->iov++;
     169            iter->nr_segs--;
     170            while (iter->nr_segs > 0) {
     171                size_t const cbSeg = iter->iov->iov_len;
     172                if (cbSeg > cbSkip) {
     173                    iter->iov_offset = cbSkip;
     174                    break;
     175                }
     176                cbSkip -= cbSeg;
     177                iter->iov++;
     178                iter->nr_segs--;
     179            }
     180        }
     181    }
     182}
     183
     184
# undef  iov_iter_get_pages
# define iov_iter_get_pages(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0) \
    vbsf_iov_iter_get_pages(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0)
/**
 * Replacement for the 3.16+ iov_iter_get_pages(): pins the user pages backing
 * the current (first non-empty) segment of the iterator.
 *
 * Note: the iterator itself is NOT advanced here; the caller is expected to
 * call iov_iter_advance() with the returned byte count.
 *
 * NOTE(review): @a cbMax is never referenced in the body, so the byte limit
 * is effectively only enforced via @a cMaxPages — confirm this is intended.
 *
 * @returns Number of bytes covered by the pinned pages (<= bytes left in the
 *          segment), -EFAULT if the pages could not all be pinned, or 0 if the
 *          iterator is empty (asserted, and @a *poffPg0 is left unset then).
 * @param   iter        The iterator to take pages from.
 * @param   papPages    Where to store the pinned page pointers.
 * @param   cbMax       Maximum number of bytes (currently unused, see above).
 * @param   cMaxPages   Maximum number of pages to pin.
 * @param   poffPg0     Where to return the byte offset into the first page.
 */
static ssize_t vbsf_iov_iter_get_pages(struct vbsf_iov_iter *iter, struct page **papPages,
                                       size_t cbMax, unsigned cMaxPages, size_t *poffPg0)
{
    while (iter->nr_segs > 0) {
        size_t const cbLeft = iter->iov->iov_len - iter->iov_offset;
        Assert(iter->iov->iov_len >= iter->iov_offset);
        if (cbLeft > 0) {
            /* How many pages does the remainder of this segment span? */
            uintptr_t           uPtrFrom   = (uintptr_t)iter->iov->iov_base + iter->iov_offset;
            size_t              offPg0     = *poffPg0 = uPtrFrom & PAGE_OFFSET_MASK;
            size_t              cPagesLeft = RT_ALIGN_Z(offPg0 + cbLeft, PAGE_SIZE) >> PAGE_SHIFT;
            size_t              cPages     = RT_MIN(cPagesLeft, cMaxPages);
            struct task_struct *pTask      = current;
            size_t              cPagesLocked;

            /* Pin the pages (pre-4.6 get_user_pages() signature with task/mm
               and the force flag; v_write selects write access for writes). */
            down_read(&pTask->mm->mmap_sem);
            cPagesLocked = get_user_pages(pTask, pTask->mm, uPtrFrom, cPages, iter->v_write, 1 /*force*/, papPages, NULL);
            up_read(&pTask->mm->mmap_sem);
            if (cPagesLocked == cPages) {
                size_t cbRet = (cPages << PAGE_SHIFT) - offPg0;
                if (cPages == cPagesLeft) {
                    /* Trim the tail of the last page if the segment ends mid-page. */
                    size_t offLastPg = (uPtrFrom + cbLeft) & PAGE_OFFSET_MASK;
                    if (offLastPg)
                        cbRet -= PAGE_SIZE - offLastPg;
                }
                Assert(cbRet <= cbLeft);
                return cbRet;
            }
            /* Partial pin: release what we got and fail the whole request. */
            if (cPagesLocked > 0)
                vbsf_unlock_user_pages(papPages, cPagesLocked, false /*fSetDirty*/, false /*fLockPgHack*/);
            return -EFAULT;
        }
        /* Empty segment, skip to the next one. */
        iter->iov_offset = 0;
        iter->iov++;
        iter->nr_segs--;
    }
    AssertFailed();
    return 0;
}
     226
     227
     228# undef  iov_iter_truncate
     229# define iov_iter_truncate(iter, cbNew)         vbsf_iov_iter_truncate(iter, cbNew)
     230static void vbsf_iov_iter_truncate(struct vbsf_iov_iter *iter, size_t cbNew)
     231{
     232    /* we have no counter or stuff, so it's a no-op. */
     233    RT_NOREF(iter, cbNew);
     234}
     235
     236
     237# undef  iov_iter_revert
     238# define iov_iter_revert(a_pIter, a_cbRewind) vbsf_iov_iter_revert(a_pIter, a_cbRewind)
     239void vbsf_iov_iter_revert(struct vbsf_iov_iter *iter, size_t cbRewind)
     240{
     241    SFLOG2(("vbsf_iov_iter_revert: cbRewind=%#zx\n", cbRewind));
     242    if (iter->iov_offset > 0) {
     243        if (cbRewind <= iter->iov_offset) {
     244            iter->iov_offset -= cbRewind;
     245            return;
     246        }
     247        cbRewind -= iter->iov_offset;
     248        iter->iov_offset = 0;
     249    }
     250
     251    while (cbRewind > 0) {
     252        struct iovec const *pIov  = --iter->iov;
     253        size_t const        cbSeg = pIov->iov_len;
     254        iter->nr_segs++;
     255
     256        Assert((uintptr_t)pIov >= (uintptr_t)iter->iov_org);
     257        Assert(iter->nr_segs <= iter->iter->nr_segs_org);
     258
     259        if (cbRewind <= cbSeg) {
     260            iter->iov_offset = cbSeg - cbRewind;
     261            break;
     262        }
     263        cbRewind -= cbSeg;
     264    }
     265}
     266
     267#endif /* 2.6.19 <= linux < 3.16.0 */
     268#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
     269
     270static size_t copy_from_iter(uint8_t *pbDst, size_t cbToCopy, struct iov_iter *pSrcIter)
     271{
     272    size_t const cbTotal = cbToCopy;
     273    Assert(iov_iter_count(pSrcIter) >= cbToCopy);
     274# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
     275    if (pSrcIter->type & ITER_BVEC) {
     276        while (cbToCopy > 0) {
     277            size_t const offPage    = (uintptr_t)pbDst & PAGE_OFFSET_MASK;
     278            size_t const cbThisCopy = RT_MIN(PAGE_SIZE - offPage, cbToCopy);
     279            struct page *pPage      = rtR0MemObjLinuxVirtToPage(pbDst);
     280            size_t       cbCopied   = copy_page_from_iter(pPage, offPage, cbThisCopy, pSrcIter);
     281            AssertStmt(cbCopied <= cbThisCopy, cbCopied = cbThisCopy);
     282            pbDst    += cbCopied;
     283            cbToCopy -= cbCopied;
     284            if (cbCopied != cbToCopy)
     285                break;
     286        }
     287    } else
     288# endif
     289    {
     290        while (cbToCopy > 0) {
     291            size_t cbThisCopy = iov_iter_single_seg_count(pSrcIter);
     292            if (cbThisCopy > 0) {
     293                if (cbThisCopy > cbToCopy)
     294                    cbThisCopy = cbToCopy;
     295                if (pSrcIter->type & ITER_KVEC)
     296                    memcpy(pbDst, (void *)pSrcIter->iov->iov_base + pSrcIter->iov_offset, cbThisCopy);
     297                else if (!copy_from_user(pbDst, pSrcIter->iov->iov_base + pSrcIter->iov_offset, cbThisCopy))
     298                    break;
     299                pbDst    += cbThisCopy;
     300                cbToCopy -= cbThisCopy;
     301            }
     302            iov_iter_advance(pSrcIter, cbThisCopy);
     303        }
     304    }
     305    return cbTotal - cbToCopy;
     306}
     307
     308
     309static size_t copy_to_iter(uint8_t const *pbSrc, size_t cbToCopy, struct iov_iter *pDstIter)
     310{
     311    size_t const cbTotal = cbToCopy;
     312    Assert(iov_iter_count(pDstIter) >= cbToCopy);
     313# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
     314    if (pDstIter->type & ITER_BVEC) {
     315        while (cbToCopy > 0) {
     316            size_t const offPage    = (uintptr_t)pbSrc & PAGE_OFFSET_MASK;
     317            size_t const cbThisCopy = RT_MIN(PAGE_SIZE - offPage, cbToCopy);
     318            struct page *pPage      = rtR0MemObjLinuxVirtToPage((void *)pbSrc);
     319            size_t       cbCopied   = copy_page_to_iter(pPage, offPage, cbThisCopy, pDstIter);
     320            AssertStmt(cbCopied <= cbThisCopy, cbCopied = cbThisCopy);
     321            pbSrc    += cbCopied;
     322            cbToCopy -= cbCopied;
     323            if (cbCopied != cbToCopy)
     324                break;
     325        }
     326    } else
     327# endif
     328    {
     329        while (cbToCopy > 0) {
     330            size_t cbThisCopy = iov_iter_single_seg_count(pDstIter);
     331            if (cbThisCopy > 0) {
     332                if (cbThisCopy > cbToCopy)
     333                    cbThisCopy = cbToCopy;
     334                if (pDstIter->type & ITER_KVEC)
     335                    memcpy((void *)pDstIter->iov->iov_base + pDstIter->iov_offset, pbSrc, cbThisCopy);
     336                else if (!copy_to_user(pDstIter->iov->iov_base + pDstIter->iov_offset, pbSrc, cbThisCopy)) {
     337                    break;
     338                }
     339                pbSrc    += cbThisCopy;
     340                cbToCopy -= cbThisCopy;
     341            }
     342            iov_iter_advance(pDstIter, cbThisCopy);
     343        }
     344    }
     345    return cbTotal - cbToCopy;
     346}
     347
      348#endif /* 2.6.19 <= linux < 3.18.0 */
     349
     350
     351
     352/*********************************************************************************************************************************
     353*   Handle management                                                                                                            *
     354*********************************************************************************************************************************/
    96355
    97356/**
     
    237496}
    238497
     498
     499
     500/*********************************************************************************************************************************
      501*   Pipe / splice stuff for 2.6.23 <= linux < 2.6.31 (figure out why we need this)                                               *
     502*********************************************************************************************************************************/
    239503
    240504#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) \
     
    390654
    391655#endif /* 2.6.23 <= LINUX_VERSION_CODE < 2.6.31 */
     656
     657
     658/*********************************************************************************************************************************
     659*   File operations on regular files                                                                                             *
     660*********************************************************************************************************************************/
    392661
    393662/**
     
    561830    size_t cPagesLocked;
    562831    down_read(&pTask->mm->mmap_sem);
    563     cPagesLocked = get_user_pages(current, current->mm, uPtrFrom, cPages, fWrite, 1 /*force*/, papPages, NULL);
     832    cPagesLocked = get_user_pages(pTask, pTask->mm, uPtrFrom, cPages, fWrite, 1 /*force*/, papPages, NULL);
    564833    up_read(&pTask->mm->mmap_sem);
    565834# endif
     
    11671436}
    11681437
    1169 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
    1170 /*
    1171  * Hide missing uio.h functionality in older kernsl.
    1172  */
    1173 
    1174 static size_t copy_from_iter(uint8_t *pbDst, size_t cbToCopy, struct iov_iter *pSrcIter)
    1175 {
    1176     size_t const cbTotal = cbToCopy;
    1177     Assert(iov_iter_count(pSrcIter) >= cbToCopy);
    1178     if (pSrcIter->type & ITER_BVEC) {
    1179         while (cbToCopy > 0) {
    1180             size_t const offPage    = (uintptr_t)pbDst & PAGE_OFFSET_MASK;
    1181             size_t const cbThisCopy = RT_MIN(PAGE_SIZE - offPage, cbToCopy);
    1182             struct page *pPage      = rtR0MemObjLinuxVirtToPage(pbDst);
    1183             size_t       cbCopied   = copy_page_from_iter(pPage, offPage, cbThisCopy, pSrcIter);
    1184             AssertStmt(cbCopied <= cbThisCopy, cbCopied = cbThisCopy);
    1185             pbDst    += cbCopied;
    1186             cbToCopy -= cbCopied;
    1187             if (cbCopied != cbToCopy)
    1188                 break;
    1189         }
    1190     } else {
    1191         while (cbToCopy > 0) {
    1192             size_t cbThisCopy = iov_iter_single_seg_count(pSrcIter);
    1193             if (cbThisCopy > 0) {
    1194                 if (cbThisCopy > cbToCopy)
    1195                     cbThisCopy = cbToCopy;
    1196                 if (pSrcIter->type & ITER_KVEC)
    1197                     memcpy(pbDst, (void *)pSrcIter->iov->iov_base + pSrcIter->iov_offset, cbThisCopy);
    1198                 else if (!copy_from_user(pbDst, pSrcIter->iov->iov_base + pSrcIter->iov_offset, cbThisCopy))
    1199                     break;
    1200                 pbDst    += cbThisCopy;
    1201                 cbToCopy -= cbThisCopy;
    1202             }
    1203             iov_iter_advance(pSrcIter, cbThisCopy);
    1204         }
    1205     }
    1206     return cbTotal - cbToCopy;
    1207 }
    1208 
    1209 static size_t copy_to_iter(uint8_t const *pbSrc, size_t cbToCopy, struct iov_iter *pDstIter)
    1210 {
    1211     size_t const cbTotal = cbToCopy;
    1212     Assert(iov_iter_count(pDstIter) >= cbToCopy);
    1213     if (pDstIter->type & ITER_BVEC) {
    1214         while (cbToCopy > 0) {
    1215             size_t const offPage    = (uintptr_t)pbSrc & PAGE_OFFSET_MASK;
    1216             size_t const cbThisCopy = RT_MIN(PAGE_SIZE - offPage, cbToCopy);
    1217             struct page *pPage      = rtR0MemObjLinuxVirtToPage((void *)pbSrc);
    1218             size_t       cbCopied   = copy_page_to_iter(pPage, offPage, cbThisCopy, pDstIter);
    1219             AssertStmt(cbCopied <= cbThisCopy, cbCopied = cbThisCopy);
    1220             pbSrc    += cbCopied;
    1221             cbToCopy -= cbCopied;
    1222             if (cbCopied != cbToCopy)
    1223                 break;
    1224         }
    1225     } else {
    1226         while (cbToCopy > 0) {
    1227             size_t cbThisCopy = iov_iter_single_seg_count(pDstIter);
    1228             if (cbThisCopy > 0) {
    1229                 if (cbThisCopy > cbToCopy)
    1230                     cbThisCopy = cbToCopy;
    1231                 if (pDstIter->type & ITER_KVEC)
    1232                     memcpy((void *)pDstIter->iov->iov_base + pDstIter->iov_offset, pbSrc, cbThisCopy);
    1233                 else if (!copy_to_user(pDstIter->iov->iov_base + pDstIter->iov_offset, pbSrc, cbThisCopy)) {
    1234                     break;
    1235                 }
    1236                 pbSrc    += cbThisCopy;
    1237                 cbToCopy -= cbThisCopy;
    1238             }
    1239             iov_iter_advance(pDstIter, cbThisCopy);
    1240         }
    1241     }
    1242     return cbTotal - cbToCopy;
    1243 }
    1244 
    1245 #endif /* 3.16.0 >= linux < 3.18.0 */
    1246 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
     1438#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
    12471439
    12481440/**
     
    15041696    }
    15051697
    1506 # if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
     1698# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
    15071699    iov_iter_revert(iter, cbToRewind + cbExtra);
    15081700    return true;
     
    15341726{
    15351727    size_t cPages;
     1728# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
    15361729    if (iter_is_iovec(iter) || (iter->type & ITER_KVEC)) {
     1730#endif
    15371731        const struct iovec *pCurIov    = iter->iov;
    15381732        size_t              cLeft      = iter->nr_segs;
     
    15921786        if (cPagesSpan > cPages)
    15931787            cPages = cPagesSpan;
    1594     } else  {
     1788# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
     1789    } else {
    15951790        /* Won't bother with accurate counts for the next two types, just make
    15961791           some rough estimates (does pipes have segments?): */
     
    15981793        cPages = (iov_iter_count(iter) + (PAGE_SIZE * 2 - 2) * cSegs) >> PAGE_SHIFT;
    15991794    }
     1795# endif
    16001796    SFLOGFLOW(("vbsf_iter_max_span_of_pages: returns %#zx\n", cPages));
    16011797    return cPages;
     
    17291925 * @param   iter        The I/O vector iterator describing the buffer.
    17301926 */
     1927# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
    17311928static ssize_t vbsf_reg_read_iter(struct kiocb *kio, struct iov_iter *iter)
    1732 {
     1929# else
     1930static ssize_t vbsf_reg_aio_read(struct kiocb *kio, const struct iovec *iov, unsigned long cSegs, loff_t offFile)
     1931# endif
     1932{
     1933# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
     1934    struct vbsf_iov_iter    fake_iter = VBSF_IOV_ITER_INITIALIZER(cSegs, iov, 0 /*write*/);
     1935    struct vbsf_iov_iter   *iter      = &fake_iter;
     1936# endif
    17331937    size_t                  cbToRead = iov_iter_count(iter);
    17341938    struct inode           *inode    = VBSF_GET_F_DENTRY(kio->ki_filp)->d_inode;
     
    17541958     * mappings around with any kind of pages loaded.
    17551959     */
    1756     if (vbsf_should_use_cached_read(kio->ki_filp, mapping, sf_g))
     1960    if (vbsf_should_use_cached_read(kio->ki_filp, mapping, sf_g)) {
     1961# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
    17571962        return generic_file_read_iter(kio, iter);
     1963# else
     1964        return generic_file_aio_read(kio, iov, cSegs, offFile);
     1965# endif
     1966    }
    17581967
    17591968    /*
     
    19292138
    19302139
    1931 
    19322140/**
    19332141 * Write from I/O vector iterator.
     
    19372145 * @param   iter        The I/O vector iterator describing the buffer.
    19382146 */
     2147# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
    19392148static ssize_t vbsf_reg_write_iter(struct kiocb *kio, struct iov_iter *iter)
    1940 {
     2149# else
     2150static ssize_t vbsf_reg_aio_write(struct kiocb *kio, const struct iovec *iov, unsigned long cSegs, loff_t offFile)
     2151# endif
     2152{
     2153# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
     2154    struct vbsf_iov_iter    fake_iter = VBSF_IOV_ITER_INITIALIZER(cSegs, iov, 1 /*write*/);
     2155    struct vbsf_iov_iter   *iter      = &fake_iter;
     2156# endif
    19412157    size_t                  cbToWrite = iov_iter_count(iter);
    19422158    struct inode           *inode     = VBSF_GET_F_DENTRY(kio->ki_filp)->d_inode;
     
    19462162    struct vbsf_reg_info   *sf_r      = kio->ki_filp->private_data;
    19472163    struct vbsf_super_info *sf_g      = VBSF_GET_SUPER_INFO(inode->i_sb);
     2164# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
    19482165    loff_t                  offFile   = kio->ki_pos;
     2166# endif
    19492167
    19502168    SFLOGFLOW(("vbsf_reg_write_iter: inode=%p file=%p size=%#zx off=%#llx type=%#x\n",
     
    19572175    /** @todo This should be handled by the host, it returning the new file
    19582176     *        offset when appending.  We may have an outdated i_size value here! */
    1959 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
     2177# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
    19602178    if (kio->ki_flags & IOCB_APPEND)
    1961 #else
     2179# else
    19622180    if (kio->ki_filp->f_flags & O_APPEND)
    1963 #endif
     2181# endif
    19642182        kio->ki_pos = offFile = i_size_read(inode);
    19652183
     
    19852203        && mapping->nrpages > 0
    19862204        && mapping_writably_mapped(mapping)) {
    1987 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
     2205# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
    19882206        int err = filemap_fdatawait_range(mapping, offFile, offFile + cbToWrite - 1);
    19892207        if (err)
    19902208            return err;
    1991 #else
     2209# else
    19922210        /** @todo ... */
    1993 #endif
     2211# endif
    19942212    }
    19952213
     
    20382256}
    20392257
    2040 #endif /* >= 3.16.0 */
     2258#endif /* >= 2.6.19 */
    20412259
    20422260/**
     
    21272345    struct vbsf_super_info *sf_g = VBSF_GET_SUPER_INFO(inode->i_sb);
    21282346    struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
    2129     struct vbsf_reg_info *sf_r;
    2130     struct dentry *dentry = VBSF_GET_F_DENTRY(file);
    2131     VBOXSFCREATEREQ *pReq;
     2347    struct dentry          *dentry = VBSF_GET_F_DENTRY(file);
     2348    struct vbsf_reg_info   *sf_r;
     2349    VBOXSFCREATEREQ        *pReq;
    21322350
    21332351    SFLOGFLOW(("vbsf_reg_open: inode=%p file=%p flags=%#x %s\n", inode, file, file->f_flags, sf_i ? sf_i->path->String.ach : NULL));
     
    22652483}
    22662484
     2485
    22672486/**
    22682487 * Wrapper around generic/default seek function that ensures that we've got
     
    22992518#endif
    23002519}
     2520
    23012521
    23022522/**
     
    24162636}
    24172637#endif /* > 4.5 */
     2638
    24182639
    24192640#ifdef SFLOG_ENABLED
     
    25962817    .read_iter       = vbsf_reg_read_iter,
    25972818    .write_iter      = vbsf_reg_write_iter,
     2819#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
     2820    .aio_read        = vbsf_reg_aio_read,
     2821    .aio_write       = vbsf_reg_aio_write,
    25982822#endif
    25992823    .release         = vbsf_reg_release,
     
    26032827    .mmap            = generic_file_mmap,
    26042828#endif
    2605 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    2606 # if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
     2829#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
    26072830/** @todo This code is known to cause caching of data which should not be
    2608  * cached.  Investigate. */
     2831 * cached.  Investigate --
     2832 * bird: Part of this was using generic page cache functions for
     2833 * implementing .aio_read/write.  Fixed that (see above). */
    26092834# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
    26102835    .splice_read     = vbsf_splice_read,
    26112836# else
    26122837    .sendfile        = generic_file_sendfile,
    2613 # endif
    2614     .aio_read        = generic_file_aio_read,
    2615     .aio_write       = generic_file_aio_write,
    26162838# endif
    26172839#endif
     
    26232845};
    26242846
     2847
     2848/**
     2849 * Inodes operations for regular files.
     2850 */
    26252851struct inode_operations vbsf_reg_iops = {
    26262852#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 18)
    2627     .getattr = vbsf_inode_getattr,
     2853    .getattr    = vbsf_inode_getattr,
    26282854#else
    26292855    .revalidate = vbsf_inode_revalidate,
    26302856#endif
    2631     .setattr = vbsf_inode_setattr,
     2857    .setattr    = vbsf_inode_setattr,
    26322858};
     2859
     2860
     2861
     2862/*********************************************************************************************************************************
     2863*   Address Space Operations on Regular Files (for mmap)                                                                         *
     2864*********************************************************************************************************************************/
    26332865
    26342866
     
    27693001    return err;
    27703002}
     3003
    27713004
    27723005# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
     
    27943027}
    27953028# endif /* KERNEL_VERSION >= 2.6.24 */
     3029
    27963030
    27973031# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 10)
     
    28313065 */
    28323066struct address_space_operations vbsf_reg_aops = {
    2833     .readpage = vbsf_readpage,
    2834     .writepage = vbsf_writepage,
     3067    .readpage       = vbsf_readpage,
     3068    .writepage      = vbsf_writepage,
    28353069    /** @todo Need .writepages if we want msync performance...  */
    28363070# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12)
     
    28383072# endif
    28393073# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
    2840     .write_begin = vbsf_write_begin,
    2841     .write_end = simple_write_end,
     3074    .write_begin    = vbsf_write_begin,
     3075    .write_end      = simple_write_end,
    28423076# else
    2843     .prepare_write = simple_prepare_write,
    2844     .commit_write = simple_commit_write,
     3077    .prepare_write  = simple_prepare_write,
     3078    .commit_write   = simple_commit_write,
    28453079# endif
    28463080# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 10)
    2847     .direct_IO = vbsf_direct_IO,
     3081    .direct_IO      = vbsf_direct_IO,
    28483082# endif
    28493083};
  • trunk/src/VBox/Additions/linux/sharedfolders/vfsmod.h

    r77863 r77873  
    356356
    357357/**
     358 * Invalidates the update TTL for the given directory entry so that it is
      359 * revalidated the next time it is used.
     360 * @param   pDirEntry   The directory entry cache entry to invalidate.
     361 */
DECLINLINE(void) vbsf_dentry_invalidate_ttl(struct dentry *pDirEntry)
{
    /* Push the last-update stamp half the 32-bit jiffies range into the past
       so the entry looks expired on its next use — presumably checked by the
       dentry revalidation code against the configured TTL; confirm there. */
    vbsf_dentry_set_update_jiffies(pDirEntry, jiffies - INT32_MAX / 2);
}
     366
     367/**
    358368 * Increase the time-to-live of @a pDirEntry and all ancestors.
    359  * @param   pDirEntry           The directory entry cache entry which ancestors
    360  *                  we should increase the TTL for.
     369 * @param   pDirEntry   The directory entry cache entry which ancestors
     370 *                      we should increase the TTL for.
    361371 */
    362372DECLINLINE(void) vbsf_dentry_chain_increase_ttl(struct dentry *pDirEntry)
     
    375385/**
    376386 * Increase the time-to-live of all ancestors.
    377  * @param   pDirEntry           The directory entry cache entry which ancestors
    378  *                  we should increase the TTL for.
     387 * @param   pDirEntry   The directory entry cache entry which ancestors
     388 *                      we should increase the TTL for.
    379389 */
    380390DECLINLINE(void) vbsf_dentry_chain_increase_parent_ttl(struct dentry *pDirEntry)
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette