Changeset 77873 in vbox for trunk/src/VBox
- Timestamp: Mar 26, 2019 1:36:36 AM (6 years ago)
- Location: trunk/src/VBox/Additions/linux/sharedfolders
- Files: 4 edited
trunk/src/VBox/Additions/linux/sharedfolders/dirops.c
r77866 → r77873

@@ -118 +118 @@
      */
     /** @todo do more to invalidate dentry and inode here. */
-    vbsf_dentry_set_update_jiffies(dentry, jiffies + INT_MAX / 2);
+    vbsf_dentry_invalidate_ttl(dentry);
     sf_i->force_restat = true;
     rc = -ENOENT;

@@ -564 +564 @@ (whitespace-only realignment of the .llseek initializer)
     .read = generic_read_dir,
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37)
     .llseek = generic_file_llseek
 #endif
 };

@@ -1124 +1124 @@
         rc = 0;
     } else if (rc == VERR_FILE_NOT_FOUND || rc == VERR_PATH_NOT_FOUND) {
+        /* Probably deleted on the host while the guest had it cached, so don't complain: */
         LogFunc(("(%d): VbglR0SfRemove(%s) failed rc=%Rrc; calling d_drop on %p\n",
                  fDirectory, path->String.ach, rc, dentry));
+        sf_parent_i->force_restat = true;
         d_drop(dentry);
-        rc = 0; /** @todo ??? */
+        rc = 0;
     } else {
         LogFunc(("(%d): VbglR0SfRemove(%s) failed rc=%Rrc\n", fDirectory, path->String.ach, rc));

@@ -1241 +1243 @@
                  pOldPath->String.ach, pNewPath->String.ach, fRename, rc));
         if (rc == VERR_IS_A_DIRECTORY || rc == VERR_IS_A_FILE)
-            vbsf_dentry_set_update_jiffies(old_dentry, jiffies + INT_MAX / 2);
+            vbsf_dentry_invalidate_ttl(old_dentry);
         rc = -RTErrConvertToErrno(rc);
     }

@@ -1323 +1325 @@
         SFLOGFLOW(("vbsf_inode_symlink: Successfully created '%s' -> '%s'\n", pPath->String.ach, pTarget->String.ach));
         pPath = NULL; /* consumed by inode */
+        vbsf_dentry_chain_increase_ttl(dentry);
     } else {
         SFLOGFLOW(("vbsf_inode_symlink: Failed to create inode for '%s': %d\n", pPath->String.ach, rc));
+        vbsf_dentry_chain_increase_parent_ttl(dentry);
+        vbsf_dentry_invalidate_ttl(dentry);
     }
 } else {
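The replacement repeated in the first and fourth hunks swaps an open-coded jiffies expression for the vbsf_dentry_invalidate_ttl() helper this changeset adds to vfsmod.h (last file below). A minimal sketch of the difference, assuming -- as the helper names suggest -- that vbsf_dentry_set_update_jiffies() stores the "last updated" stamp which d_revalidate later compares against the mount's TTL:

    /* Sketch only, not driver code. */

    /* Old, open-coded form: plant a stamp far in the future, relying on
     * unsigned jiffies arithmetic to make the entry look expired: */
    vbsf_dentry_set_update_jiffies(dentry, jiffies + INT_MAX / 2);

    /* New helper (see the vfsmod.h hunk below): backdate the stamp instead,
     * which states the intent directly: */
    vbsf_dentry_set_update_jiffies(dentry, jiffies - INT32_MAX / 2);

Either way the next lookup falls outside the TTL window and triggers revalidation; the helper just centralizes the idiom.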
trunk/src/VBox/Additions/linux/sharedfolders/lnkops.c
r77859 → r77873

@@ -128 +128 @@
     rc = vbsf_symlink_nls_convert(sf_g, pszTarget, PAGE_SIZE);
     if (rc == 0) {
+        vbsf_dentry_chain_increase_ttl(dentry);
         set_delayed_call(done, kfree_link, pszTarget);
         return pszTarget;

@@ -164 +165 @@
         SFLOGFLOW(("vbsf_readlink: %s -> %*s\n", sf_i->path->String.ach, pszTarget));
         rc = vbsf_symlink_nls_convert(sf_g, pszTarget, PAGE_SIZE);
-        if (rc == 0)
+        if (rc == 0) {
+            vbsf_dentry_chain_increase_ttl(dentry);
             rc = vfs_readlink(dentry, buffer, len, pszTarget);
+        }
     } else {
         SFLOGFLOW(("vbsf_readlink: VbglR0SfHostReqReadLinkContigSimple failed on '%s': %Rrc\n", sf_i->path->String.ach, rc));

@@ -209 +212 @@
      * using the buffer we pass it here.
      */
+    vbsf_dentry_chain_increase_ttl(dentry);
 # if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)
     *cookie = pszTarget;
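For context on the first hunk: with the modern get_link interface the returned target buffer must stay valid after the call returns, so it is registered with the VFS for deferred cleanup via set_delayed_call()/kfree_link(). A self-contained sketch of that contract -- example_get_link() and its buffer handling are illustrative, not the driver's actual code:

    #include <linux/fs.h>
    #include <linux/slab.h>
    #include <linux/delayed_call.h>

    /* Illustrative shape of a get_link implementation (kernel >= 4.5). */
    static const char *example_get_link(struct dentry *dentry, struct inode *inode,
                                        struct delayed_call *done)
    {
        char *pszTarget;
        if (!dentry)                   /* RCU walk: cannot sleep, bail out */
            return ERR_PTR(-ECHILD);
        pszTarget = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!pszTarget)
            return ERR_PTR(-ENOMEM);
        /* ... read the symlink target into pszTarget ... */
        set_delayed_call(done, kfree_link, pszTarget); /* VFS frees it later */
        return pszTarget;
    }

Bumping the dentry chain's TTL right before handing the target back fits naturally here: a successfully resolved link has just (re)confirmed the whole path.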
trunk/src/VBox/Additions/linux/sharedfolders/regops.c
r77853 → r77873

@@ -58 +58 @@
 #endif
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
+# define iter_is_iovec(a_pIter) ( !((a_pIter)->type & ITER_KVEC) )
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)
 # define iter_is_iovec(a_pIter) ( !((a_pIter)->type & (ITER_KVEC | ITER_BVEC)) )
 #endif

@@ -74 +76 @@
 *   Structures and Typedefs                                                     *
 *********************************************************************************/
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
+struct vbsf_iov_iter {
+    unsigned int        type;
+    unsigned int        v_write : 1;
+    size_t              iov_offset;
+    size_t              nr_segs;
+    struct iovec const *iov;
+# ifdef VBOX_STRICT
+    struct iovec const * const iov_org;
+    size_t const        nr_segs_org;
+# endif
+};
+# ifdef VBOX_STRICT
+#  define VBSF_IOV_ITER_INITIALIZER(a_cSegs, a_pIov, a_fWrite) { 0, a_fWrite, 0, a_cSegs, a_pIov, a_pIov, a_cSegs }
+# else
+#  define VBSF_IOV_ITER_INITIALIZER(a_cSegs, a_pIov, a_fWrite) { 0, a_fWrite, 0, a_cSegs, a_pIov }
+# endif
+# define ITER_KVEC 1
+# define iov_iter vbsf_iov_iter
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
 /** Used by vbsf_iter_lock_pages() to keep the first page of the next segment. */
 struct vbsf_iter_stash {

@@ -94 +117 @@
 
 
+/*********************************************************************************
+*   Internal Functions                                                          *
+*********************************************************************************/
+DECLINLINE(void) vbsf_unlock_user_pages(struct page **papPages, size_t cPages, bool fSetDirty, bool fLockPgHack);
+
+
+/*********************************************************************************
+*   Provide more recent uio.h functionality to older kernels.                   *
+*********************************************************************************/
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+
+# undef iov_iter_count
+# define iov_iter_count(a_pIter) vbsf_iov_iter_count(a_pIter)
+static size_t vbsf_iov_iter_count(struct vbsf_iov_iter const *iter)
+{
+    size_t cbRet = 0;
+    size_t cLeft = iter->nr_segs;
+    struct iovec const *iov = iter->iov;
+    while (cLeft-- > 0) {
+        cbRet += iov->iov_len;
+        iov++;
+    }
+    return cbRet - iter->iov_offset;
+}
+
+
+# undef iov_iter_single_seg_count
+# define iov_iter_single_seg_count(a_pIter) vbsf_iov_iter_single_seg_count(a_pIter)
+static size_t vbsf_iov_iter_single_seg_count(struct vbsf_iov_iter const *iter)
+{
+    if (iter->nr_segs > 0)
+        return iter->iov->iov_len - iter->iov_offset;
+    return 0;
+}
+
+
+# undef iov_iter_advance
+# define iov_iter_advance(a_pIter, a_cbSkip) vbsf_iov_iter_advance(a_pIter, a_cbSkip)
+static void vbsf_iov_iter_advance(struct vbsf_iov_iter *iter, size_t cbSkip)
+{
+    SFLOG2(("vbsf_iov_iter_advance: cbSkip=%#zx\n", cbSkip));
+    if (iter->nr_segs > 0) {
+        size_t const cbLeftCur = iter->iov->iov_len - iter->iov_offset;
+        Assert(iter->iov_offset <= iter->iov->iov_len);
+        if (cbLeftCur > cbSkip) {
+            iter->iov_offset += cbSkip;
+        } else {
+            cbSkip -= cbLeftCur;
+            iter->iov_offset = 0;
+            iter->iov++;
+            iter->nr_segs--;
+            while (iter->nr_segs > 0) {
+                size_t const cbSeg = iter->iov->iov_len;
+                if (cbSeg > cbSkip) {
+                    iter->iov_offset = cbSkip;
+                    break;
+                }
+                cbSkip -= cbSeg;
+                iter->iov++;
+                iter->nr_segs--;
+            }
+        }
+    }
+}
+
+
+# undef iov_iter_get_pages
+# define iov_iter_get_pages(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0) \
+    vbsf_iov_iter_get_pages(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0)
+static ssize_t vbsf_iov_iter_get_pages(struct vbsf_iov_iter *iter, struct page **papPages,
+                                       size_t cbMax, unsigned cMaxPages, size_t *poffPg0)
+{
+    while (iter->nr_segs > 0) {
+        size_t const cbLeft = iter->iov->iov_len - iter->iov_offset;
+        Assert(iter->iov->iov_len >= iter->iov_offset);
+        if (cbLeft > 0) {
+            uintptr_t           uPtrFrom   = (uintptr_t)iter->iov->iov_base + iter->iov_offset;
+            size_t              offPg0     = *poffPg0 = uPtrFrom & PAGE_OFFSET_MASK;
+            size_t              cPagesLeft = RT_ALIGN_Z(offPg0 + cbLeft, PAGE_SIZE) >> PAGE_SHIFT;
+            size_t              cPages     = RT_MIN(cPagesLeft, cMaxPages);
+            struct task_struct *pTask      = current;
+            size_t              cPagesLocked;
+
+            down_read(&pTask->mm->mmap_sem);
+            cPagesLocked = get_user_pages(pTask, pTask->mm, uPtrFrom, cPages, iter->v_write, 1 /*force*/, papPages, NULL);
+            up_read(&pTask->mm->mmap_sem);
+            if (cPagesLocked == cPages) {
+                size_t cbRet = (cPages << PAGE_SHIFT) - offPg0;
+                if (cPages == cPagesLeft) {
+                    size_t offLastPg = (uPtrFrom + cbLeft) & PAGE_OFFSET_MASK;
+                    if (offLastPg)
+                        cbRet -= PAGE_SIZE - offLastPg;
+                }
+                Assert(cbRet <= cbLeft);
+                return cbRet;
+            }
+            if (cPagesLocked > 0)
+                vbsf_unlock_user_pages(papPages, cPagesLocked, false /*fSetDirty*/, false /*fLockPgHack*/);
+            return -EFAULT;
+        }
+        iter->iov_offset = 0;
+        iter->iov++;
+        iter->nr_segs--;
+    }
+    AssertFailed();
+    return 0;
+}
+
+
+# undef iov_iter_truncate
+# define iov_iter_truncate(iter, cbNew) vbsf_iov_iter_truncate(iter, cbNew)
+static void vbsf_iov_iter_truncate(struct vbsf_iov_iter *iter, size_t cbNew)
+{
+    /* we have no counter or stuff, so it's a no-op. */
+    RT_NOREF(iter, cbNew);
+}
+
+
+# undef iov_iter_revert
+# define iov_iter_revert(a_pIter, a_cbRewind) vbsf_iov_iter_revert(a_pIter, a_cbRewind)
+void vbsf_iov_iter_revert(struct vbsf_iov_iter *iter, size_t cbRewind)
+{
+    SFLOG2(("vbsf_iov_iter_revert: cbRewind=%#zx\n", cbRewind));
+    if (iter->iov_offset > 0) {
+        if (cbRewind <= iter->iov_offset) {
+            iter->iov_offset -= cbRewind;
+            return;
+        }
+        cbRewind -= iter->iov_offset;
+        iter->iov_offset = 0;
+    }
+
+    while (cbRewind > 0) {
+        struct iovec const *pIov  = --iter->iov;
+        size_t const        cbSeg = pIov->iov_len;
+        iter->nr_segs++;
+
+        Assert((uintptr_t)pIov >= (uintptr_t)iter->iov_org);
+        Assert(iter->nr_segs <= iter->nr_segs_org);
+
+        if (cbRewind <= cbSeg) {
+            iter->iov_offset = cbSeg - cbRewind;
+            break;
+        }
+        cbRewind -= cbSeg;
+    }
+}
+
+#endif /* 2.6.19 <= linux < 3.16.0 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
+
+static size_t copy_from_iter(uint8_t *pbDst, size_t cbToCopy, struct iov_iter *pSrcIter)
+{
+    size_t const cbTotal = cbToCopy;
+    Assert(iov_iter_count(pSrcIter) >= cbToCopy);
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+    if (pSrcIter->type & ITER_BVEC) {
+        while (cbToCopy > 0) {
+            size_t const offPage    = (uintptr_t)pbDst & PAGE_OFFSET_MASK;
+            size_t const cbThisCopy = RT_MIN(PAGE_SIZE - offPage, cbToCopy);
+            struct page *pPage      = rtR0MemObjLinuxVirtToPage(pbDst);
+            size_t       cbCopied   = copy_page_from_iter(pPage, offPage, cbThisCopy, pSrcIter);
+            AssertStmt(cbCopied <= cbThisCopy, cbCopied = cbThisCopy);
+            pbDst    += cbCopied;
+            cbToCopy -= cbCopied;
+            if (cbCopied != cbToCopy)
+                break;
+        }
+    } else
+# endif
+    {
+        while (cbToCopy > 0) {
+            size_t cbThisCopy = iov_iter_single_seg_count(pSrcIter);
+            if (cbThisCopy > 0) {
+                if (cbThisCopy > cbToCopy)
+                    cbThisCopy = cbToCopy;
+                if (pSrcIter->type & ITER_KVEC)
+                    memcpy(pbDst, (void *)pSrcIter->iov->iov_base + pSrcIter->iov_offset, cbThisCopy);
+                else if (!copy_from_user(pbDst, pSrcIter->iov->iov_base + pSrcIter->iov_offset, cbThisCopy))
+                    break;
+                pbDst    += cbThisCopy;
+                cbToCopy -= cbThisCopy;
+            }
+            iov_iter_advance(pSrcIter, cbThisCopy);
+        }
+    }
+    return cbTotal - cbToCopy;
+}
+
+
+static size_t copy_to_iter(uint8_t const *pbSrc, size_t cbToCopy, struct iov_iter *pDstIter)
+{
+    size_t const cbTotal = cbToCopy;
+    Assert(iov_iter_count(pDstIter) >= cbToCopy);
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+    if (pDstIter->type & ITER_BVEC) {
+        while (cbToCopy > 0) {
+            size_t const offPage    = (uintptr_t)pbSrc & PAGE_OFFSET_MASK;
+            size_t const cbThisCopy = RT_MIN(PAGE_SIZE - offPage, cbToCopy);
+            struct page *pPage      = rtR0MemObjLinuxVirtToPage((void *)pbSrc);
+            size_t       cbCopied   = copy_page_to_iter(pPage, offPage, cbThisCopy, pDstIter);
+            AssertStmt(cbCopied <= cbThisCopy, cbCopied = cbThisCopy);
+            pbSrc    += cbCopied;
+            cbToCopy -= cbCopied;
+            if (cbCopied != cbToCopy)
+                break;
+        }
+    } else
+# endif
+    {
+        while (cbToCopy > 0) {
+            size_t cbThisCopy = iov_iter_single_seg_count(pDstIter);
+            if (cbThisCopy > 0) {
+                if (cbThisCopy > cbToCopy)
+                    cbThisCopy = cbToCopy;
+                if (pDstIter->type & ITER_KVEC)
+                    memcpy((void *)pDstIter->iov->iov_base + pDstIter->iov_offset, pbSrc, cbThisCopy);
+                else if (!copy_to_user(pDstIter->iov->iov_base + pDstIter->iov_offset, pbSrc, cbThisCopy)) {
+                    break;
+                }
+                pbSrc    += cbThisCopy;
+                cbToCopy -= cbThisCopy;
+            }
+            iov_iter_advance(pDstIter, cbThisCopy);
+        }
+    }
+    return cbTotal - cbToCopy;
+}
+
+#endif /* 2.6.19 <= linux < 3.18.0 */
+
+
+
+/*********************************************************************************
+*   Handle management                                                           *
+*********************************************************************************/
 
 /**

@@ -237 +496 @@
 }
 
+
+
+/*********************************************************************************
+*   Pipe / splice stuff for 2.6.23 <= linux < 2.6.31 (figure out why we need this) *
+*********************************************************************************/
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) \

@@ -390 +654 @@
 
 #endif /* 2.6.23 <= LINUX_VERSION_CODE < 2.6.31 */
+
+
+/*********************************************************************************
+*   File operations on regular files                                            *
+*********************************************************************************/
 
 /**

@@ -561 +830 @@
         size_t cPagesLocked;
         down_read(&pTask->mm->mmap_sem);
-        cPagesLocked = get_user_pages(current, current->mm, uPtrFrom, cPages, fWrite, 1 /*force*/, papPages, NULL);
+        cPagesLocked = get_user_pages(pTask, pTask->mm, uPtrFrom, cPages, fWrite, 1 /*force*/, papPages, NULL);
         up_read(&pTask->mm->mmap_sem);
 # endif

@@ -1167 +1436 @@
 }
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
-/*
- * Hide missing uio.h functionality in older kernsl.
- */
-
-static size_t copy_from_iter(uint8_t *pbDst, size_t cbToCopy, struct iov_iter *pSrcIter)
-{
-    ... (body identical to the copy_from_iter now living in the compatibility
-        section above, minus the 3.16 guards around the ITER_BVEC branch) ...
-}
-
-static size_t copy_to_iter(uint8_t const *pbSrc, size_t cbToCopy, struct iov_iter *pDstIter)
-{
-    ... (likewise identical to the relocated copy_to_iter) ...
-}
-
-#endif /* 3.16.0 >= linux < 3.18.0 */
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
 
 /**

@@ -1504 +1696 @@
 }
 
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
     iov_iter_revert(iter, cbToRewind + cbExtra);
     return true;

@@ -1534 +1726 @@
 {
     size_t cPages;
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
     if (iter_is_iovec(iter) || (iter->type & ITER_KVEC)) {
+# endif
         const struct iovec *pCurIov = iter->iov;
         size_t              cLeft   = iter->nr_segs;

@@ -1592 +1786 @@
             if (cPagesSpan > cPages)
                 cPages = cPagesSpan;
-    } else {
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+    } else {
         /* Won't bother with accurate counts for the next two types, just make
            some rough estimates (does pipes have segments?): */
 …
         cPages = (iov_iter_count(iter) + (PAGE_SIZE * 2 - 2) * cSegs) >> PAGE_SHIFT;
     }
+# endif
     SFLOGFLOW(("vbsf_iter_max_span_of_pages: returns %#zx\n", cPages));
     return cPages;

@@ -1729 +1925 @@
  * @param iter    The I/O vector iterator describing the buffer.
  */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
 static ssize_t vbsf_reg_read_iter(struct kiocb *kio, struct iov_iter *iter)
-{
+# else
+static ssize_t vbsf_reg_aio_read(struct kiocb *kio, const struct iovec *iov, unsigned long cSegs, loff_t offFile)
+# endif
+{
+# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
+    struct vbsf_iov_iter  fake_iter = VBSF_IOV_ITER_INITIALIZER(cSegs, iov, 0 /*write*/);
+    struct vbsf_iov_iter *iter      = &fake_iter;
+# endif
     size_t        cbToRead = iov_iter_count(iter);
     struct inode *inode    = VBSF_GET_F_DENTRY(kio->ki_filp)->d_inode;

@@ -1754 +1958 @@
      * mappings around with any kind of pages loaded.
      */
-    if (vbsf_should_use_cached_read(kio->ki_filp, mapping, sf_g))
+    if (vbsf_should_use_cached_read(kio->ki_filp, mapping, sf_g)) {
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
         return generic_file_read_iter(kio, iter);
+# else
+        return generic_file_aio_read(kio, iov, cSegs, offFile);
+# endif
+    }
 
     /*

@@ -1929 +2138 @@
 
 
-
 /**
  * Write from I/O vector iterator.
 …
  * @param iter    The I/O vector iterator describing the buffer.
  */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
 static ssize_t vbsf_reg_write_iter(struct kiocb *kio, struct iov_iter *iter)
-{
+# else
+static ssize_t vbsf_reg_aio_write(struct kiocb *kio, const struct iovec *iov, unsigned long cSegs, loff_t offFile)
+# endif
+{
+# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
+    struct vbsf_iov_iter  fake_iter = VBSF_IOV_ITER_INITIALIZER(cSegs, iov, 1 /*write*/);
+    struct vbsf_iov_iter *iter      = &fake_iter;
+# endif
     size_t        cbToWrite = iov_iter_count(iter);
     struct inode *inode     = VBSF_GET_F_DENTRY(kio->ki_filp)->d_inode;
 …
     struct vbsf_reg_info   *sf_r = kio->ki_filp->private_data;
     struct vbsf_super_info *sf_g = VBSF_GET_SUPER_INFO(inode->i_sb);
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
     loff_t offFile = kio->ki_pos;
+# endif

(The hunks around old lines 1957-1994 -- the IOCB_APPEND/O_APPEND check and the
filemap_fdatawait_range() call -- only re-indent the '# if'/'# else'/'# endif'
preprocessor lines; no functional change.)

@@ -2040 +2258 @@
 }
 
-#endif /* >= 3.16.0*/
+#endif /* >= 2.6.19 */

@@ -2127 +2345 @@
     struct vbsf_super_info *sf_g = VBSF_GET_SUPER_INFO(inode->i_sb);
     struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
-    struct vbsf_reg_info *sf_r;
-    struct dentry *dentry = VBSF_GET_F_DENTRY(file);
-    VBOXSFCREATEREQ *pReq;
+    struct dentry          *dentry = VBSF_GET_F_DENTRY(file);
+    struct vbsf_reg_info   *sf_r;
+    VBOXSFCREATEREQ        *pReq;
 
     SFLOGFLOW(("vbsf_reg_open: inode=%p file=%p flags=%#x %s\n", inode, file, file->f_flags, sf_i ? sf_i->path->String.ach : NULL));

(Several small hunks around old lines 2265, 2300 and 2417, plus two in the mmap
section around old lines 2770 and 2795, merely add a blank line between adjacent
functions; they are omitted here.)

@@ -2596 +2817 @@
     .read_iter = vbsf_reg_read_iter,
     .write_iter = vbsf_reg_write_iter,
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+    .aio_read = vbsf_reg_aio_read,
+    .aio_write = vbsf_reg_aio_write,
 #endif
     .release = vbsf_reg_release,
 …
     .mmap = generic_file_mmap,
 #endif
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
-# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
     /** @todo This code is known to cause caching of data which should not be
-     *        cached.  Investigate. */
+     *        cached.  Investigate --
+     *        bird: Part of this was using generic page cache functions for
+     *              implementing .aio_read/write.  Fixed that (see above). */
 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
     .splice_read = vbsf_splice_read,
 # else
     .sendfile = generic_file_sendfile,
-# endif
-    .aio_read = generic_file_aio_read,
-    .aio_write = generic_file_aio_write,
 # endif
 #endif

@@ -2623 +2845 @@
 };
 
+
+/**
+ * Inode operations for regular files.
+ */
 struct inode_operations vbsf_reg_iops = {
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 18)
-    .getattr = vbsf_inode_getattr,
+    .getattr    = vbsf_inode_getattr,
 #else
     .revalidate = vbsf_inode_revalidate,
 #endif
-    .setattr = vbsf_inode_setattr,
+    .setattr    = vbsf_inode_setattr,
 };
+
+
+
+/*********************************************************************************
+*   Address Space Operations on Regular Files (for mmap)                        *
+*********************************************************************************/

@@ -2832 +3066 @@ (member realignment only; whitespace change)
 struct address_space_operations vbsf_reg_aops = {
     .readpage      = vbsf_readpage,
     .writepage     = vbsf_writepage,
     /** @todo Need .writepages if we want msync performance...  */
 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12)
 …
 # endif
 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
     .write_begin   = vbsf_write_begin,
     .write_end     = simple_write_end,
 # else
     .prepare_write = simple_prepare_write,
     .commit_write  = simple_commit_write,
 # endif
 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 10)
     .direct_IO     = vbsf_direct_IO,
 # endif
 };
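The heart of this diff is the vbsf_iov_iter shim: on kernels older than 3.16 there is no struct iov_iter, so the new .aio_read/.aio_write entry points wrap the raw iovec array in a stack-allocated fake iterator, and the iov_iter_*() calls are redirected to the vbsf_*() versions by the macros above, letting one function body serve both interfaces. A small worked example of the iterator model the shim implements -- aSegs, bufA and bufB are made-up test data, not from the driver:

    struct iovec aSegs[2] = {
        { .iov_base = bufA, .iov_len = 16 },
        { .iov_base = bufB, .iov_len = 32 },
    };
    struct vbsf_iov_iter It = VBSF_IOV_ITER_INITIALIZER(2 /*cSegs*/, aSegs, 0 /*write*/);

    size_t cb = iov_iter_count(&It);     /* 48: sums iov_len, minus iov_offset */
    iov_iter_advance(&It, 20);           /* consumes all of bufA plus 4 bytes of
                                            bufB: iov -> &aSegs[1], iov_offset = 4 */
    cb = iov_iter_single_seg_count(&It); /* 28: what remains of the current segment */
    iov_iter_revert(&It, 20);            /* walks the iovec array backwards to the
                                            starting state */

On 3.16 and later the same call sites compile against the kernel's real iov_iter, which is why the shim also #defines ITER_KVEC and iov_iter itself.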
trunk/src/VBox/Additions/linux/sharedfolders/vfsmod.h
r77863 → r77873 (the @param realignment in both existing doc comments is whitespace-only)

@@ -356 +356 @@
 
 /**
+ * Invalidates the update TTL for the given directory entry so that it is
+ * revalidated the next time it is used.
+ * @param   pDirEntry   The directory entry cache entry to invalidate.
+ */
+DECLINLINE(void) vbsf_dentry_invalidate_ttl(struct dentry *pDirEntry)
+{
+    vbsf_dentry_set_update_jiffies(pDirEntry, jiffies - INT32_MAX / 2);
+}
+
+/**
  * Increase the time-to-live of @a pDirEntry and all ancestors.
- * @param pDirEntry     The directory entry cache entry which ancestors
- *                      we should increase the TTL for.
+ * @param   pDirEntry   The directory entry cache entry which ancestors
+ *                      we should increase the TTL for.
  */
 DECLINLINE(void) vbsf_dentry_chain_increase_ttl(struct dentry *pDirEntry)

@@ -375 +385 @@
 /**
  * Increase the time-to-live of all ancestors.
- * @param pDirEntry     The directory entry cache entry which ancestors
- *                      we should increase the TTL for.
+ * @param   pDirEntry   The directory entry cache entry which ancestors
+ *                      we should increase the TTL for.
  */
 DECLINLINE(void) vbsf_dentry_chain_increase_parent_ttl(struct dentry *pDirEntry)
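Together with the two existing chain helpers, the new invalidator gives the dirops.c and lnkops.c hunks above one consistent idiom. A hedged sketch of how this changeset combines them, modeled on the vbsf_inode_symlink hunk (the control flow is schematic, not the exact driver code):

    if (RT_SUCCESS(rc)) {
        /* The host confirmed the operation, so the whole chain is fresh: */
        vbsf_dentry_chain_increase_ttl(dentry);
    } else {
        /* The parents were still verified by the walk, but this entry is
           now suspect and must be revalidated on next use: */
        vbsf_dentry_chain_increase_parent_ttl(dentry);
        vbsf_dentry_invalidate_ttl(dentry);
    }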