Changeset 77458 in vbox for trunk/src/VBox/Additions/linux
- Timestamp: Feb 25, 2019 2:40:13 PM
- svn:sync-xref-src-repo-rev: 129031
- Location: trunk/src/VBox/Additions/linux/sharedfolders
- Files: 5 edited
trunk/src/VBox/Additions/linux/sharedfolders/dirops.c
--- dirops.c (r77303)
+++ dirops.c (r77458)
@@ -432 +432 @@
     sf_new_i->handle = SHFL_HANDLE_NIL;
     sf_new_i->force_reread = 0;
+    RTListInit(&sf_new_i->HandleList);
+#ifdef VBOX_STRICT
+    sf_new_i->u32Magic = SF_INODE_INFO_MAGIC;
+#endif
 
     ino = iunique(parent->i_sb, 1);
@@ -519 +523 @@
 
     sf_init_inode(sf_g, inode, info);
+
     sf_new_i->path = path;
-    SET_INODE_INFO(inode, sf_new_i);
+    RTListInit(&sf_new_i->HandleList);
     sf_new_i->force_restat = 1;
     sf_new_i->force_reread = 0;
+#ifdef VBOX_STRICT
+    sf_new_i->u32Magic = SF_INODE_INFO_MAGIC;
+#endif
+    SET_INODE_INFO(inode, sf_new_i);
 
     d_instantiate(dentry, inode);
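Both dirops.c hunks follow the same pattern: every place that constructs a fresh struct sf_inode_info now also initializes the new HandleList and, in strict builds, the magic value before publishing the structure via SET_INODE_INFO(). A minimal sketch of that initialization order, factored into a hypothetical helper (sf_init_inode_info() is not part of the changeset; the field names come from the vfsmod.h diff below):

#include "vfsmod.h"   /* struct sf_inode_info, SF_INODE_INFO_MAGIC, SET_INODE_INFO() */

/* Hypothetical helper illustrating the init order used by the changeset:
 * fill in every field, including the new HandleList and u32Magic, before
 * the inode info becomes visible through SET_INODE_INFO(). */
static void sf_init_inode_info(struct inode *inode, struct sf_inode_info *sf_new_i, SHFLSTRING *path)
{
    sf_new_i->path         = path;
    sf_new_i->force_restat = 1;
    sf_new_i->force_reread = 0;
    sf_new_i->handle       = SHFL_HANDLE_NIL;
    RTListInit(&sf_new_i->HandleList);      /* list of struct sf_handle added by r77458 */
#ifdef VBOX_STRICT
    sf_new_i->u32Magic     = SF_INODE_INFO_MAGIC;
#endif
    SET_INODE_INFO(inode, sf_new_i);        /* publish last */
}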
trunk/src/VBox/Additions/linux/sharedfolders/files_vboxsf
--- files_vboxsf (r77391)
+++ files_vboxsf (r77458)
@@ -30 +30 @@
     ${PATH_ROOT}/include/iprt/fs.h=>include/iprt/fs.h \
     ${PATH_ROOT}/include/iprt/latin1.h=>include/iprt/latin1.h \
+    ${PATH_ROOT}/include/iprt/list.h=>include/iprt/list.h \
     ${PATH_ROOT}/include/iprt/log.h=>include/iprt/log.h \
     ${PATH_ROOT}/include/iprt/mangling.h=>include/iprt/mangling.h \
trunk/src/VBox/Additions/linux/sharedfolders/regops.c
--- regops.c (r77444)
+++ regops.c (r77458)
@@ -73 +73 @@
 #endif /* < 2.6.0 */
 
+
+/**
+ * Called when an inode is released to unlink all handles that might impossibly
+ * still be associated with it.
+ *
+ * @param   pInodeInfo  The inode which handles to drop.
+ */
+void sf_handle_drop_chain(struct sf_inode_info *pInodeInfo)
+{
+    struct sf_handle *pCur, *pNext;
+    unsigned long fSavedFlags;
+    SFLOGFLOW(("sf_handle_drop_chain: %p\n", pInodeInfo));
+    spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
+
+    RTListForEachSafe(&pInodeInfo->HandleList, pCur, pNext, struct sf_handle, Entry) {
+        AssertMsg((pCur->fFlags & (SF_HANDLE_F_MAGIC_MASK | SF_HANDLE_F_ON_LIST)) == (SF_HANDLE_F_MAGIC | SF_HANDLE_F_ON_LIST),
+                  ("%p %#x\n", pCur, pCur->fFlags));
+        pCur->fFlags |= SF_HANDLE_F_ON_LIST;
+        RTListNodeRemove(&pCur->Entry);
+    }
+
+    spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
+}
+
+
+/**
+ * Locates a handle that matches all the flags in @a fFlags.
+ *
+ * @returns Pointer to handle on success (retained), use sf_handle_release() to
+ *          release it.  NULL if no suitable handle was found.
+ * @param   pInodeInfo  The inode info to search.
+ * @param   fFlagsSet   The flags that must be set.
+ * @param   fFlagsClear The flags that must be clear.
+ */
+struct sf_handle *sf_handle_find(struct sf_inode_info *pInodeInfo, uint32_t fFlagsSet, uint32_t fFlagsClear)
+{
+    struct sf_handle *pCur;
+    unsigned long fSavedFlags;
+    spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
+
+    RTListForEach(&pInodeInfo->HandleList, pCur, struct sf_handle, Entry) {
+        AssertMsg((pCur->fFlags & (SF_HANDLE_F_MAGIC_MASK | SF_HANDLE_F_ON_LIST)) == (SF_HANDLE_F_MAGIC | SF_HANDLE_F_ON_LIST),
+                  ("%p %#x\n", pCur, pCur->fFlags));
+        if ((pCur->fFlags & (fFlagsSet | fFlagsClear)) == fFlagsSet) {
+            uint32_t cRefs = ASMAtomicIncU32(&pCur->cRefs);
+            if (cRefs > 1) {
+                spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
+                SFLOGFLOW(("sf_handle_find: returns %p\n", pCur));
+                return pCur;
+            }
+            /* Oops, already being closed (safe as it's only ever increased here). */
+            ASMAtomicDecU32(&pCur->cRefs);
+        }
+    }
+
+    spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
+    SFLOGFLOW(("sf_handle_find: returns NULL!\n"));
+    return NULL;
+}
+
+
+/**
+ * Slow worker for sf_handle_release() that does the freeing.
+ *
+ * @returns 0 (ref count).
+ * @param   pHandle     The handle to release.
+ * @param   sf_g        The info structure for the shared folder associated
+ *                      with the handle.
+ * @param   pszCaller   The caller name (for logging failures).
+ */
+uint32_t sf_handle_release_slow(struct sf_handle *pHandle, struct sf_glob_info *sf_g, const char *pszCaller)
+{
+    int rc;
+    unsigned long fSavedFlags;
+
+    SFLOGFLOW(("sf_handle_release_slow: %p (%s)\n", pHandle, pszCaller));
+
+    /*
+     * Remove from the list.
+     */
+    spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
+
+    AssertMsg((pHandle->fFlags & SF_HANDLE_F_MAGIC_MASK) == SF_HANDLE_F_MAGIC, ("%p %#x\n", pHandle, pHandle->fFlags));
+    Assert(pHandle->pInodeInfo);
+    Assert(pHandle->pInodeInfo && pHandle->pInodeInfo->u32Magic == SF_INODE_INFO_MAGIC);
+
+    if (pHandle->fFlags & SF_HANDLE_F_ON_LIST) {
+        pHandle->fFlags &= ~SF_HANDLE_F_ON_LIST;
+        RTListNodeRemove(&pHandle->Entry);
+    }
+
+    spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
+
+    /*
+     * Actually destroy it.
+     */
+    rc = VbglR0SfHostReqCloseSimple(sf_g->map.root, pHandle->hHost);
+    if (RT_FAILURE(rc))
+        LogFunc(("Caller %s: VbglR0SfHostReqCloseSimple %#RX64 failed with rc=%Rrc\n", pszCaller, pHandle->hHost, rc));
+    pHandle->hHost  = SHFL_HANDLE_NIL;
+    pHandle->fFlags = SF_HANDLE_F_MAGIC_DEAD;
+    kfree(pHandle);
+    return 0;
+}
+
+
+/**
+ * Appends a handle to a handle list.
+ *
+ * @param   pInodeInfo  The inode to add it to.
+ * @param   pHandle     The handle to add.
+ */
+void sf_handle_append(struct sf_inode_info *pInodeInfo, struct sf_handle *pHandle)
+{
+#ifdef VBOX_STRICT
+    struct sf_handle *pCur;
+#endif
+    unsigned long fSavedFlags;
+
+    SFLOGFLOW(("sf_handle_append: %p (to %p)\n", pHandle, pInodeInfo));
+    AssertMsg((pHandle->fFlags & (SF_HANDLE_F_MAGIC_MASK | SF_HANDLE_F_ON_LIST)) == SF_HANDLE_F_MAGIC,
+              ("%p %#x\n", pHandle, pHandle->fFlags));
+    Assert(pInodeInfo->u32Magic == SF_INODE_INFO_MAGIC);
+
+    spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
+
+    AssertMsg((pHandle->fFlags & (SF_HANDLE_F_MAGIC_MASK | SF_HANDLE_F_ON_LIST)) == SF_HANDLE_F_MAGIC,
+              ("%p %#x\n", pHandle, pHandle->fFlags));
+#ifdef VBOX_STRICT
+    RTListForEach(&pInodeInfo->HandleList, pCur, struct sf_handle, Entry) {
+        Assert(pCur != pHandle);
+        AssertMsg((pCur->fFlags & (SF_HANDLE_F_MAGIC_MASK | SF_HANDLE_F_ON_LIST)) == (SF_HANDLE_F_MAGIC | SF_HANDLE_F_ON_LIST),
+                  ("%p %#x\n", pCur, pCur->fFlags));
+    }
+    pHandle->pInodeInfo = pInodeInfo;
+#endif
+
+    pHandle->fFlags |= SF_HANDLE_F_ON_LIST;
+    RTListAppend(&pInodeInfo->HandleList, &pHandle->Entry);
+
+    spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
+}
 
 
@@ -131 +273 @@
                           uint32_t * nread, uint64_t pos)
 {
-    int rc = VbglR0SfRead(&client_handle, &sf_g->map, sf_r->handle,
+    int rc = VbglR0SfRead(&client_handle, &sf_g->map, sf_r->Handle.hHost,
                           pos, nread, buf, false /* already locked? */ );
     if (RT_FAILURE(rc)) {
@@ -390 +532 @@
      * Issue the request and unlock the pages.
      */
-    rc = VbglR0SfHostReqReadPgLst(sf_g->map.root, pReq, sf_r->handle, offFile, cbChunk, cPages);
+    rc = VbglR0SfHostReqReadPgLst(sf_g->map.root, pReq, sf_r->Handle.hHost, offFile, cbChunk, cPages);
 
     sf_unlock_user_pages(papPages, cPages, true /*fSetDirty*/);
@@ -457 +599 @@
     struct address_space *mapping = inode->i_mapping;
 
-    TRACE();
+    SFLOGFLOW(("sf_reg_read: inode=%p file=%p buf=%p size=%#zx off=%#llx\n", inode, file, buf, size, *off));
+
     if (!S_ISREG(inode->i_mode)) {
         LogFunc(("read from non regular file %d\n", inode->i_mode));
@@ -491 +634 @@
         && (PAGE_SIZE - ((uintptr_t)pReq & PAGE_OFFSET_MASK)) >= cbReq) {
         ssize_t cbRet;
-        int vrc = VbglR0SfHostReqReadEmbedded(sf_g->map.root, pReq, sf_r->handle, *off, (uint32_t)size);
+        int vrc = VbglR0SfHostReqReadEmbedded(sf_g->map.root, pReq, sf_r->Handle.hHost, *off, (uint32_t)size);
         if (RT_SUCCESS(vrc)) {
             cbRet = pReq->Parms.cb32Read.u.value32;
@@ -518 +661 @@
     if (pReq) {
         ssize_t cbRet;
-        int vrc = VbglR0SfHostReqReadContig(sf_g->map.root, pReq, sf_r->handle, *off, (uint32_t)size,
-                                            pvBounce, virt_to_phys(pvBounce));
+        int vrc = VbglR0SfHostReqReadContig(sf_g->map.root, pReq, sf_r->Handle.hHost, *off,
+                                            (uint32_t)size, pvBounce, virt_to_phys(pvBounce));
         if (RT_SUCCESS(vrc)) {
             cbRet = pReq->Parms.cb32Read.u.value32;
@@ -629 +772 @@
      * Issue the request and unlock the pages.
      */
-    rc = VbglR0SfHostReqWritePgLst(sf_g->map.root, pReq, sf_r->handle, offFile, cbChunk, cPages);
+    rc = VbglR0SfHostReqWritePgLst(sf_g->map.root, pReq, sf_r->Handle.hHost, offFile, cbChunk, cPages);
 
     sf_unlock_user_pages(papPages, cPages, false /*fSetDirty*/);
@@ -702 +845 @@
     loff_t pos;
 
-    TRACE();
+    SFLOGFLOW(("sf_reg_write: inode=%p file=%p buf=%p size=%#zx off=%#llx\n", inode, file, buf, size, *off));
     BUG_ON(!sf_i);
     BUG_ON(!sf_g);
@@ -753 +896 @@
         ssize_t cbRet;
         if (copy_from_user(pReq->abData, buf, size) == 0) {
-            int vrc = VbglR0SfHostReqWriteEmbedded(sf_g->map.root, pReq, sf_r->handle, pos, (uint32_t)size);
+            int vrc = VbglR0SfHostReqWriteEmbedded(sf_g->map.root, pReq, sf_r->Handle.hHost,
+                                                   pos, (uint32_t)size);
             if (RT_SUCCESS(vrc)) {
                 cbRet = pReq->Parms.cb32Write.u.value32;
@@ -832 +976 @@
     SHFLCREATEPARMS *pCreateParms;  /* temp glue */
 
-    TRACE();
+    SFLOGFLOW(("sf_reg_open: inode=%p file=%p flags=%#x %s\n",
+               inode, file, file->f_flags, sf_i ? sf_i->path->String.ach : NULL));
     BUG_ON(!sf_g);
     BUG_ON(!sf_i);
-
-    LogFunc(("open %s\n", sf_i->path->String.utf8));
 
     sf_r = kmalloc(sizeof(*sf_r), GFP_KERNEL);
@@ -843 +986 @@
         return -ENOMEM;
     }
+
+    RTListInit(&sf_r->Handle.Entry);
+    sf_r->Handle.cRefs  = 1;
+    sf_r->Handle.fFlags = SF_HANDLE_F_FILE | SF_HANDLE_F_MAGIC;
+    sf_r->Handle.hHost  = SHFL_HANDLE_NIL;
 
     /* Already open? */
@@ -852 +1000 @@
          */
         sf_i->force_restat = 1;
-        sf_r->handle = sf_i->handle;
+        sf_r->Handle.hHost = sf_i->handle;
         sf_i->handle = SHFL_HANDLE_NIL;
-        sf_i->file = file;
         file->private_data = sf_r;
+
+        sf_r->Handle.fFlags |= SF_HANDLE_F_READ | SF_HANDLE_F_WRITE; /** @todo check */
+        sf_handle_append(sf_i, &sf_r->Handle);
+        SFLOGFLOW(("sf_reg_open: returns 0 (#1) - sf_i=%p hHost=%#llx\n", sf_i, sf_r->Handle.hHost));
         return 0;
     }
@@ -898 +1049 @@
     case O_RDONLY:
         pCreateParms->CreateFlags |= SHFL_CF_ACCESS_READ;
+        sf_r->Handle.fFlags |= SF_HANDLE_F_READ;
         break;
 
     case O_WRONLY:
         pCreateParms->CreateFlags |= SHFL_CF_ACCESS_WRITE;
+        sf_r->Handle.fFlags |= SF_HANDLE_F_WRITE;
         break;
 
     case O_RDWR:
         pCreateParms->CreateFlags |= SHFL_CF_ACCESS_READWRITE;
+        sf_r->Handle.fFlags |= SF_HANDLE_F_READ | SF_HANDLE_F_WRITE;
         break;
 
@@ -915 +1069 @@
         LogFunc(("O_APPEND set\n"));
         pCreateParms->CreateFlags |= SHFL_CF_ACCESS_APPEND;
+        sf_r->Handle.fFlags |= SF_HANDLE_F_APPEND;
     }
 
@@ -942 +1097 @@
 
     sf_i->force_restat = 1;
-    sf_r->handle = pCreateParms->Handle;
-    sf_i->file = file;
+    sf_r->Handle.hHost = pCreateParms->Handle;
     file->private_data = sf_r;
+    sf_handle_append(sf_i, &sf_r->Handle);
     VbglR0PhysHeapFree(pReq);
+    SFLOGFLOW(("sf_reg_open: returns 0 (#2) - sf_i=%p hHost=%#llx\n", sf_i, sf_r->Handle.hHost));
     return rc_linux;
 }
+
 
 /**
@@ -958 +1115 @@
 static int sf_reg_release(struct inode *inode, struct file *file)
 {
-    int rc;
     struct sf_reg_info *sf_r;
     struct sf_glob_info *sf_g;
     struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
 
-    TRACE();
+    SFLOGFLOW(("sf_reg_release: inode=%p file=%p\n", inode, file));
     sf_g = GET_GLOB_INFO(inode->i_sb);
     sf_r = file->private_data;
@@ -980 +1136 @@
     filemap_fdatawait(inode->i_mapping);
 #endif
-    rc = VbglR0SfHostReqCloseSimple(sf_g->map.root, sf_r->handle);
-    if (RT_FAILURE(rc))
-        LogFunc(("VbglR0SfHostReqCloseSimple failed rc=%Rrc\n", rc));
-    sf_r->handle = SHFL_HANDLE_NIL;
-
-    kfree(sf_r);
-    sf_i->file = NULL;
+
+    /* Release sf_r, closing the handle if we're the last user. */
+    file->private_data = NULL;
+    sf_handle_release(&sf_r->Handle, sf_g, "sf_reg_release");
+
     sf_i->handle = SHFL_HANDLE_NIL;
-    file->private_data = NULL;
     return 0;
 }
@@ -1003 +1156 @@
 static loff_t sf_reg_llseek(struct file *file, loff_t off, int whence)
 {
+    SFLOGFLOW(("sf_reg_llseek: file=%p off=%lld whence=%d\n", file, off, whence));
+
     switch (whence) {
 #ifdef SEEK_HOLE
@@ -1010 +1165 @@
     case SEEK_END: {
         struct sf_reg_info *sf_r = file->private_data;
-        int rc = sf_inode_revalidate_with_handle(GET_F_DENTRY(file), sf_r->handle, true /*fForce*/);
+        int rc = sf_inode_revalidate_with_handle(GET_F_DENTRY(file), sf_r->Handle.hHost, true /*fForce*/);
         if (rc == 0)
             break;
@@ -1128 +1283 @@
     int err;
 
-    TRACE();
+    SFLOGFLOW(("sf_readpage: inode=%p file=%p page=%p off=%#llx\n", inode, file, page, (uint64_t)page->index << PAGE_SHIFT));
+
     if (!is_bad_inode(inode)) {
         VBOXSFREADPGLSTREQ *pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
@@ -1141 +1297 @@
             vrc = VbglR0SfHostReqReadPgLst(sf_g->map.root,
                                            pReq,
-                                           sf_r->handle,
+                                           sf_r->Handle.hHost,
                                            (uint64_t)page->index << PAGE_SHIFT,
                                            PAGE_SIZE,
@@ -1173 +1329 @@
 }
 
+
 /**
@@ -1181 +1338 @@
 static int sf_writepage(struct page *page, struct writeback_control *wbc)
 {
-    int err;
-
-    TRACE();
-
-    VBOXSFWRITEPGLSTREQ *pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
-    if (pReq) {
-        struct address_space *mapping = page->mapping;
-        struct inode *inode = mapping->host;
-        struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
-        struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
-        struct file *file = sf_i->file; /** @todo r=bird: This isn't quite sane wrt readonly vs writeable. */
-        struct sf_reg_info *sf_r = file->private_data;
-        uint64_t const cbFile    = i_size_read(inode);
-        uint64_t const offInFile = (uint64_t)page->index << PAGE_SHIFT;
-        uint32_t const cbToWrite = page->index != (cbFile >> PAGE_SHIFT) ? PAGE_SIZE
-                                 : (uint32_t)cbFile & (uint32_t)PAGE_OFFSET_MASK;
-        int vrc;
-
-        pReq->PgLst.offFirstPage = 0;
-        pReq->PgLst.aPages[0]    = page_to_phys(page);
-        vrc = VbglR0SfHostReqWritePgLst(sf_g->map.root,
-                                        pReq,
-                                        sf_r->handle,
-                                        offInFile,
-                                        cbToWrite,
-                                        1 /*cPages*/);
-        AssertMsgStmt(pReq->Parms.cb32Write.u.value32 == cbToWrite || RT_FAILURE(vrc), /* lazy bird */
-                      ("%#x vs %#x\n", pReq->Parms.cb32Write, cbToWrite),
-                      vrc = VERR_WRITE_ERROR);
-        VbglR0PhysHeapFree(pReq);
-
-        if (RT_SUCCESS(vrc)) {
-            /* Update the inode if we've extended the file. */
-            /** @todo is this necessary given the cbToWrite calc above? */
-            uint64_t const offEndOfWrite = offInFile + cbToWrite;
-            if (   offEndOfWrite > cbFile
-                && offEndOfWrite > i_size_read(inode))
-                i_size_write(inode, offEndOfWrite);
-
-            if (PageError(page))
-                ClearPageError(page);
-
-            err = 0;
-        } else {
-            ClearPageUptodate(page);
-            err = -EPROTO;
-        }
-    } else
-        err = -ENOMEM;
+    struct address_space *mapping = page->mapping;
+    struct inode *inode = mapping->host;
+    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
+    struct sf_handle *pHandle  = sf_handle_find(sf_i, SF_HANDLE_F_WRITE, SF_HANDLE_F_APPEND);
+    int err;
+
+    SFLOGFLOW(("sf_writepage: inode=%p page=%p off=%#llx pHandle=%p (%#llx)\n",
+               inode, page,(uint64_t)page->index << PAGE_SHIFT, pHandle, pHandle->hHost));
+
+    if (pHandle) {
+        struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
+        VBOXSFWRITEPGLSTREQ *pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
+        if (pReq) {
+            uint64_t const cbFile    = i_size_read(inode);
+            uint64_t const offInFile = (uint64_t)page->index << PAGE_SHIFT;
+            uint32_t const cbToWrite = page->index != (cbFile >> PAGE_SHIFT) ? PAGE_SIZE
+                                     : (uint32_t)cbFile & (uint32_t)PAGE_OFFSET_MASK;
+            int vrc;
+
+            pReq->PgLst.offFirstPage = 0;
+            pReq->PgLst.aPages[0]    = page_to_phys(page);
+            vrc = VbglR0SfHostReqWritePgLst(sf_g->map.root,
+                                            pReq,
+                                            pHandle->hHost,
+                                            offInFile,
+                                            cbToWrite,
+                                            1 /*cPages*/);
+            AssertMsgStmt(pReq->Parms.cb32Write.u.value32 == cbToWrite || RT_FAILURE(vrc), /* lazy bird */
+                          ("%#x vs %#x\n", pReq->Parms.cb32Write, cbToWrite),
+                          vrc = VERR_WRITE_ERROR);
+            VbglR0PhysHeapFree(pReq);
+
+            if (RT_SUCCESS(vrc)) {
+                /* Update the inode if we've extended the file. */
+                /** @todo is this necessary given the cbToWrite calc above? */
+                uint64_t const offEndOfWrite = offInFile + cbToWrite;
+                if (   offEndOfWrite > cbFile
+                    && offEndOfWrite > i_size_read(inode))
+                    i_size_write(inode, offEndOfWrite);
+
+                if (PageError(page))
+                    ClearPageError(page);
+
+                err = 0;
+            } else {
+                ClearPageUptodate(page);
+                err = -EPROTO;
+            }
+        } else
+            err = -ENOMEM;
+        sf_handle_release(pHandle, sf_g, "sf_writepage");
+    } else {
+        static uint64_t volatile s_cCalls = 0;
+        if (s_cCalls++ < 16)
+            printk("sf_writepage: no writable handle for %s..\n", sf_i->path->String.ach);
+        err = -EPROTO;
+    }
     unlock_page(page);
     return err;
@@ -1235 +1400 @@
 
 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
-
-# if 0 /* unused - see below */
-static int sf_reg_write_aux(const char *caller, struct sf_glob_info *sf_g,
-                            struct sf_reg_info *sf_r, void *buf,
-                            uint32_t * nwritten, uint64_t pos)
-{
-    /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
-     *        contiguous in physical memory (kmalloc or single page), we should
-     *        use a physical address here to speed things up. */
-    int rc = VbglR0SfWrite(&client_handle, &sf_g->map, sf_r->handle,
-                           pos, nwritten, buf,
-                           false /* already locked? */ );
-    if (RT_FAILURE(rc)) {
-        LogFunc(("VbglR0SfWrite failed. caller=%s, rc=%Rrc\n",
-                 caller, rc));
-        return -EPROTO;
-    }
-    return 0;
-}
-# endif
-
 /**
  * Called when writing thru the page cache (which we shouldn't be doing).
@@ -1279 +1423 @@
     return simple_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
 }
-
-/**
- * Called to complete a write thru the page cache (which we shouldn't be doing).
- */
-int sf_write_end(struct file *file, struct address_space *mapping, loff_t pos,
-                 unsigned len, unsigned copied, struct page *page, void *fsdata)
-{
-# if 0 /** @todo r=bird: See sf_write_begin. */
-    struct inode *inode = mapping->host;
-    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
-    struct sf_reg_info *sf_r = file->private_data;
-    void *buf;
-    unsigned from = pos & (PAGE_SIZE - 1);
-    uint32_t nwritten = len;
-    int err;
-
-    TRACE();
-
-    buf = kmap(page);
-    err = sf_reg_write_aux(__func__, sf_g, sf_r, buf + from, &nwritten, pos);
-    kunmap(page);
-
-    if (err >= 0) {
-        if (!PageUptodate(page) && nwritten == PAGE_SIZE)
-            SetPageUptodate(page);
-
-        pos += nwritten;
-        if (pos > inode->i_size)
-            inode->i_size = pos;
-    }
-
-    unlock_page(page);
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
-    put_page(page);
-# else
-    page_cache_release(page);
-# endif
-    return nwritten;
-# else
-    return simple_write_end(file, mapping, pos, len, copied, page, fsdata);
-# endif
-}
-
 # endif /* KERNEL_VERSION >= 2.6.24 */
 
@@ -1357 +1458 @@
     .readpage = sf_readpage,
     .writepage = sf_writepage,
+    /** @todo Need .writepages if we want msync performance...  */
 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12)
     .set_page_dirty = __set_page_dirty_buffers,
@@ -1362 +1464 @@
 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
     .write_begin = sf_write_begin,
-    .write_end = sf_write_end,
+    .write_end = simple_write_end,
 # else
     .prepare_write = simple_prepare_write,
trunk/src/VBox/Additions/linux/sharedfolders/vfsmod.c
--- vfsmod.c (r77419)
+++ vfsmod.c (r77458)
@@ -59 +59 @@
 VBGLSFCLIENT client_handle;
 VBGLSFCLIENT g_SfClient;      /* temporary? */
 
+uint32_t g_fHostFeatures = 0; /* temporary? */
+
+spinlock_t g_SfHandleLock;
+
 
 /* forward declarations */
@@ -292 +296 @@
     sf_i->path->String.utf8[1] = 0;
     sf_i->force_reread = 0;
+    RTListInit(&sf_i->HandleList);
+#ifdef VBOX_STRICT
+    sf_i->u32Magic = SF_INODE_INFO_MAGIC;
+#endif
 
     err = sf_stat(__func__, sf_g, sf_i->path, &fsinfo, 0);
@@ -409 +417 @@
         return;
 
+    Assert(sf_i->u32Magic == SF_INODE_INFO_MAGIC);
     BUG_ON(!sf_i->path);
     kfree(sf_i->path);
+    sf_handle_drop_chain(sf_i);
+# ifdef VBOX_STRICT
+    sf_i->u32Magic = SF_INODE_INFO_MAGIC_DEAD;
+# endif
     kfree(sf_i);
     SET_INODE_INFO(inode, NULL);
@@ -431 +444 @@
         return;
 
+    Assert(sf_i->u32Magic == SF_INODE_INFO_MAGIC);
     BUG_ON(!sf_i->path);
     kfree(sf_i->path);
+    sf_handle_drop_chain(sf_i);
+# ifdef VBOX_STRICT
+    sf_i->u32Magic = SF_INODE_INFO_MAGIC_DEAD;
+# endif
     kfree(sf_i);
     SET_INODE_INFO(inode, NULL);
@@ -627 +645 @@
     }
 
+    /** @todo Init order is wrong, file system reigstration is the very last
+     *        thing we should do. */
+    spin_lock_init(&g_SfHandleLock);
     err = register_filesystem(&vboxsf_fs_type);
     if (err) {
trunk/src/VBox/Additions/linux/sharedfolders/vfsmod.h
--- vfsmod.h (r77419)
+++ vfsmod.h (r77458)
@@ -35 +35 @@
 #endif
 
+#if 0 /* Enables strict checks. */
+# define RT_STRICT
+# define VBOX_STRICT
+#endif
+
 #define LOG_GROUP LOG_GROUP_SHARED_FOLDERS
 #include "the-linux-kernel.h"
+#include <iprt/list.h>
+#include <iprt/asm.h>
 #include <VBox/log.h>
 
@@ -69 +76 @@
 };
 
-/* per-inode information */
+/**
+ * For associating inodes with host handles.
+ *
+ * This is necessary for address_space_operations::sf_writepage and allows
+ * optimizing stat, lookups and other operations on open files and directories.
+ */
+struct sf_handle {
+    /** List entry (head sf_inode_info::HandleList). */
+    RTLISTNODE              Entry;
+    /** Host file/whatever handle. */
+    SHFLHANDLE              hHost;
+    /** SF_HANDLE_F_XXX */
+    uint32_t                fFlags;
+    /** Reference counter.
+     * Close the handle and free the structure when it reaches zero. */
+    uint32_t volatile       cRefs;
+#ifdef VBOX_STRICT
+    /** For strictness checks. */
+    struct sf_inode_info   *pInodeInfo;
+#endif
+};
+
+/** @name SF_HANDLE_F_XXX - Handle summary flags (sf_handle::fFlags).
+ * @{ */
+#define SF_HANDLE_F_READ        UINT32_C(0x00000001)
+#define SF_HANDLE_F_WRITE       UINT32_C(0x00000002)
+#define SF_HANDLE_F_APPEND      UINT32_C(0x00000004)
+#define SF_HANDLE_F_FILE        UINT32_C(0x00000010)
+#define SF_HANDLE_F_ON_LIST     UINT32_C(0x00000080)
+#define SF_HANDLE_F_MAGIC_MASK  UINT32_C(0xffffff00)
+#define SF_HANDLE_F_MAGIC       UINT32_C(0x75030700) /**< Maurice Ravel (1875-03-07). */
+#define SF_HANDLE_F_MAGIC_DEAD  UINT32_C(0x19371228)
+/** @} */
+
+/**
+ * VBox specific per-inode information.
+ */
 struct sf_inode_info {
-    /* which file */
+    /** Which file */
     SHFLSTRING *path;
-    /* some information was changed, update data on next revalidate */
+    /** Some information was changed, update data on next revalidate */
     int force_restat;
-    /* directory content changed, update the whole directory on next sf_getdent */
+    /** directory content changed, update the whole directory on next sf_getdent */
     int force_reread;
-    /* file structure, only valid between open() and release() */
-    struct file *file;
-    /* handle valid if a file was created with sf_create_aux until it will
-     * be opened with sf_reg_open()*/
+
+    /** handle valid if a file was created with sf_create_aux until it will
+     * be opened with sf_reg_open()
+     * @todo r=bird: figure this one out... */
     SHFLHANDLE handle;
+
+    /** List of open handles (struct sf_handle), protected by g_SfHandleLock. */
+    RTLISTANCHOR HandleList;
+#ifdef VBOX_STRICT
+    uint32_t u32Magic;
+# define SF_INODE_INFO_MAGIC        UINT32_C(0x18620822) /**< Claude Debussy */
+# define SF_INODE_INFO_MAGIC_DEAD   UINT32_C(0x19180325)
+#endif
 };
 
 struct sf_dir_info {
+    /** @todo sf_handle. */
     struct list_head info_list;
 };
@@ -96 +148 @@
 };
 
+/**
+ * VBox specific infor fore a regular file.
+ */
 struct sf_reg_info {
-    SHFLHANDLE handle;
+    /** Handle tracking structure. */
+    struct sf_handle Handle;
 };
 
 /* globals */
 extern VBGLSFCLIENT client_handle;
+extern spinlock_t g_SfHandleLock;
+
 
 /* forward declarations */
@@ -111 +169 @@
 extern struct dentry_operations sf_dentry_ops;
 extern struct address_space_operations sf_reg_aops;
+
+extern void sf_handle_drop_chain(struct sf_inode_info *pInodeInfo);
+extern struct sf_handle *sf_handle_find(struct sf_inode_info *pInodeInfo, uint32_t fFlagsSet, uint32_t fFlagsClear);
+extern uint32_t sf_handle_release_slow(struct sf_handle *pHandle, struct sf_glob_info *sf_g, const char *pszCaller);
+extern void sf_handle_append(struct sf_inode_info *pInodeInfo, struct sf_handle *pHandle);
+
+/**
+ * Releases a handle.
+ *
+ * @returns New reference count.
+ * @param   pHandle     The handle to release.
+ * @param   sf_g        The info structure for the shared folder associated
+ *                      with the handle.
+ * @param   pszCaller   The caller name (for logging failures).
+ */
+DECLINLINE(uint32_t) sf_handle_release(struct sf_handle *pHandle, struct sf_glob_info *sf_g, const char *pszCaller)
+{
+    uint32_t cRefs;
+
+    Assert((pHandle->fFlags & SF_HANDLE_F_MAGIC_MASK) == SF_HANDLE_F_MAGIC);
+    Assert(pHandle->pInodeInfo);
+    Assert(pHandle->pInodeInfo && pHandle->pInodeInfo->u32Magic == SF_INODE_INFO_MAGIC);
+
+    cRefs = ASMAtomicDecU32(&pHandle->cRefs);
+    Assert(cRefs < _64M);
+    if (cRefs)
+        return cRefs;
+    return sf_handle_release_slow(pHandle, sf_g, pszCaller);
+}
 
 extern void sf_init_inode(struct sf_glob_info *sf_g, struct inode *inode,
@@ -156 +243 @@
 #endif
 
-#define TRACE() LogFunc(("tracepoint\n"))
+#if 1
+# define TRACE()            LogFunc(("tracepoint\n"))
+# define SFLOGFLOW(aArgs)   Log(aArgs)
+#else
+# define TRACE()            RTLogBackdoorPrintf("%s: tracepoint\n", __FUNCTION__)
+# define SFLOGFLOW(aArgs)   RTLogBackdoorPrintf aArgs
+#endif
 
 /* Following casts are here to prevent assignment of void * to
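The header ties the pieces together: sf_reg_info now embeds a struct sf_handle whose single initial reference is owned by the open struct file, sf_handle_append() links it to the inode so it can be borrowed via sf_handle_find(), and the final sf_handle_release() both unlinks it and closes the host handle. A condensed, hypothetical view of that lifecycle (not the changeset's actual sf_reg_open()/sf_reg_release(); flag choice and function names are illustrative):

#include "vfsmod.h"   /* struct sf_reg_info, sf_handle_append(), sf_handle_release(), SF_HANDLE_F_XXX */

/* Hypothetical open-side setup: one reference owned by the open file, handle
 * linked to the inode so lookups like sf_handle_find() can borrow it. */
static void sf_example_open(struct sf_inode_info *sf_i, struct sf_reg_info *sf_r, SHFLHANDLE hHost)
{
    RTListInit(&sf_r->Handle.Entry);
    sf_r->Handle.cRefs  = 1;                                  /* owned by the open file */
    sf_r->Handle.fFlags = SF_HANDLE_F_FILE | SF_HANDLE_F_MAGIC
                        | SF_HANDLE_F_READ | SF_HANDLE_F_WRITE;
    sf_r->Handle.hHost  = hHost;
    sf_handle_append(sf_i, &sf_r->Handle);                    /* now visible to sf_handle_find() */
}

/* Hypothetical release-side: dropping the last reference takes the slow path,
 * which closes hHost and kfree()s the handle; since Handle is the first (and
 * only) member of sf_reg_info, that also frees sf_r itself. */
static void sf_example_release(struct sf_glob_info *sf_g, struct sf_reg_info *sf_r)
{
    sf_handle_release(&sf_r->Handle, sf_g, "sf_example_release");
}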