Changeset 77549 in vbox for trunk/src/VBox/Additions
- Timestamp: Mar 4, 2019 10:00:34 AM (6 years ago)
- Location: trunk/src/VBox/Additions/linux/sharedfolders
- Files: 4 edited
trunk/src/VBox/Additions/linux/sharedfolders/dirops.c
Comparing r77543 to r77549:

@@ -29 +29 @@
  */
 
+
+/*********************************************************************************************************************************
+*   Header Files                                                                                                                 *
+*********************************************************************************************************************************/
 #include "vfsmod.h"
 #include <iprt/err.h>
@@ -780 +784 @@
 
     RT_ZERO(pReq->Create.CreateParms);
-    pReq->Create.CreateParms.Handle = SHFL_HANDLE_NIL;
-    pReq->Create.CreateParms.CreateFlags = SHFL_CF_ACT_CREATE_IF_NEW
-                                         | SHFL_CF_ACT_FAIL_IF_EXISTS
-                                         | SHFL_CF_ACCESS_READWRITE
-                                         | (fDirectory ? SHFL_CF_DIRECTORY : 0);
-    /** @todo use conversion function from utils.c here! */
-    pReq->Create.CreateParms.Info.Attr.fMode = (fDirectory ? RTFS_TYPE_DIRECTORY : RTFS_TYPE_FILE) | (mode & S_IRWXUGO);
+    pReq->Create.CreateParms.Handle      = SHFL_HANDLE_NIL;
+    pReq->Create.CreateParms.CreateFlags = SHFL_CF_ACT_CREATE_IF_NEW
+                                         | SHFL_CF_ACT_FAIL_IF_EXISTS
+                                         | SHFL_CF_ACCESS_READWRITE
+                                         | (fDirectory ? SHFL_CF_DIRECTORY : 0);
+    pReq->Create.CreateParms.Info.Attr.fMode = (fDirectory ? RTFS_TYPE_DIRECTORY : RTFS_TYPE_FILE)
+                                             | sf_access_permissions_to_vbox(mode);
     pReq->Create.CreateParms.Info.Attr.enmAdditional = RTFSOBJATTRADD_NOTHING;
@@ -792 +796 @@
     rc = VbglR0SfHostReqCreate(sf_g->map.root, &pReq->Create);
     if (RT_FAILURE(rc)) {
-        if (rc == VERR_WRITE_PROTECT) {
-            err = -EROFS;
-            goto fail2;
-        }
-        err = -EPROTO;
-        LogFunc(("(%d): SHFL_FN_CREATE(%s) failed rc=%Rrc\n",
-                 fDirectory, sf_parent_i->path->String.utf8, rc));
+        err = -RTErrConvertToErrno(rc);
+        LogFunc(("(%d): SHFL_FN_CREATE(%s) failed rc=%Rrc err=%d\n", fDirectory, sf_parent_i->path->String.utf8, rc, err));
         goto fail2;
     }
@@ -1088 +1087 @@
 
     if (RT_FAILURE(rc)) {
-        if (rc == VERR_WRITE_PROTECT) {
-            err = -EROFS;
-            goto fail1;
-        }
+        err = RTErrConvertFromErrno(rc);
         LogFunc(("VbglR0SfSymlink(%s) failed rc=%Rrc\n", sf_i->path->String.utf8, rc));
-        err = -EPROTO;
         goto fail1;
     }
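The dirops.c hunks drop the hand-rolled VERR_WRITE_PROTECT check plus blanket -EPROTO in favour of IPRT's generic status converter, so every host status now maps to a meaningful errno. A minimal sketch of the pattern (the helper name sketch_vbox_status_to_linux is ours, not the changeset's; RTErrConvertToErrno() is the IPRT routine the diff uses):

    #include <iprt/err.h>   /* RTErrConvertToErrno(), RT_SUCCESS(), VERR_* status codes */

    /* Hypothetical helper illustrating the new error path: RTErrConvertToErrno()
     * returns a positive errno value, so it is negated to match the Linux kernel
     * convention of returning -errno from file operations. */
    static int sketch_vbox_status_to_linux(int vrc)
    {
        if (RT_SUCCESS(vrc))
            return 0;
        return -RTErrConvertToErrno(vrc); /* e.g. VERR_WRITE_PROTECT becomes -EROFS */
    }

This keeps the old -EROFS behaviour for write-protected shares while also translating every other failure instead of collapsing them all to -EPROTO.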
trunk/src/VBox/Additions/linux/sharedfolders/regops.c
Comparing r77536 to r77549:

@@ -66 +66 @@
 
     RTListForEachSafe(&pInodeInfo->HandleList, pCur, pNext, struct vbsf_handle, Entry) {
-        AssertMsg(   (pCur->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | vbSF_HANDLE_F_ON_LIST)) == (VBSF_HANDLE_F_MAGIC | vbSF_HANDLE_F_ON_LIST),
-                  ("%p %#x\n", pCur, pCur->fFlags));
-        pCur->fFlags |= vbSF_HANDLE_F_ON_LIST;
+        AssertMsg(   (pCur->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST))
+                  == (VBSF_HANDLE_F_MAGIC | VBSF_HANDLE_F_ON_LIST), ("%p %#x\n", pCur, pCur->fFlags));
+        pCur->fFlags |= VBSF_HANDLE_F_ON_LIST;
         RTListNodeRemove(&pCur->Entry);
     }
@@ -92 +92 @@
 
     RTListForEach(&pInodeInfo->HandleList, pCur, struct vbsf_handle, Entry) {
-        AssertMsg(   (pCur->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | vbSF_HANDLE_F_ON_LIST)) == (VBSF_HANDLE_F_MAGIC | vbSF_HANDLE_F_ON_LIST),
-                  ("%p %#x\n", pCur, pCur->fFlags));
+        AssertMsg(   (pCur->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST))
+                  == (VBSF_HANDLE_F_MAGIC | VBSF_HANDLE_F_ON_LIST), ("%p %#x\n", pCur, pCur->fFlags));
         if ((pCur->fFlags & (fFlagsSet | fFlagsClear)) == fFlagsSet) {
             uint32_t cRefs = ASMAtomicIncU32(&pCur->cRefs);
@@ -137 +137 @@
     Assert(pHandle->pInodeInfo && pHandle->pInodeInfo->u32Magic == SF_INODE_INFO_MAGIC);
 
-    if (pHandle->fFlags & vbSF_HANDLE_F_ON_LIST) {
-        pHandle->fFlags &= ~vbSF_HANDLE_F_ON_LIST;
+    if (pHandle->fFlags & VBSF_HANDLE_F_ON_LIST) {
+        pHandle->fFlags &= ~VBSF_HANDLE_F_ON_LIST;
         RTListNodeRemove(&pHandle->Entry);
     }
@@ -171 +171 @@
 
     SFLOGFLOW(("vbsf_handle_append: %p (to %p)\n", pHandle, pInodeInfo));
-    AssertMsg((pHandle->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | vbSF_HANDLE_F_ON_LIST)) == VBSF_HANDLE_F_MAGIC,
-              ("%p %#x\n", pHandle, pHandle->fFlags));
+    AssertMsg((pHandle->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST)) == VBSF_HANDLE_F_MAGIC,
+              ("%p %#x\n", pHandle, pHandle->fFlags));
     Assert(pInodeInfo->u32Magic == SF_INODE_INFO_MAGIC);
 
     spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
 
-    AssertMsg((pHandle->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | vbSF_HANDLE_F_ON_LIST)) == VBSF_HANDLE_F_MAGIC,
+    AssertMsg((pHandle->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST)) == VBSF_HANDLE_F_MAGIC,
               ("%p %#x\n", pHandle, pHandle->fFlags));
 #ifdef VBOX_STRICT
     RTListForEach(&pInodeInfo->HandleList, pCur, struct vbsf_handle, Entry) {
         Assert(pCur != pHandle);
-        AssertMsg(   (pCur->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | vbSF_HANDLE_F_ON_LIST)) == (VBSF_HANDLE_F_MAGIC | vbSF_HANDLE_F_ON_LIST),
-                  ("%p %#x\n", pCur, pCur->fFlags));
+        AssertMsg(   (pCur->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST))
+                  == (VBSF_HANDLE_F_MAGIC | VBSF_HANDLE_F_ON_LIST), ("%p %#x\n", pCur, pCur->fFlags));
     }
     pHandle->pInodeInfo = pInodeInfo;
 #endif
 
-    pHandle->fFlags |= vbSF_HANDLE_F_ON_LIST;
+    pHandle->fFlags |= VBSF_HANDLE_F_ON_LIST;
     RTListAppend(&pInodeInfo->HandleList, &pHandle->Entry);
@@ -348 +348 @@
 #endif /* 2.6.23 <= LINUX_VERSION_CODE < 2.6.31 */
 
 
+/** Wrapper around put_page / page_cache_release. */
+DECLINLINE(void) vbsf_put_page(struct page *pPage)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+    put_page(pPage);
+#else
+    page_cache_release(pPage);
+#endif
+}
+
+
+/** Wrapper around get_page / page_cache_get. */
+DECLINLINE(void) vbsf_get_page(struct page *pPage)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+    get_page(pPage);
+#else
+    page_cache_get(pPage);
+#endif
+}
+
+
 /** Companion to vbsf_lock_user_pages(). */
-DECLINLINE(void) vbsf_unlock_user_pages(struct page **papPages, size_t cPages, bool fSetDirty)
+DECLINLINE(void) vbsf_unlock_user_pages(struct page **papPages, size_t cPages, bool fSetDirty, bool fLockPgHack)
 {
+    /* We don't mark kernel pages dirty: */
+    if (fLockPgHack)
+        fSetDirty = false;
+
     while (cPages-- > 0)
     {
         struct page *pPage = papPages[cPages];
         if (fSetDirty && !PageReserved(pPage))
             SetPageDirty(pPage);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
-        put_page(pPage);
-#else
-        page_cache_release(pPage);
-#endif
+        vbsf_put_page(pPage);
     }
 }
+
+
+/**
+ * Catches kernel_read() and kernel_write() calls and works around them.
+ *
+ * The file_operations::read and file_operations::write callbacks supposedly
+ * hands us the user buffers to read into and write out of. To allow the kernel
+ * to read and write without allocating buffers in userland, they kernel_read()
+ * and kernel_write() increases the user space address limit before calling us
+ * so that copyin/copyout won't reject it. Our problem is that get_user_pages()
+ * works on the userspace address space structures and will not be fooled by an
+ * increased addr_limit.
+ *
+ * This code tries to detect this situation and fake get_user_lock() for the
+ * kernel buffer.
+ */
+static int vbsf_lock_user_pages_failed_check_kernel(uintptr_t uPtrFrom, size_t cPages, bool fWrite, int rcFailed,
+                                                    struct page **papPages, bool *pfLockPgHack)
+{
+    /*
+     * Check that this is valid user memory that is actually in the kernel range.
+     */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
+    if (   access_ok((void *)uPtrFrom, cPages << PAGE_SHIFT)
+        && uPtrFrom >= USER_DS.seg)
+#else
+    if (   access_ok(fWrite ? VERIFY_WRITE : VERIFY_READ, (void *)uPtrFrom, cPages << PAGE_SHIFT)
+        && uPtrFrom >= USER_DS.seg)
+#endif
+    {
+        uintptr_t const uPtrLast = (uPtrFrom & ~(uintptr_t)PAGE_OFFSET_MASK) + (cPages << PAGE_SHIFT) - 1;
+        uint8_t        *pbPage   = (uint8_t *)uPtrLast;
+        size_t          iPage    = cPages;
+
+        /*
+         * Touch the pages first (paranoia^2).
+         */
+        if (fWrite) {
+            uint8_t volatile *pbProbe = (uint8_t volatile *)uPtrFrom;
+            while (iPage-- > 0) {
+                *pbProbe = *pbProbe;
+                pbProbe += PAGE_SIZE;
+            }
+        } else {
+            uint8_t const *pbProbe = (uint8_t const *)uPtrFrom;
+            while (iPage-- > 0) {
+                ASMProbeReadByte(pbProbe);
+                pbProbe += PAGE_SIZE;
+            }
+        }
+
+        /*
+         * Get the pages.
+         * Note! Fixes here probably applies to rtR0MemObjNativeLockKernel as well.
+         */
+        iPage = cPages;
+        if (   uPtrFrom >= (unsigned long)__va(0)
+            && uPtrLast < (unsigned long)high_memory)
+        {
+            /* The physical page mapping area: */
+            while (iPage-- > 0)
+            {
+                struct page *pPage = papPages[iPage] = virt_to_page(pbPage);
+                vbsf_get_page(pPage);
+                pbPage -= PAGE_SIZE;
+            }
+        }
+        else
+        {
+            /* This is vmalloc or some such thing, so go thru page tables: */
+            while (iPage-- > 0)
+            {
+                struct page *pPage = rtR0MemObjLinuxVirtToPage(pbPage);
+                if (pPage) {
+                    papPages[iPage] = pPage;
+                    vbsf_get_page(pPage);
+                    pbPage -= PAGE_SIZE;
+                } else {
+                    while (++iPage < cPages) {
+                        pPage = papPages[iPage];
+                        vbsf_put_page(pPage);
+                    }
+                    return rcFailed;
+                }
+            }
+        }
+        *pfLockPgHack = true;
+        return 0;
+    }
+
+    return rcFailed;
+}
@@ -368 +481 @@
 /** Wrapper around get_user_pages. */
-DECLINLINE(int) vbsf_lock_user_pages(uintptr_t uPtrFrom, size_t cPages, bool fWrite, struct page **papPages)
+DECLINLINE(int) vbsf_lock_user_pages(uintptr_t uPtrFrom, size_t cPages, bool fWrite, struct page **papPages, bool *pfLockPgHack)
 {
 # if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
@@ -375 +488 @@
     ssize_t cPagesLocked = get_user_pages_unlocked(uPtrFrom, cPages, fWrite, 1 /*force*/, papPages);
 # elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)
-    ssize_t cPagesLocked = get_user_pages_unlocked(current, current->mm, uPtrFrom, cPages,
-                                                   fWrite, 1 /*force*/, papPages);
+    ssize_t cPagesLocked = get_user_pages_unlocked(current, current->mm, uPtrFrom, cPages, fWrite, 1 /*force*/, papPages);
 # else
     struct task_struct *pTask = current;
@@ -384 +496 @@
     up_read(&pTask->mm->mmap_sem);
 # endif
+    *pfLockPgHack = false;
     if (cPagesLocked == cPages)
         return 0;
+
+    /*
+     * It failed.
+     */
     if (cPagesLocked < 0)
-        return cPagesLocked;
-
-    vbsf_unlock_user_pages(papPages, cPagesLocked, false /*fSetDirty*/);
+        return vbsf_lock_user_pages_failed_check_kernel(uPtrFrom, cPages, fWrite, (int)cPagesLocked, papPages, pfLockPgHack);
+
+    vbsf_unlock_user_pages(papPages, cPagesLocked, false /*fSetDirty*/, false /*fLockPgHack*/);
 
     /* We could use uPtrFrom + cPagesLocked to get the correct status here... */
@@ -444 +561 @@
  * write directly to them.
  */
-static ssize_t vbsf_reg_read_fallback(struct file *file, char /*__user*/ *buf, size_t size, loff_t *off,
-                                      struct vbsf_super_info *sf_g, struct vbsf_reg_info *sf_r)
+static ssize_t vbsf_reg_read_locking(struct file *file, char /*__user*/ *buf, size_t size, loff_t *off,
+                                     struct vbsf_super_info *sf_g, struct vbsf_reg_info *sf_r)
 {
     /*
@@ -461 +578 @@
     size_t cPages = (((uintptr_t)buf & PAGE_OFFSET_MASK) + size + PAGE_OFFSET_MASK) >> PAGE_SHIFT;
     size_t cMaxPages = RT_MIN(RT_MAX(sf_g->cMaxIoPages, 1), cPages);
+    bool fLockPgHack;
 
     pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages]));
@@ -488 +606 @@
-    rc = vbsf_lock_user_pages((uintptr_t)buf, cPages, true /*fWrite*/, papPages);
+    rc = vbsf_lock_user_pages((uintptr_t)buf, cPages, true /*fWrite*/, papPages, &fLockPgHack);
     if (rc == 0) {
         size_t iPage = cPages;
@@ -492 +610 @@
             pReq->PgLst.aPages[iPage] = page_to_phys(papPages[iPage]);
     } else {
+        /** @todo may need fallback here for kernel addresses during exec. sigh. */
         cbRet = rc;
         break;
@@ -501 +620 @@
     rc = VbglR0SfHostReqReadPgLst(sf_g->map.root, pReq, sf_r->Handle.hHost, offFile, cbChunk, cPages);
 
-    vbsf_unlock_user_pages(papPages, cPages, true /*fSetDirty*/);
+    vbsf_unlock_user_pages(papPages, cPages, true /*fSetDirty*/, fLockPgHack);
 
     if (RT_SUCCESS(rc)) {
@@ -647 +766 @@
 #endif
 
-    return vbsf_reg_read_fallback(file, buf, size, off, sf_g, sf_r);
+    return vbsf_reg_read_locking(file, buf, size, off, sf_g, sf_r);
 }
@@ -683 +802 @@
  * write directly to them.
  */
-static ssize_t vbsf_reg_write_fallback(struct file *file, const char /*__user*/ *buf, size_t size, loff_t *off, loff_t offFile,
-                                       struct inode *inode, struct vbsf_inode_info *sf_i,
-                                       struct vbsf_super_info *sf_g, struct vbsf_reg_info *sf_r)
+static ssize_t vbsf_reg_write_locking(struct file *file, const char /*__user*/ *buf, size_t size, loff_t *off, loff_t offFile,
+                                      struct inode *inode, struct vbsf_inode_info *sf_i,
+                                      struct vbsf_super_info *sf_g, struct vbsf_reg_info *sf_r)
 {
     /*
@@ -700 +819 @@
     size_t cPages = (((uintptr_t)buf & PAGE_OFFSET_MASK) + size + PAGE_OFFSET_MASK) >> PAGE_SHIFT;
     size_t cMaxPages = RT_MIN(RT_MAX(sf_g->cMaxIoPages, 1), cPages);
+    bool fLockPgHack;
 
     pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages]));
@@ -727 +847 @@
-    rc = vbsf_lock_user_pages((uintptr_t)buf, cPages, false /*fWrite*/, papPages);
+    rc = vbsf_lock_user_pages((uintptr_t)buf, cPages, false /*fWrite*/, papPages, &fLockPgHack);
     if (rc == 0) {
         size_t iPage = cPages;
@@ -740 +860 @@
     rc = VbglR0SfHostReqWritePgLst(sf_g->map.root, pReq, sf_r->Handle.hHost, offFile, cbChunk, cPages);
 
-    vbsf_unlock_user_pages(papPages, cPages, false /*fSetDirty*/);
+    vbsf_unlock_user_pages(papPages, cPages, false /*fSetDirty*/, fLockPgHack);
 
     if (RT_SUCCESS(rc)) {
@@ -921 +1041 @@
 #endif
 
-    return vbsf_reg_write_fallback(file, buf, size, off, pos, inode, sf_i, sf_g, sf_r);
+    return vbsf_reg_write_locking(file, buf, size, off, pos, inode, sf_i, sf_g, sf_r);
 }
@@ -1262 +1382 @@
 
     SFLOGFLOW(("vbsf_readpage: inode=%p file=%p page=%p off=%#llx\n", inode, file, page, (uint64_t)page->index << PAGE_SHIFT));
+    Assert(PageLocked(page));
+
+    if (PageUptodate(page)) {
+        unlock_page(page);
+        return 0;
+    }
 
     if (!is_bad_inode(inode)) {
@@ -1296 +1422 @@
                 flush_dcache_page(page);
                 SetPageUptodate(page);
-                err = 0;
-            } else
-                err = -EPROTO;
+                unlock_page(page);
+                return 0;
+            }
+            err = -RTErrConvertToErrno(vrc);
         } else
             err = -ENOMEM;
     } else
         err = -EIO;
+    SetPageError(page);
     unlock_page(page);
     return err;

(The remaining regops.c hunks are whitespace-only: the sf_r/cbRead declarations in vbsf_readpage, the pHandle declaration and SFLOGFLOW statement in vbsf_writepage, and the VbglR0SfHostReqWritePgLst call plus its AssertMsgStmt are re-aligned without any textual change.)
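The regops.c changes thread a new fLockPgHack flag through the page-pinning path: vbsf_lock_user_pages() reports whether the "user" buffer turned out to be a kernel buffer (kernel_read()/kernel_write() with a raised addr_limit), and vbsf_unlock_user_pages() uses that flag to skip dirtying kernel pages. A minimal sketch of the pin/IO/unpin cycle (the caller sketch_fill_user_buffer and the elided host request are hypothetical; the two vbsf_* helpers are the ones in this diff):

    #include <linux/mm.h>   /* struct page */
    #include "vfsmod.h"     /* vbsf_lock_user_pages(), vbsf_unlock_user_pages() */

    /* Hypothetical caller: pin the destination buffer, do a host read into the
     * pinned pages, then release them, passing fLockPgHack back so pages that
     * were really kernel memory are never marked dirty. */
    static int sketch_fill_user_buffer(uintptr_t uPtrBuf, size_t cPages, struct page **papPages)
    {
        bool fLockPgHack;
        int rc = vbsf_lock_user_pages(uPtrBuf, cPages, true /*fWrite: we store into the buffer*/,
                                      papPages, &fLockPgHack);
        if (rc != 0)
            return rc;

        /* ...hand papPages to the host request here, e.g. VbglR0SfHostReqReadPgLst()... */

        vbsf_unlock_user_pages(papPages, cPages, true /*fSetDirty*/, fLockPgHack);
        return 0;
    }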
trunk/src/VBox/Additions/linux/sharedfolders/utils.c
Comparing r77538 to r77549:

@@ -37 +37 @@
 
 
+int vbsf_nlscpy(struct vbsf_super_info *sf_g, char *name, size_t name_bound_len, const unsigned char *utf8_name, size_t utf8_len)
+{
+    if (sf_g->nls) {
+        const char *in;
+        char *out;
+        size_t out_len;
+        size_t out_bound_len;
+        size_t in_bound_len;
+
+        in = utf8_name;
+        in_bound_len = utf8_len;
+
+        out = name;
+        out_len = 0;
+        out_bound_len = name_bound_len;
+
+        while (in_bound_len) {
+            int nb;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
+            unicode_t uni;
+
+            nb = utf8_to_utf32(in, in_bound_len, &uni);
+#else
+            linux_wchar_t uni;
+
+            nb = utf8_mbtowc(&uni, in, in_bound_len);
+#endif
+            if (nb < 0) {
+                LogFunc(("utf8_mbtowc failed(%s) %x:%d\n", (const char *)utf8_name, *in, in_bound_len));
+                return -EINVAL;
+            }
+            in += nb;
+            in_bound_len -= nb;
+
+            nb = sf_g->nls->uni2char(uni, out, out_bound_len);
+            if (nb < 0) {
+                LogFunc(("nls->uni2char failed(%s) %x:%d\n", utf8_name, uni, out_bound_len));
+                return nb;
+            }
+            out += nb;
+            out_bound_len -= nb;
+            out_len += nb;
+        }
+
+        *out = 0;
+    } else {
+        if (utf8_len + 1 > name_bound_len)
+            return -ENAMETOOLONG;
+
+        memcpy(name, utf8_name, utf8_len + 1);
+    }
+    return 0;
+}
+
+
 /**
  * Convert from VBox to linux time.
@@ -84 +139 @@
 /**
- * Converts Linux access permissions to VBox ones (mode & 0777).
+ * Converts VBox access permissions to Linux ones (mode & 0777).
  *
  * @note Currently identical.
- */
-DECLINLINE(uint32_t) sf_access_permissions_to_vbox(int fAttr)
+ * @sa sf_access_permissions_to_vbox
+ */
+DECLINLINE(int) sf_access_permissions_to_linux(uint32_t fAttr)
 {
     /* Access bits should be the same: */
@@ -104 +160 @@
 
 /**
- * Converts VBox access permissions to Linux ones (mode & 0777).
- *
- * @note Currently identical.
- */
-DECLINLINE(int) sf_access_permissions_to_linux(uint32_t fAttr)
-{
-    /* Access bits should be the same: */
-    AssertCompile(RTFS_UNIX_IRUSR == S_IRUSR);
-    AssertCompile(RTFS_UNIX_IWUSR == S_IWUSR);
-    AssertCompile(RTFS_UNIX_IXUSR == S_IXUSR);
-    AssertCompile(RTFS_UNIX_IRGRP == S_IRGRP);
-    AssertCompile(RTFS_UNIX_IWGRP == S_IWGRP);
-    AssertCompile(RTFS_UNIX_IXGRP == S_IXGRP);
-    AssertCompile(RTFS_UNIX_IROTH == S_IROTH);
-    AssertCompile(RTFS_UNIX_IWOTH == S_IWOTH);
-    AssertCompile(RTFS_UNIX_IXOTH == S_IXOTH);
-
-    return fAttr & RTFS_UNIX_ALL_ACCESS_PERMS;
-}
-
-
-/**
  * Produce the Linux mode mask, given VBox, mount options and file type.
  */
@@ -395 +431 @@
      * Reset the TTL and copy the info over into the inode structure.
      */
-    vbsf_update_inode(pInode, sf_i, &pReq->CreateParms.Info,
-                      sf_g, true /*fInodeLocked??*/);
+    vbsf_update_inode(pInode, sf_i, &pReq->CreateParms.Info, sf_g, true /*fInodeLocked??*/);
     rc = 0;
 } else {
@@ -824 +862 @@
     kfree(name);
     return err;
 }
-
-int vbsf_nlscpy(struct vbsf_super_info *sf_g, char *name, size_t name_bound_len, const unsigned char *utf8_name, size_t utf8_len)
-{
-    [body removed here; identical to the copy inserted at the top of the file]
-}

(Several further hunks merely add a blank line between adjacent function definitions and are omitted here.)
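vbsf_nlscpy() translates a host-supplied UTF-8 name into the codepage selected by the nls= mount option, one code point at a time, falling back to a plain copy when no NLS table is loaded. Note that utf8_len excludes the terminator while name_bound_len must cover utf8_len + 1 in the fallback path. A minimal caller sketch (the wrapper name sketch_translate_name is ours, not the changeset's):

    #include <linux/string.h>   /* strlen() */
    #include "vfsmod.h"         /* vbsf_nlscpy(), struct vbsf_super_info */

    /* Hypothetical caller: convert a UTF-8 directory entry name from the host.
     * Returns 0 on success or a negative errno, e.g. -EINVAL for a bad UTF-8
     * sequence or -ENAMETOOLONG when the destination buffer is too small. */
    static int sketch_translate_name(struct vbsf_super_info *sf_g, const unsigned char *pszUtf8,
                                     char *pszDst, size_t cbDst)
    {
        return vbsf_nlscpy(sf_g, pszDst, cbDst, pszUtf8, strlen((const char *)pszUtf8));
    }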
trunk/src/VBox/Additions/linux/sharedfolders/vfsmod.h
Comparing r77539 to r77549:

@@ -166 +166 @@
 #define VBSF_HANDLE_F_FILE          UINT32_C(0x00000010)
 #define VBSF_HANDLE_F_DIR           UINT32_C(0x00000020)
-#define vbSF_HANDLE_F_ON_LIST       UINT32_C(0x00000080)
+#define VBSF_HANDLE_F_ON_LIST       UINT32_C(0x00000080)
 #define VBSF_HANDLE_F_MAGIC_MASK    UINT32_C(0xffffff00)
 #define VBSF_HANDLE_F_MAGIC         UINT32_C(0x75030700) /**< Maurice Ravel (1875-03-07). */
@@ -370 +370 @@
 
 
+/**
+ * Converts Linux access permissions to VBox ones (mode & 0777).
+ *
+ * @note Currently identical.
+ * @sa sf_access_permissions_to_linux
+ */
+DECLINLINE(uint32_t) sf_access_permissions_to_vbox(int fAttr)
+{
+    /* Access bits should be the same: */
+    AssertCompile(RTFS_UNIX_IRUSR == S_IRUSR);
+    AssertCompile(RTFS_UNIX_IWUSR == S_IWUSR);
+    AssertCompile(RTFS_UNIX_IXUSR == S_IXUSR);
+    AssertCompile(RTFS_UNIX_IRGRP == S_IRGRP);
+    AssertCompile(RTFS_UNIX_IWGRP == S_IWGRP);
+    AssertCompile(RTFS_UNIX_IXGRP == S_IXGRP);
+    AssertCompile(RTFS_UNIX_IROTH == S_IROTH);
+    AssertCompile(RTFS_UNIX_IWOTH == S_IWOTH);
+    AssertCompile(RTFS_UNIX_IXOTH == S_IXOTH);
+
+    return fAttr & RTFS_UNIX_ALL_ACCESS_PERMS;
+}
+
 #if 1
 # define TRACE() LogFunc(("tracepoint\n"))
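Because the AssertCompile() checks pin each RTFS_UNIX_* permission bit to its S_I* counterpart, both converters reduce to a mask and mode bits round-trip unchanged. An illustrative snippet, written as if inside utils.c where both helpers are visible (sf_access_permissions_to_linux is file-local there; the variable names are ours):

    #include <linux/stat.h>   /* S_IRUSR and friends */
    #include "vfsmod.h"       /* sf_access_permissions_to_vbox() */

    /* Illustrative only: 0640 survives the Linux -> VBox -> Linux round trip
     * because both converters are plain masks over identical bit layouts. */
    static void sketch_mode_round_trip(void)
    {
        int      fLnx  = S_IRUSR | S_IWUSR | S_IRGRP;            /* 0640 */
        uint32_t fVbox = sf_access_permissions_to_vbox(fLnx);    /* RTFS_UNIX_* view, still 0640 */
        int      fBack = sf_access_permissions_to_linux(fVbox);  /* back to 0640 */
        (void)fBack;
    }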