Changeset 77141 in vbox for trunk/src/VBox/Additions/linux
Timestamp: Feb 1, 2019 8:32:24 PM (6 years ago)
Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
trunk/src/VBox/Additions/linux/sharedfolders/regops.c
r77138 r77141 35 35 #include "vfsmod.h" 36 36 37 #ifdef VBOXSF_USE_DEPRECATED_VBGL_INTERFACE 38 37 39 static void *alloc_bounce_buffer(size_t * tmp_sizep, PRTCCPHYS physp, size_t 38 40 xfer_size, const char *caller) … … 65 67 kfree(tmp); 66 68 } 69 70 #else /* !VBOXSF_USE_DEPRECATED_VBGL_INTERFACE */ 71 # if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) 72 73 /* 74 * inode compatibility glue. 75 */ 76 # include <iprt/asm.h> 77 78 DECLINLINE(loff_t) i_size_read(struct inode *inode) 79 { 80 AssertCompile(sizeof(loff_t) == sizeof(uint64_t)); 81 return ASMAtomicReadU64((uint64_t volatile *)&inode->i_size); 82 } 83 84 DECLINLINE(void) i_size_write(struct inode *inode, loff_t i_size) 85 { 86 AssertCompile(sizeof(inode->i_size) == sizeof(uint64_t)); 87 ASMAtomicWriteU64((uint64_t volatile *)&inode->i_size, i_size); 88 } 89 90 # endif /* < 2.6.0 */ 91 #endif /* !VBOXSF_USE_DEPRECATED_VBGL_INTERFACE */ 67 92 68 93 /* fops */ … … 275 300 276 301 /** Wrapper around get_user_pages. */ 277 DECLINLINE(int) sf_lock_user_pages( void /*__user*/ *pvFrom, size_t cPages, bool fWrite, struct page **papPages)302 DECLINLINE(int) sf_lock_user_pages(uintptr_t uPtrFrom, size_t cPages, bool fWrite, struct page **papPages) 278 303 { 279 304 # if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0) 280 ssize_t cPagesLocked = get_user_pages_unlocked( (uintptr_t)pvFrom, cPages, papPages,305 ssize_t cPagesLocked = get_user_pages_unlocked(uPtrFrom, cPages, papPages, 281 306 fWrite ? 
FOLL_WRITE | FOLL_FORCE : FOLL_FORCE); 282 307 # elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0) 283 ssize_t cPagesLocked = get_user_pages_unlocked( (uintptr_t)pvFrom, cPages, fWrite, 1 /*force*/, papPages);308 ssize_t cPagesLocked = get_user_pages_unlocked(uPtrFrom, cPages, fWrite, 1 /*force*/, papPages); 284 309 # elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) 285 ssize_t cPagesLocked = get_user_pages_unlocked(current, current->mm, (uintptr_t)pvFrom, cPages,310 ssize_t cPagesLocked = get_user_pages_unlocked(current, current->mm, uPtrFrom, cPages, 286 311 fWrite, 1 /*force*/, papPages); 287 312 # else … … 289 314 size_t cPagesLocked; 290 315 down_read(&pTask->mm->mmap_sem); 291 cPagesLocked = get_user_pages(current, current->mm, (uintptr_t)pvFrom, cPages, fWrite, 1 /*force*/, papPages, NULL);316 cPagesLocked = get_user_pages(current, current->mm, uPtrFrom, cPages, fWrite, 1 /*force*/, papPages, NULL); 292 317 up_read(&pTask->mm->mmap_sem); 293 318 # endif … … 299 324 sf_unlock_user_pages(papPages, cPagesLocked, false /*fSetDirty*/); 300 325 301 /* We could use pvFrom + cPagesLocked to get the correct status here... */326 /* We could use uPtrFrom + cPagesLocked to get the correct status here... */ 302 327 return -EFAULT; 303 328 } … … 351 376 } 352 377 353 rc = sf_lock_user_pages( buf, cPages, true /*fWrite*/, papPages);378 rc = sf_lock_user_pages((uintptr_t)buf, cPages, true /*fWrite*/, papPages); 354 379 if (rc == 0) { 355 380 size_t iPage = cPages; … … 508 533 *off += cbRet; 509 534 else 510 cbRet = -E PROTO;535 cbRet = -EFAULT; 511 536 } else 512 537 cbRet = -EPROTO; … … 536 561 *off += cbRet; 537 562 else 538 cbRet = -E PROTO;563 cbRet = -EFAULT; 539 564 } else 540 565 cbRet = -EPROTO; … … 551 576 #endif /* !VBOXSF_USE_DEPRECATED_VBGL_INTERFACE */ 552 577 } 578 579 580 #ifndef VBOXSF_USE_DEPRECATED_VBGL_INTERFACE 581 /** 582 * Fallback case of sf_reg_write() that locks the user buffers and let the host 583 * write directly to them. 
584 */ 585 static ssize_t sf_reg_write_fallback(struct file *file, const char /*__user*/ *buf, size_t size, loff_t *off, loff_t offFile, 586 struct inode *inode, struct sf_inode_info *sf_i, 587 struct sf_glob_info *sf_g, struct sf_reg_info *sf_r) 588 { 589 /* 590 * Lock pages and execute the write, taking care not to pass the host 591 * more than it can handle in one go or more than we care to allocate 592 * page arrays for. The latter limit is set at just short of 32KB due 593 * to how the physical heap works. 594 */ 595 struct page *apPagesStack[16]; 596 struct page **papPages = &apPagesStack[0]; 597 struct page **papPagesFree = NULL; 598 VBOXSFWRITEPGLSTREQ *pReq; 599 ssize_t cbRet = -ENOMEM; 600 size_t cPages = (((uintptr_t)buf & PAGE_OFFSET_MASK) + size + PAGE_OFFSET_MASK) >> PAGE_SHIFT; 601 size_t cMaxPages = RT_MIN(RT_MAX(sf_g->cMaxIoPages, 1), cPages); 602 603 pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages])); 604 while (!pReq && cMaxPages > 4) { 605 cMaxPages /= 2; 606 pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages])); 607 } 608 if (pReq && cPages > RT_ELEMENTS(apPagesStack)) 609 papPagesFree = papPages = kmalloc(cMaxPages * sizeof(sizeof(papPages[0])), GFP_KERNEL); 610 if (pReq && papPages) { 611 cbRet = 0; 612 for (;;) { 613 /* 614 * Figure out how much to process now and lock the user pages. 
615 */ 616 int rc; 617 size_t cbChunk = (uintptr_t)buf & PAGE_OFFSET_MASK; 618 pReq->PgLst.offFirstPage = (uint16_t)cbChunk; 619 cPages = RT_ALIGN_Z(cbChunk + size, PAGE_SIZE) >> PAGE_SHIFT; 620 if (cPages <= cMaxPages) 621 cbChunk = size; 622 else { 623 cPages = cMaxPages; 624 cbChunk = (cMaxPages << PAGE_SHIFT) - cbChunk; 625 } 626 627 rc = sf_lock_user_pages((uintptr_t)buf, cPages, false /*fWrite*/, papPages); 628 if (rc == 0) { 629 size_t iPage = cPages; 630 while (iPage-- > 0) 631 pReq->PgLst.aPages[iPage] = page_to_phys(papPages[iPage]); 632 } else { 633 cbRet = rc; 634 break; 635 } 636 637 /* 638 * Issue the request and unlock the pages. 639 */ 640 rc = VbglR0SfHostReqWritePgLst(sf_g->map.root, pReq, sf_r->handle, offFile, cbChunk, cPages); 641 642 sf_unlock_user_pages(papPages, cPages, false /*fSetDirty*/); 643 644 if (RT_SUCCESS(rc)) { 645 /* 646 * Success, advance position and buffer. 647 */ 648 uint32_t cbActual = pReq->Parms.cb32Write.u.value32; 649 AssertStmt(cbActual <= cbChunk, cbActual = cbChunk); 650 cbRet += cbActual; 651 offFile += cbActual; 652 buf = (uint8_t *)buf + cbActual; 653 size -= cbActual; 654 if (offFile > i_size_read(inode)) 655 i_size_write(inode, offFile); 656 657 /* 658 * Are we done already? If so commit the new file offset. 659 */ 660 if (!size || cbActual < cbChunk) { 661 *off = offFile; 662 break; 663 } 664 } else if (rc == VERR_NO_MEMORY && cMaxPages > 4) { 665 /* 666 * The host probably doesn't have enough heap to handle the 667 * request, reduce the page count and retry. 668 */ 669 cMaxPages /= 4; 670 Assert(cMaxPages > 0); 671 } else { 672 /* 673 * If we've successfully written stuff, return it rather than 674 * the error. (Not sure if this is such a great idea...) 
675 */ 676 if (cbRet > 0) 677 *off = offFile; 678 else 679 cbRet = -EPROTO; 680 break; 681 } 682 sf_i->force_restat = 1; /* mtime (and size) may have changed */ 683 } 684 } 685 if (papPagesFree) 686 kfree(papPages); 687 if (pReq) 688 VbglR0PhysHeapFree(pReq); 689 return cbRet; 690 } 691 #endif /* VBOXSF_USE_DEPRECATED_VBGL_INTERFACE */ 553 692 554 693 /** … … 564 703 loff_t * off) 565 704 { 705 #ifdef VBOXSF_USE_DEPRECATED_VBGL_INTERFACE 566 706 int err; 567 707 void *tmp; … … 570 710 size_t left = size; 571 711 ssize_t total_bytes_written = 0; 712 #endif 713 loff_t pos; 572 714 struct inode *inode = GET_F_DENTRY(file)->d_inode; 573 715 struct sf_inode_info *sf_i = GET_INODE_INFO(inode); 574 716 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); 575 717 struct sf_reg_info *sf_r = file->private_data; 576 loff_t pos;577 718 578 719 TRACE(); … … 587 728 588 729 pos = *off; 730 /** @todo This should be handled by the host, it returning the new file 731 * offset when appending. We may have an outdated i_size value here! */ 589 732 if (file->f_flags & O_APPEND) { 733 #ifdef VBOXSF_USE_DEPRECATED_VBGL_INTERFACE 590 734 pos = inode->i_size; 591 735 *off = pos; 736 #else 737 pos = i_size_read(inode); 738 #endif 592 739 } 593 740 594 741 /** @todo XXX Check write permission according to inode->i_mode! */ 595 742 596 if (!size) 743 if (!size) { 744 #ifndef VBOXSF_USE_DEPRECATED_VBGL_INTERFACE 745 if (file->f_flags & O_APPEND) /** @todo check if this is the consensus behavior... */ 746 *off = pos; 747 #endif 597 748 return 0; 598 749 } 750 751 #ifdef VBOXSF_USE_DEPRECATED_VBGL_INTERFACE 599 752 tmp = alloc_bounce_buffer(&tmp_size, &tmp_phys, size, 600 753 __PRETTY_FUNCTION__); … … 643 796 free_bounce_buffer(tmp); 644 797 return err; 798 #else /* !VBOXSF_USE_DEPRECATED_VBGL_INTERFACE */ 799 800 /* 801 * For small requests, try use an embedded buffer provided we get a heap block 802 * that does not cross page boundraries (see host code). 
803 */ 804 if (size <= PAGE_SIZE / 4 * 3 - RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) /* see allocator */) { 805 uint32_t const cbReq = RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) + size; 806 VBOXSFWRITEEMBEDDEDREQ *pReq = (VBOXSFWRITEEMBEDDEDREQ *)VbglR0PhysHeapAlloc(cbReq); 807 if ( pReq 808 && (PAGE_SIZE - ((uintptr_t)pReq & PAGE_OFFSET_MASK)) >= cbReq) { 809 ssize_t cbRet; 810 if (copy_from_user(pReq->abData, buf, size) == 0) { 811 int vrc = VbglR0SfHostReqWriteEmbedded(sf_g->map.root, pReq, sf_r->handle, pos, (uint32_t)size); 812 if (RT_SUCCESS(vrc)) { 813 cbRet = pReq->Parms.cb32Write.u.value32; 814 AssertStmt(cbRet <= (ssize_t)size, cbRet = size); 815 pos += cbRet; 816 *off = pos; 817 if (pos > i_size_read(inode)) 818 i_size_write(inode, pos); 819 } else 820 cbRet = -EPROTO; 821 sf_i->force_restat = 1; /* mtime (and size) may have changed */ 822 } else 823 cbRet = -EFAULT; 824 825 VbglR0PhysHeapFree(pReq); 826 return cbRet; 827 } 828 if (pReq) 829 VbglR0PhysHeapFree(pReq); 830 } 831 832 # if 0 /* Turns out this is slightly slower than locking the pages even for 4KB reads (4.19/amd64). */ 833 /* 834 * For medium sized requests try use a bounce buffer. 835 */ 836 if (size <= _64K /** @todo make this configurable? 
*/) { 837 void *pvBounce = kmalloc(size, GFP_KERNEL); 838 if (pvBounce) { 839 if (copy_from_user(pvBounce, buf, size) == 0) { 840 VBOXSFWRITEPGLSTREQ *pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq)); 841 if (pReq) { 842 ssize_t cbRet; 843 int vrc = VbglR0SfHostReqWriteContig(sf_g->map.root, pReq, sf_r->handle, pos, 844 (uint32_t)size, pvBounce, virt_to_phys(pvBounce)); 845 if (RT_SUCCESS(vrc)) { 846 cbRet = pReq->Parms.cb32Write.u.value32; 847 AssertStmt(cbRet <= (ssize_t)size, cbRet = size); 848 pos += cbRet; 849 *off = pos; 850 if (pos > i_size_read(inode)) 851 i_size_write(inode, pos); 852 } else 853 cbRet = -EPROTO; 854 sf_i->force_restat = 1; /* mtime (and size) may have changed */ 855 VbglR0PhysHeapFree(pReq); 856 kfree(pvBounce); 857 return cbRet; 858 } 859 kfree(pvBounce); 860 } else { 861 kfree(pvBounce); 862 return -EFAULT; 863 } 864 } 865 } 866 # endif 867 868 return sf_reg_write_fallback(file, buf, size, off, pos, inode, sf_i, sf_g, sf_r); 869 #endif /* !VBOXSF_USE_DEPRECATED_VBGL_INTERFACE */ 645 870 } 646 871
Note:
See TracChangeset
for help on using the changeset viewer.