- Timestamp: Mar 28, 2019 6:34:05 PM
- File: 1 edited
trunk/src/VBox/Additions/linux/sharedfolders/regops.c
--- trunk/src/VBox/Additions/linux/sharedfolders/regops.c (r77942)
+++ trunk/src/VBox/Additions/linux/sharedfolders/regops.c (r77943)

 #endif
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) \
- && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+ && LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
 # include <linux/splice.h>
 #endif
…
 *********************************************************************************************************************************/
 DECLINLINE(void) vbsf_unlock_user_pages(struct page **papPages, size_t cPages, bool fSetDirty, bool fLockPgHack);
+static void vbsf_reg_write_sync_page_cache(struct address_space *mapping, loff_t offFile, uint32_t cbRange,
+                                           uint8_t const *pbSrcBuf, struct page **papSrcPages,
+                                           uint32_t offSrcPage, size_t cSrcPages);


…

 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 17) \
- && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
-
-
-/** Verify pipe buffer content (needed for page-cache to ensure idle page). */
-static int vbsf_pipe_buf_confirm(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
-{
-    /*SFLOG3(("vbsf_pipe_buf_confirm: %p\n", pPipeBuf));*/
-    return 0;
-}
-
-/** Maps the buffer page. */
-static void *vbsf_pipe_buf_map(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf, int atomic)
-{
-    void *pvRet;
-    if (!atomic)
-        pvRet = kmap(pPipeBuf->page);
-    else {
-        pPipeBuf->flags |= PIPE_BUF_FLAG_ATOMIC;
-        pvRet = kmap_atomic(pPipeBuf->page, KM_USER0);
-    }
-    /*SFLOG3(("vbsf_pipe_buf_map: %p -> %p\n", pPipeBuf, pvRet));*/
-    return pvRet;
-}
-
-/** Unmaps the buffer page. */
-static void vbsf_pipe_buf_unmap(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf, void *pvMapping)
-{
-    /*SFLOG3(("vbsf_pipe_buf_unmap: %p/%p\n", pPipeBuf, pvMapping)); */
-    if (!(pPipeBuf->flags & PIPE_BUF_FLAG_ATOMIC))
-        kunmap(pPipeBuf->page);
-    else {
-        pPipeBuf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
-        kunmap_atomic(pvMapping, KM_USER0);
-    }
-}
-
-/** Gets a reference to the page. */
-static void vbsf_pipe_buf_get(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
-{
-    page_cache_get(pPipeBuf->page);
-    /*SFLOG3(("vbsf_pipe_buf_get: %p (return count=%d)\n", pPipeBuf, page_count(pPipeBuf->page)));*/
-}
-
-/** Release the buffer page (counter to vbsf_pipe_buf_get). */
-static void vbsf_pipe_buf_release(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
-{
-    /*SFLOG3(("vbsf_pipe_buf_release: %p (incoming count=%d)\n", pPipeBuf, page_count(pPipeBuf->page)));*/
-    page_cache_release(pPipeBuf->page);
-}
-
-/** Attempt to steal the page.
- * @returns 0 success, 1 on failure. */
-static int vbsf_pipe_buf_steal(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
-{
-    if (page_count(pPipeBuf->page) == 1) {
-        lock_page(pPipeBuf->page);
-        SFLOG3(("vbsf_pipe_buf_steal: %p -> 0\n", pPipeBuf));
-        return 0;
-    }
-    SFLOG3(("vbsf_pipe_buf_steal: %p -> 1\n", pPipeBuf));
-    return 1;
-}
-
-/**
- * Pipe buffer operations for used by vbsf_feed_pages_to_pipe.
- */
-static struct pipe_buf_operations vbsf_pipe_buf_ops = {
-    .can_merge = 0,
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
-    .confirm   = vbsf_pipe_buf_confirm,
+ && LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
+
+# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
+#  define LOCK_PIPE(a_pPipe)   do { if ((a_pPipe)->inode) mutex_lock(&(a_pPipe)->inode->i_mutex); } while (0)
+#  define UNLOCK_PIPE(a_pPipe) do { if ((a_pPipe)->inode) mutex_unlock(&(a_pPipe)->inode->i_mutex); } while (0)
 # else
-    .pin       = vbsf_pipe_buf_confirm,
-# endif
-    .map       = vbsf_pipe_buf_map,
-    .unmap     = vbsf_pipe_buf_unmap,
-    .get       = vbsf_pipe_buf_get,
-    .release   = vbsf_pipe_buf_release,
-    .steal     = vbsf_pipe_buf_steal,
-};
-
-# define LOCK_PIPE(a_pPipe)   do { if ((a_pPipe)->inode) mutex_lock(&(a_pPipe)->inode->i_mutex); } while (0)
-# define UNLOCK_PIPE(a_pPipe) do { if ((a_pPipe)->inode) mutex_unlock(&(a_pPipe)->inode->i_mutex); } while (0)
+#  define LOCK_PIPE(a_pPipe)   pipe_lock(a_pPipe)
+#  define UNLOCK_PIPE(a_pPipe) pipe_unlock(a_pPipe)
+# endif
+

 /** Waits for the pipe buffer status to change. */
…
     kill_fasync(&pPipe->fasync_writers, SIGIO, POLL_OUT);
 }
+
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 17) \
+ && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+
+/** Verify pipe buffer content (needed for page-cache to ensure idle page). */
+static int vbsf_pipe_buf_confirm(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
+{
+    /*SFLOG3(("vbsf_pipe_buf_confirm: %p\n", pPipeBuf));*/
+    return 0;
+}
+
+
+/** Maps the buffer page. */
+static void *vbsf_pipe_buf_map(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf, int atomic)
+{
+    void *pvRet;
+    if (!atomic)
+        pvRet = kmap(pPipeBuf->page);
+    else {
+        pPipeBuf->flags |= PIPE_BUF_FLAG_ATOMIC;
+        pvRet = kmap_atomic(pPipeBuf->page, KM_USER0);
+    }
+    /*SFLOG3(("vbsf_pipe_buf_map: %p -> %p\n", pPipeBuf, pvRet));*/
+    return pvRet;
+}
+
+
+/** Unmaps the buffer page. */
+static void vbsf_pipe_buf_unmap(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf, void *pvMapping)
+{
+    /*SFLOG3(("vbsf_pipe_buf_unmap: %p/%p\n", pPipeBuf, pvMapping)); */
+    if (!(pPipeBuf->flags & PIPE_BUF_FLAG_ATOMIC))
+        kunmap(pPipeBuf->page);
+    else {
+        pPipeBuf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
+        kunmap_atomic(pvMapping, KM_USER0);
+    }
+}
+
+
+/** Gets a reference to the page. */
+static void vbsf_pipe_buf_get(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
+{
+    page_cache_get(pPipeBuf->page);
+    /*SFLOG3(("vbsf_pipe_buf_get: %p (return count=%d)\n", pPipeBuf, page_count(pPipeBuf->page)));*/
+}
+
+
+/** Release the buffer page (counter to vbsf_pipe_buf_get). */
+static void vbsf_pipe_buf_release(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
+{
+    /*SFLOG3(("vbsf_pipe_buf_release: %p (incoming count=%d)\n", pPipeBuf, page_count(pPipeBuf->page)));*/
+    page_cache_release(pPipeBuf->page);
+}
+
+
+/** Attempt to steal the page.
+ * @returns 0 success, 1 on failure. */
+static int vbsf_pipe_buf_steal(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
+{
+    if (page_count(pPipeBuf->page) == 1) {
+        lock_page(pPipeBuf->page);
+        SFLOG3(("vbsf_pipe_buf_steal: %p -> 0\n", pPipeBuf));
+        return 0;
+    }
+    SFLOG3(("vbsf_pipe_buf_steal: %p -> 1\n", pPipeBuf));
+    return 1;
+}
+
+
+/**
+ * Pipe buffer operations for used by vbsf_feed_pages_to_pipe.
+ */
+static struct pipe_buf_operations vbsf_pipe_buf_ops = {
+    .can_merge = 0,
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
+    .confirm   = vbsf_pipe_buf_confirm,
+# else
+    .pin       = vbsf_pipe_buf_confirm,
+# endif
+    .map       = vbsf_pipe_buf_map,
+    .unmap     = vbsf_pipe_buf_unmap,
+    .get       = vbsf_pipe_buf_get,
+    .release   = vbsf_pipe_buf_release,
+    .steal     = vbsf_pipe_buf_steal,
+};
+

 /**
…
 }

+#endif /* 2.6.17 <= LINUX_VERSION_CODE < 2.6.31 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 17) \
+ && LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)

 /**
  * For splicing from a pipe to a file.
+ *
+ * Since we can combine buffers and request allocations, this should be faster
+ * than the default implementation.
  */
 static ssize_t vbsf_splice_write(struct pipe_inode_info *pPipe, struct file *file, loff_t *poffset, size_t len, unsigned int flags)
…

     SFLOGFLOW(("vbsf_splice_write: pPipe=%p file=%p poffset=%p{%#RX64} len=%#zx flags=%#x\n", pPipe, file, poffset, *poffset, len, flags));
-    if (false /** @todo later */) {
+    /** @todo later if (false) {
         cbRet = generic_file_splice_write(pPipe, file, poffset, len, flags);
-    } else {
+    } else */ {
         /*
          * Prepare a write request.
          */
-        struct vbsf_reg_info *sf_r = (struct vbsf_reg_info *)file->private_data;
-        loff_t offFile = *poffset;
-        uint32_t const cMaxPages = RT_MIN(PIPE_BUFFERS, RT_ALIGN_Z(len, PAGE_SIZE) >> PAGE_SHIFT);
-        VBOXSFWRITEPGLSTREQ *pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ,
-                                                                                               PgLst.aPages[cMaxPages]));
+# ifdef PIPE_BUFFERS
+        uint32_t const cMaxPages = RT_MIN(PIPE_BUFFERS, RT_ALIGN_Z(len, PAGE_SIZE) >> PAGE_SHIFT);
+# else
+        uint32_t const cMaxPages = RT_MIN(RT_MAX(RT_MIN(pPipe->buffers, 256), PIPE_DEF_BUFFERS),
+                                          RT_ALIGN_Z(len, PAGE_SIZE) >> PAGE_SHIFT);
+# endif
+        VBOXSFWRITEPGLSTREQ *pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ,
+                                                                                                PgLst.aPages[cMaxPages]));
         if (pReq) {
             /*
              * Feed from the pipe.
              */
-            bool fNeedWakeUp = false;
+            struct vbsf_reg_info *sf_r    = (struct vbsf_reg_info *)file->private_data;
+            struct address_space *mapping = inode->i_mapping;
+            loff_t                offFile = *poffset;
+            bool                  fNeedWakeUp = false;
             cbRet = 0;
…
                        && cPagesToWrite < cMaxPages
                        && ((pReq->PgLst.offFirstPage + cbToWrite) & PAGE_OFFSET_MASK) == 0) {
+# ifdef PIPE_BUFFERS
                     struct pipe_buffer *pPipeBuf2 = &pPipe->bufs[(pPipe->curbuf + cPagesToWrite) % PIPE_BUFFERS];
+# else
+                    struct pipe_buffer *pPipeBuf2 = &pPipe->bufs[(pPipe->curbuf + cPagesToWrite) % pPipe->buffers];
+# endif
                     Assert(pPipeBuf2->len <= PAGE_SIZE);
                     Assert(pPipeBuf2->offset < PAGE_SIZE);
…
                 }

-                vrc = VbglR0SfHostReqWritePgLst(sf_g->map.root, pReq, sf_r->Handle.hHost, offFile, cbToWrite, cPagesToWrite);
+                /* Check that we don't have signals pending before we issue the write, as
+                   we'll only end up having to cancel the HGCM request 99% of the time: */
+                if (!signal_pending(current))
+                    vrc = VbglR0SfHostReqWritePgLst(sf_g->map.root, pReq, sf_r->Handle.hHost, offFile, cbToWrite, cPagesToWrite);
+                else
+                    vrc = VERR_INTERRUPTED;
                 if (RT_SUCCESS(vrc)) {
                     /*
…
                     SFLOG2(("vbsf_splice_write: write -> %#x bytes @ %#RX64\n", cbActual, offFile));

-                    cbRet += cbActual;
-                    offFile += cbActual;
-                    *poffset = offFile;
+                    cbRet += cbActual;

                     while (cbActual > 0) {
                         uint32_t cbAdvance = RT_MIN(pPipeBuf->len, cbActual);
+
+                        vbsf_reg_write_sync_page_cache(mapping, offFile, cbAdvance, NULL,
+                                                       &pPipeBuf->page, pPipeBuf->offset, 1);
+
+                        offFile          += cbAdvance;
                         cbActual         -= cbAdvance;
                         pPipeBuf->offset += cbAdvance;
                         pPipeBuf->len    -= cbAdvance;
+
                         if (!pPipeBuf->len) {
-                            struct pipe_buf_operations *pOps = pPipeBuf->ops;
+                            struct pipe_buf_operations const *pOps = pPipeBuf->ops;
                             pPipeBuf->ops = NULL;
                             pOps->release(pPipe, pPipeBuf);

+# ifdef PIPE_BUFFERS
                             pPipe->curbuf  = (pPipe->curbuf + 1) % PIPE_BUFFERS;
+# else
+                            pPipe->curbuf  = (pPipe->curbuf + 1) % pPipe->buffers;
+# endif
                             pPipe->nrbufs -= 1;
                             pPipeBuf = &pPipe->bufs[pPipe->curbuf];

+# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
                             fNeedWakeUp |= pPipe->inode != NULL;
+# else
+                            fNeedWakeUp  = true;
+# endif
                         } else {
                             Assert(cbActual == 0);
…
                         }
                     }
+
+                    *poffset = offFile;
                 } else {
                     if (cbRet == 0)
-                        cbRet = -RTErrConvertToErrno(vrc);
+                        cbRet = vrc == VERR_INTERRUPTED ? -ERESTARTSYS : -RTErrConvertToErrno(vrc);
                     SFLOGFLOW(("vbsf_splice_write: Write failed: %Rrc -> %zd (cbRet=%#zx)\n",
                                vrc, -RTErrConvertToErrno(vrc), cbRet));
…
 }

-#endif /* 2.6.17 <= LINUX_VERSION_CODE < 2.6.31 */
+#endif /* 2.6.17 <= LINUX_VERSION_CODE < 3.16.0 */


…
  * to the host.
  */
-void vbsf_reg_write_sync_page_cache(struct address_space *mapping, loff_t offFile, uint32_t cbRange,
-                                    uint8_t const *pbSrcBuf, struct page **papSrcPages, uint32_t offSrcPage, size_t cSrcPages)
+static void vbsf_reg_write_sync_page_cache(struct address_space *mapping, loff_t offFile, uint32_t cbRange,
+                                           uint8_t const *pbSrcBuf, struct page **papSrcPages,
+                                           uint32_t offSrcPage, size_t cSrcPages)
 {
     Assert(offSrcPage < PAGE_SIZE);
…
  * - Sendfile reimplemented using splice in 2.6.23.
  * - The default_file_splice_read/write no-page-cache fallback functions,
- *   were introduced in 2.6.31.
- * - Since linux 4.9 the generic_file_splice_read/write functions are using
- *   read_iter/write_iter.
+ *   were introduced in 2.6.31.  The write one work in page units.
+ * - Since linux 3.16 there is iter_file_splice_write that uses iter_write.
+ * - Since linux 4.9 the generic_file_splice_read function started using
+ *   read_iter.
  */
 struct file_operations vbsf_reg_fops = {
…
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 17) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
     .splice_read  = vbsf_splice_read,
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+    .splice_write = iter_file_splice_write,
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 17)
     .splice_write = vbsf_splice_write,
 #endif
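For orientation only, and not part of the changeset: the net effect of the splice_write hunks above is a per-kernel-version selection of the splice_write hook. The sketch below condenses that gating; my_splice_write and my_fops are hypothetical stand-ins for vbsf_splice_write and vbsf_reg_fops.

    /* Condensed sketch of the version gating introduced above; not VirtualBox code. */
    #include <linux/version.h>
    #include <linux/errno.h>
    #include <linux/fs.h>
    #include <linux/pipe_fs_i.h>

    #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 17) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
    /* Hand-rolled pipe-to-file path for kernels that have no iterator-based generic helper. */
    static ssize_t my_splice_write(struct pipe_inode_info *pipe, struct file *file,
                                   loff_t *poff, size_t len, unsigned int flags)
    {
        return -EINVAL; /* the real driver drains the pipe buffers and writes them to the host */
    }
    #endif

    static const struct file_operations my_fops = {
    #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
        /* 3.16+: the generic helper feeds the pipe pages through ->write_iter,
         * so the driver only needs a working .write_iter implementation. */
        .splice_write = iter_file_splice_write,
    #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 17)
        /* 2.6.17 .. 3.15: keep the custom handler. */
        .splice_write = my_splice_write,
    #endif
    };

This mirrors the #if/#elif ladder added to vbsf_reg_fops at the end of the diff: on 3.16 and later the driver drops its own splice_write path in favour of the kernel's iter_file_splice_write, and keeps vbsf_splice_write only for the 2.6.17 to 3.15 window.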