Changeset 77943 in vbox for trunk


Timestamp: Mar 28, 2019 6:34:05 PM
Author: vboxsync
Message:

linux/vboxsf: More tweaking of vbsf_splice_write and friends. Use our own write method for performance reasons until 3.16.0, when iter_file_splice_write was introduced. bugref:9172
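
The code path being tuned is the one taken when userland splices data from a pipe into a file on a shared folder. A minimal userspace sketch of such a call, assuming a hypothetical vboxsf mount at /mnt/shared (illustrative only, not part of the changeset):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        static const char s_szMsg[] = "hello via splice\n";
        int fdPipe[2];
        int fdFile = open("/mnt/shared/out.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);
        if (fdFile < 0 || pipe(fdPipe) != 0)
            return 1;

        /* Fill the pipe, then move its content into the file without copying it
           through userspace; on pre-3.16 kernels this invokes vbsf_splice_write. */
        if (write(fdPipe[1], s_szMsg, sizeof(s_szMsg) - 1) != (ssize_t)(sizeof(s_szMsg) - 1))
            return 1;
        close(fdPipe[1]);

        ssize_t cbMoved = splice(fdPipe[0], NULL, fdFile, NULL, sizeof(s_szMsg) - 1, 0);
        if (cbMoved < 0)
            perror("splice");

        close(fdPipe[0]);
        close(fdFile);
        return cbMoved < 0;
    }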

File: 1 edited

Legend:

Unmodified lines are prefixed with a space, added lines with "+", and removed lines with "-".
  • trunk/src/VBox/Additions/linux/sharedfolders/regops.c

--- trunk/src/VBox/Additions/linux/sharedfolders/regops.c (r77942)
+++ trunk/src/VBox/Additions/linux/sharedfolders/regops.c (r77943)

@@ -46,5 +46,5 @@
 #endif
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) \
- && LINUX_VERSION_CODE <  KERNEL_VERSION(2, 6, 31)
+ && LINUX_VERSION_CODE <  KERNEL_VERSION(3, 16, 0)
 # include <linux/splice.h>
 #endif

@@ -125,4 +125,7 @@
 *********************************************************************************************************************************/
 DECLINLINE(void) vbsf_unlock_user_pages(struct page **papPages, size_t cPages, bool fSetDirty, bool fLockPgHack);
+static void vbsf_reg_write_sync_page_cache(struct address_space *mapping, loff_t offFile, uint32_t cbRange,
+                                           uint8_t const *pbSrcBuf, struct page **papSrcPages,
+                                           uint32_t offSrcPage, size_t cSrcPages);
 
 

@@ -528,86 +531,14 @@
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 17) \
- && LINUX_VERSION_CODE <  KERNEL_VERSION(2, 6, 31)
-
-
-/** Verify pipe buffer content (needed for page-cache to ensure idle page). */
-static int vbsf_pipe_buf_confirm(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
-{
-    /*SFLOG3(("vbsf_pipe_buf_confirm: %p\n", pPipeBuf));*/
-    return 0;
-}
-
-/** Maps the buffer page. */
-static void *vbsf_pipe_buf_map(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf, int atomic)
-{
-    void *pvRet;
-    if (!atomic)
-        pvRet = kmap(pPipeBuf->page);
-    else {
-        pPipeBuf->flags |= PIPE_BUF_FLAG_ATOMIC;
-        pvRet = kmap_atomic(pPipeBuf->page, KM_USER0);
-    }
-    /*SFLOG3(("vbsf_pipe_buf_map: %p -> %p\n", pPipeBuf, pvRet));*/
-    return pvRet;
-}
-
-/** Unmaps the buffer page. */
-static void vbsf_pipe_buf_unmap(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf, void *pvMapping)
-{
-    /*SFLOG3(("vbsf_pipe_buf_unmap: %p/%p\n", pPipeBuf, pvMapping)); */
-    if (!(pPipeBuf->flags & PIPE_BUF_FLAG_ATOMIC))
-        kunmap(pPipeBuf->page);
-    else {
-        pPipeBuf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
-        kunmap_atomic(pvMapping, KM_USER0);
-    }
-}
-
-/** Gets a reference to the page. */
-static void vbsf_pipe_buf_get(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
-{
-    page_cache_get(pPipeBuf->page);
-    /*SFLOG3(("vbsf_pipe_buf_get: %p (return count=%d)\n", pPipeBuf, page_count(pPipeBuf->page)));*/
-}
-
-/** Release the buffer page (counter to vbsf_pipe_buf_get). */
-static void vbsf_pipe_buf_release(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
-{
-    /*SFLOG3(("vbsf_pipe_buf_release: %p (incoming count=%d)\n", pPipeBuf, page_count(pPipeBuf->page)));*/
-    page_cache_release(pPipeBuf->page);
-}
-
-/** Attempt to steal the page.
- * @returns 0 success, 1 on failure.  */
-static int vbsf_pipe_buf_steal(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
-{
-    if (page_count(pPipeBuf->page) == 1) {
-        lock_page(pPipeBuf->page);
-        SFLOG3(("vbsf_pipe_buf_steal: %p -> 0\n", pPipeBuf));
-        return 0;
-    }
-    SFLOG3(("vbsf_pipe_buf_steal: %p -> 1\n", pPipeBuf));
-    return 1;
-}
-
-/**
- * Pipe buffer operations for used by vbsf_feed_pages_to_pipe.
- */
-static struct pipe_buf_operations vbsf_pipe_buf_ops = {
-    .can_merge = 0,
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
-    .confirm   = vbsf_pipe_buf_confirm,
+ && LINUX_VERSION_CODE <  KERNEL_VERSION(3, 16, 0)
+
+# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
+#  define LOCK_PIPE(a_pPipe)   do { if ((a_pPipe)->inode) mutex_lock(&(a_pPipe)->inode->i_mutex); } while (0)
+#  define UNLOCK_PIPE(a_pPipe) do { if ((a_pPipe)->inode) mutex_unlock(&(a_pPipe)->inode->i_mutex); } while (0)
 # else
-    .pin       = vbsf_pipe_buf_confirm,
-# endif
-    .map       = vbsf_pipe_buf_map,
-    .unmap     = vbsf_pipe_buf_unmap,
-    .get       = vbsf_pipe_buf_get,
-    .release   = vbsf_pipe_buf_release,
-    .steal     = vbsf_pipe_buf_steal,
-};
-
-# define LOCK_PIPE(a_pPipe)   do { if ((a_pPipe)->inode) mutex_lock(&(a_pPipe)->inode->i_mutex); } while (0)
-# define UNLOCK_PIPE(a_pPipe) do { if ((a_pPipe)->inode) mutex_unlock(&(a_pPipe)->inode->i_mutex); } while (0)
+#  define LOCK_PIPE(a_pPipe)   pipe_lock(a_pPipe)
+#  define UNLOCK_PIPE(a_pPipe) pipe_unlock(a_pPipe)
+# endif
+
 
 /** Waits for the pipe buffer status to change. */
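
Background on the hunk above: pipe_lock() and pipe_unlock() only exist as of kernel 2.6.30, so the reworked LOCK_PIPE/UNLOCK_PIPE macros fall back to taking the pipe inode's i_mutex on older kernels. A minimal sketch of how such a macro pair is meant to bracket pipe-buffer manipulation (vbsf_example_discard_first_buf is an illustrative name, not code from this changeset):

    /* Illustrative only: release the first pipe buffer under the pipe lock,
       mirroring the buffer-advancing pattern used by vbsf_splice_write below. */
    static void vbsf_example_discard_first_buf(struct pipe_inode_info *pPipe)
    {
        LOCK_PIPE(pPipe);
        if (pPipe->nrbufs > 0) {
            struct pipe_buffer *pPipeBuf = &pPipe->bufs[pPipe->curbuf];
            struct pipe_buf_operations const *pOps = pPipeBuf->ops;
            pPipeBuf->ops = NULL;
            pOps->release(pPipe, pPipeBuf);
    # ifdef PIPE_BUFFERS
            pPipe->curbuf  = (pPipe->curbuf + 1) % PIPE_BUFFERS;
    # else
            pPipe->curbuf  = (pPipe->curbuf + 1) % pPipe->buffers;
    # endif
            pPipe->nrbufs -= 1;
        }
        UNLOCK_PIPE(pPipe);
    }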
     
@@ -639,4 +570,92 @@
         kill_fasync(&pPipe->fasync_writers, SIGIO, POLL_OUT);
 }
+
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 17) \
+ && LINUX_VERSION_CODE <  KERNEL_VERSION(2, 6, 31)
+
+/** Verify pipe buffer content (needed for page-cache to ensure idle page). */
+static int vbsf_pipe_buf_confirm(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
+{
+    /*SFLOG3(("vbsf_pipe_buf_confirm: %p\n", pPipeBuf));*/
+    return 0;
+}
+
+
+/** Maps the buffer page. */
+static void *vbsf_pipe_buf_map(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf, int atomic)
+{
+    void *pvRet;
+    if (!atomic)
+        pvRet = kmap(pPipeBuf->page);
+    else {
+        pPipeBuf->flags |= PIPE_BUF_FLAG_ATOMIC;
+        pvRet = kmap_atomic(pPipeBuf->page, KM_USER0);
+    }
+    /*SFLOG3(("vbsf_pipe_buf_map: %p -> %p\n", pPipeBuf, pvRet));*/
+    return pvRet;
+}
+
+
+/** Unmaps the buffer page. */
+static void vbsf_pipe_buf_unmap(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf, void *pvMapping)
+{
+    /*SFLOG3(("vbsf_pipe_buf_unmap: %p/%p\n", pPipeBuf, pvMapping)); */
+    if (!(pPipeBuf->flags & PIPE_BUF_FLAG_ATOMIC))
+        kunmap(pPipeBuf->page);
+    else {
+        pPipeBuf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
+        kunmap_atomic(pvMapping, KM_USER0);
+    }
+}
+
+
+/** Gets a reference to the page. */
+static void vbsf_pipe_buf_get(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
+{
+    page_cache_get(pPipeBuf->page);
+    /*SFLOG3(("vbsf_pipe_buf_get: %p (return count=%d)\n", pPipeBuf, page_count(pPipeBuf->page)));*/
+}
+
+
+/** Release the buffer page (counter to vbsf_pipe_buf_get). */
+static void vbsf_pipe_buf_release(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
+{
+    /*SFLOG3(("vbsf_pipe_buf_release: %p (incoming count=%d)\n", pPipeBuf, page_count(pPipeBuf->page)));*/
+    page_cache_release(pPipeBuf->page);
+}
+
+
+/** Attempt to steal the page.
+ * @returns 0 success, 1 on failure.  */
+static int vbsf_pipe_buf_steal(struct pipe_inode_info *pPipe, struct pipe_buffer *pPipeBuf)
+{
+    if (page_count(pPipeBuf->page) == 1) {
+        lock_page(pPipeBuf->page);
+        SFLOG3(("vbsf_pipe_buf_steal: %p -> 0\n", pPipeBuf));
+        return 0;
+    }
+    SFLOG3(("vbsf_pipe_buf_steal: %p -> 1\n", pPipeBuf));
+    return 1;
+}
+
+
+/**
+ * Pipe buffer operations for used by vbsf_feed_pages_to_pipe.
+ */
+static struct pipe_buf_operations vbsf_pipe_buf_ops = {
+    .can_merge = 0,
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
+    .confirm   = vbsf_pipe_buf_confirm,
+# else
+    .pin       = vbsf_pipe_buf_confirm,
+# endif
+    .map       = vbsf_pipe_buf_map,
+    .unmap     = vbsf_pipe_buf_unmap,
+    .get       = vbsf_pipe_buf_get,
+    .release   = vbsf_pipe_buf_release,
+    .steal     = vbsf_pipe_buf_steal,
+};
+
 
 /**
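
These functions form the vtable installed on each pipe_buffer that vbsf_feed_pages_to_pipe (named in the doc comment above) hands to a pipe. A minimal sketch of how a buffer is typically populated with such an ops table (vbsf_example_feed_page is illustrative, not the actual feeder from regops.c; PIPE_BUFFERS is safe here because this block is only compiled for kernels older than 2.6.31):

    /* Illustrative only: append one page of data to the pipe using the
       vbsf_pipe_buf_ops table declared above.  Caller holds the pipe lock. */
    static void vbsf_example_feed_page(struct pipe_inode_info *pPipe, struct page *pPage, unsigned int cbData)
    {
        struct pipe_buffer *pPipeBuf = &pPipe->bufs[(pPipe->curbuf + pPipe->nrbufs) % PIPE_BUFFERS];
        page_cache_get(pPage);              /* the pipe buffer keeps its own page reference */
        pPipeBuf->page   = pPage;
        pPipeBuf->offset = 0;
        pPipeBuf->len    = cbData;
        pPipeBuf->ops    = &vbsf_pipe_buf_ops;
        pPipe->nrbufs   += 1;
    }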
     
@@ -800,7 +819,13 @@
 }
 
+#endif /* 2.6.17 <= LINUX_VERSION_CODE < 2.6.31 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 17) \
+ && LINUX_VERSION_CODE <  KERNEL_VERSION(3, 16, 0)
 
 /**
  * For splicing from a pipe to a file.
+ *
+ * Since we can combine buffers and request allocations, this should be faster
+ * than the default implementation.
  */
 static ssize_t vbsf_splice_write(struct pipe_inode_info *pPipe, struct file *file, loff_t *poffset, size_t len, unsigned int flags)

@@ -811,20 +836,26 @@
 
     SFLOGFLOW(("vbsf_splice_write: pPipe=%p file=%p poffset=%p{%#RX64} len=%#zx flags=%#x\n", pPipe, file, poffset, *poffset, len, flags));
-    if (false /** @todo  later */) {
+    /** @todo later if (false) {
         cbRet = generic_file_splice_write(pPipe, file, poffset, len, flags);
-    } else {
+    } else */ {
         /*
          * Prepare a write request.
          */
-        struct vbsf_reg_info *sf_r       = (struct vbsf_reg_info *)file->private_data;
-        loff_t                offFile    = *poffset;
-        uint32_t const        cMaxPages  = RT_MIN(PIPE_BUFFERS, RT_ALIGN_Z(len, PAGE_SIZE) >> PAGE_SHIFT);
-        VBOXSFWRITEPGLSTREQ  *pReq       = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ,
-                                                                                                       PgLst.aPages[cMaxPages]));
+# ifdef PIPE_BUFFERS
+        uint32_t const cMaxPages  = RT_MIN(PIPE_BUFFERS, RT_ALIGN_Z(len, PAGE_SIZE) >> PAGE_SHIFT);
+# else
+        uint32_t const cMaxPages  = RT_MIN(RT_MAX(RT_MIN(pPipe->buffers, 256), PIPE_DEF_BUFFERS),
+                                           RT_ALIGN_Z(len, PAGE_SIZE) >> PAGE_SHIFT);
+# endif
+        VBOXSFWRITEPGLSTREQ *pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ,
+                                                                                                PgLst.aPages[cMaxPages]));
         if (pReq) {
             /*
              * Feed from the pipe.
              */
-            bool fNeedWakeUp = false;
+            struct vbsf_reg_info *sf_r        = (struct vbsf_reg_info *)file->private_data;
+            struct address_space *mapping     = inode->i_mapping;
+            loff_t                offFile     = *poffset;
+            bool                  fNeedWakeUp = false;
             cbRet = 0;
 

@@ -853,5 +884,9 @@
                            && cPagesToWrite < cMaxPages
                            && ((pReq->PgLst.offFirstPage + cbToWrite) & PAGE_OFFSET_MASK) == 0) {
+# ifdef PIPE_BUFFERS
                         struct pipe_buffer *pPipeBuf2 = &pPipe->bufs[(pPipe->curbuf + cPagesToWrite) % PIPE_BUFFERS];
+# else
+                        struct pipe_buffer *pPipeBuf2 = &pPipe->bufs[(pPipe->curbuf + cPagesToWrite) % pPipe->buffers];
+# endif
                         Assert(pPipeBuf2->len <= PAGE_SIZE);
                         Assert(pPipeBuf2->offset < PAGE_SIZE);

@@ -863,5 +898,10 @@
                     }
 
-                    vrc = VbglR0SfHostReqWritePgLst(sf_g->map.root, pReq, sf_r->Handle.hHost, offFile, cbToWrite, cPagesToWrite);
+                    /* Check that we don't have signals pending before we issue the write, as
+                       we'll only end up having to cancel the HGCM request 99% of the time: */
+                    if (!signal_pending(current))
+                        vrc = VbglR0SfHostReqWritePgLst(sf_g->map.root, pReq, sf_r->Handle.hHost, offFile, cbToWrite, cPagesToWrite);
+                    else
+                        vrc = VERR_INTERRUPTED;
                     if (RT_SUCCESS(vrc)) {
                         /*

@@ -873,23 +913,35 @@
                         SFLOG2(("vbsf_splice_write: write -> %#x bytes @ %#RX64\n", cbActual, offFile));
 
-                        cbRet   += cbActual;
-                        offFile += cbActual;
-                        *poffset = offFile;
+                        cbRet += cbActual;
 
                         while (cbActual > 0) {
                             uint32_t cbAdvance = RT_MIN(pPipeBuf->len, cbActual);
+
+                            vbsf_reg_write_sync_page_cache(mapping, offFile, cbAdvance, NULL,
+                                                           &pPipeBuf->page, pPipeBuf->offset, 1);
+
+                            offFile          += cbAdvance;
                             cbActual         -= cbAdvance;
                             pPipeBuf->offset += cbAdvance;
                             pPipeBuf->len    -= cbAdvance;
+
                             if (!pPipeBuf->len) {
-                                struct pipe_buf_operations *pOps = pPipeBuf->ops;
+                                struct pipe_buf_operations const *pOps = pPipeBuf->ops;
                                 pPipeBuf->ops = NULL;
                                 pOps->release(pPipe, pPipeBuf);
 
+# ifdef PIPE_BUFFERS
                                 pPipe->curbuf  = (pPipe->curbuf + 1) % PIPE_BUFFERS;
+# else
+                                pPipe->curbuf  = (pPipe->curbuf + 1) % pPipe->buffers;
+# endif
                                 pPipe->nrbufs -= 1;
                                 pPipeBuf = &pPipe->bufs[pPipe->curbuf];
 
+# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
                                 fNeedWakeUp |= pPipe->inode != NULL;
+# else
+                                fNeedWakeUp = true;
+# endif
                             } else {
                                 Assert(cbActual == 0);

@@ -897,7 +949,9 @@
                             }
                         }
+
+                        *poffset = offFile;
                     } else {
                         if (cbRet == 0)
-                            cbRet = -RTErrConvertToErrno(vrc);
+                            cbRet = vrc == VERR_INTERRUPTED ? -ERESTARTSYS : -RTErrConvertToErrno(vrc);
                         SFLOGFLOW(("vbsf_splice_write: Write failed: %Rrc -> %zd (cbRet=%#zx)\n",
                                    vrc, -RTErrConvertToErrno(vrc), cbRet));

@@ -958,5 +1012,5 @@
 }
 
-#endif /* 2.6.17 <= LINUX_VERSION_CODE < 2.6.31 */
+#endif /* 2.6.17 <= LINUX_VERSION_CODE < 3.16.0 */
 
 

@@ -1395,6 +1449,7 @@
  * to the host.
  */
-void vbsf_reg_write_sync_page_cache(struct address_space *mapping, loff_t offFile, uint32_t cbRange,
-                                    uint8_t const *pbSrcBuf, struct page **papSrcPages, uint32_t offSrcPage, size_t cSrcPages)
+static void vbsf_reg_write_sync_page_cache(struct address_space *mapping, loff_t offFile, uint32_t cbRange,
+                                           uint8_t const *pbSrcBuf, struct page **papSrcPages,
+                                           uint32_t offSrcPage, size_t cSrcPages)
 {
     Assert(offSrcPage < PAGE_SIZE);
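
The helper is now static and, as the vbsf_splice_write hunks above show, is also used to keep cached pages coherent with pipe pages written directly to the host. A simplified sketch of the general technique (buffer-source case only; this is not the actual body from regops.c, and it assumes the pre-4.6 page_cache_release API used elsewhere in this file):

    /* Illustrative only: overwrite any cached pages in the written range so
       subsequent page-cache reads observe the bytes just sent to the host. */
    static void vbsf_example_sync_pgcache(struct address_space *mapping, loff_t offFile,
                                          uint32_t cbRange, uint8_t const *pbSrc)
    {
        while (cbRange > 0) {
            uint32_t const offInPage = (uint32_t)offFile & (PAGE_SIZE - 1);
            uint32_t const cbThis    = RT_MIN(cbRange, (uint32_t)PAGE_SIZE - offInPage);
            struct page   *pPage     = find_lock_page(mapping, offFile >> PAGE_SHIFT);
            if (pPage) {
                uint8_t *pbDst = (uint8_t *)kmap(pPage);
                memcpy(&pbDst[offInPage], pbSrc, cbThis);
                flush_dcache_page(pPage);
                kunmap(pPage);
                unlock_page(pPage);
                page_cache_release(pPage);
            }
            pbSrc   += cbThis;
            offFile += cbThis;
            cbRange -= cbThis;
        }
    }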
     
@@ -3106,7 +3161,8 @@
  *      - Sendfile reimplemented using splice in 2.6.23.
  *      - The default_file_splice_read/write no-page-cache fallback functions,
- *        were introduced in 2.6.31.
- *      - Since linux 4.9 the generic_file_splice_read/write functions are using
- *        read_iter/write_iter.
+ *        were introduced in 2.6.31.  The write one work in page units.
+ *      - Since linux 3.16 there is iter_file_splice_write that uses iter_write.
+ *      - Since linux 4.9 the generic_file_splice_read function started using
+ *        read_iter.
  */
 struct file_operations vbsf_reg_fops = {

@@ -3129,4 +3185,8 @@
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 17) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
     .splice_read     = vbsf_splice_read,
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+    .splice_write    = iter_file_splice_write,
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 17)
     .splice_write    = vbsf_splice_write,
 #endif
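
As the comment block above notes, sendfile has been implemented on top of splice since 2.6.23, so plain sendfile(2) also ends up in the splice_read/splice_write methods wired up here. A minimal userspace sketch, assuming a hypothetical vboxsf mount at /mnt/shared and a kernel new enough (2.6.33+) to accept a regular file as the sendfile destination:

    #include <sys/sendfile.h>
    #include <fcntl.h>
    #include <unistd.h>

    /* Copy up to 64 KiB from a local file into a file on the shared folder,
       letting the kernel route the data through the splice machinery. */
    int copy_to_share(void)
    {
        int fdIn  = open("/tmp/input.dat", O_RDONLY);
        int fdOut = open("/mnt/shared/output.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644);
        ssize_t cbCopied = -1;
        if (fdIn >= 0 && fdOut >= 0) {
            off_t off = 0;
            cbCopied = sendfile(fdOut, fdIn, &off, 65536);
        }
        if (fdIn  >= 0) close(fdIn);
        if (fdOut >= 0) close(fdOut);
        return cbCopied < 0 ? 1 : 0;
    }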