VirtualBox

Changeset 77419 in vbox for trunk/src


Ignore:
Timestamp:
Feb 21, 2019 11:53:28 PM (6 years ago)
Author:
vboxsync
Message:

linux/vboxsf: Support non-COW mmap too. llseek needs to make sure the file size is up-to-date when using it in a seek (host may have changed it). Bunch of build fixes for weird older kernels. bugref:9172

Location:
trunk/src/VBox/Additions/linux/sharedfolders
Files:
5 edited

Legend:

Unmodified
Added
Removed
  • trunk/src/VBox/Additions/linux/sharedfolders/lnkops.c

    r76939 r77419  
    3131#include "vfsmod.h"
    3232
    33 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
     33#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 8) /* no generic_readlink() before 2.6.8 */
    3434
    3535# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
     
    117117};
    118118
    119 #endif  /* LINUX_VERSION_CODE >= 2.6.0 */
     119#endif  /* LINUX_VERSION_CODE >= 2.6.8 */
  • trunk/src/VBox/Additions/linux/sharedfolders/regops.c

    r77303 r77419  
    2929 */
    3030
    31 /*
    32  * Limitations: only COW memory mapping is supported
    33  */
    34 
    3531#include "vfsmod.h"
     32#include <linux/uio.h>
     33#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 32)
     34# include <linux/aio.h> /* struct kiocb before 4.1 */
     35#endif
     36#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12)
     37# include <linux/buffer_head.h>
     38#endif
     39#if LINUX_VERSION_CODE <  KERNEL_VERSION(2, 6, 31) \
     40 && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12)
     41# include <linux/writeback.h>
     42#endif
     43#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) \
     44 && LINUX_VERSION_CODE <  KERNEL_VERSION(2, 6, 31)
     45# include <linux/splice.h>
     46#endif
     47
     48#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
     49# define SEEK_END 2
     50#endif
     51
    3652
    3753#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
     
    5571
    5672#endif /* < 2.6.0 */
     73
    5774
    5875/* fops */
     
    144161};
    145162
    146 #define LOCK_PIPE(pipe) \
    147     if (pipe->inode) \
    148         mutex_lock(&pipe->inode->i_mutex);
    149 
    150 #define UNLOCK_PIPE(pipe) \
    151     if (pipe->inode) \
    152         mutex_unlock(&pipe->inode->i_mutex);
     163# define LOCK_PIPE(pipe)   do { if (pipe->inode) mutex_lock(&pipe->inode->i_mutex); } while (0)
     164# define UNLOCK_PIPE(pipe) do { if (pipe->inode) mutex_unlock(&pipe->inode->i_mutex); } while (0)
    153165
    154166ssize_t
     
    296308
    297309/**
     310 * Read function used when accessing files that are memory mapped.
     311 *
     312 * We read from the page cache here to present a coherent picture of the
     313 * file content.
     314 */
     315static ssize_t sf_reg_read_mapped(struct file *file, char /*__user*/ *buf, size_t size, loff_t *off)
     316{
     317#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
     318        struct iovec    iov = { .iov_base = buf, .iov_len = size };
     319        struct iov_iter iter;
     320        struct kiocb    kiocb;
     321        ssize_t         cbRet;
     322
     323        init_sync_kiocb(&kiocb, file);
     324        kiocb.ki_pos = *off;
     325        iov_iter_init(&iter, READ, &iov, 1, size);
     326
     327        cbRet = generic_file_read_iter(&kiocb, &iter);
     328
     329        *off = kiocb.ki_pos;
     330        return cbRet;
     331
     332#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
     333        struct iovec    iov = { .iov_base = buf, .iov_len = size };
     334        struct kiocb    kiocb;
     335        ssize_t         cbRet;
     336
     337        init_sync_kiocb(&kiocb, file);
     338        kiocb.ki_pos = *off;
     339
     340        cbRet = generic_file_aio_read(&kiocb, &iov, 1, *off);
     341        if (cbRet == -EIOCBQUEUED)
     342                cbRet = wait_on_sync_kiocb(&kiocb);
     343
     344        *off = kiocb.ki_pos;
     345        return cbRet;
     346
     347#else /* 2.6.18 or earlier: */
     348        return generic_file_read(file, buf, size, off);
     349#endif
     350}
     351
     352
     353/**
    298354 * Fallback case of sf_reg_read() that locks the user buffers and let the host
    299355 * write directly to them.
     
    419475        struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    420476        struct sf_reg_info *sf_r = file->private_data;
     477#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 2)
     478        struct address_space *mapping = file->f_mapping;
     479#else
     480        struct address_space *mapping = inode->i_mapping;
     481#endif
    421482
    422483        TRACE();
     
    430491        if (!size)
    431492                return 0;
     493
     494        /*
     495         * If there is a mapping and O_DIRECT isn't in effect, we must
     496         * heed dirty pages in the mapping and read from them.  For simplicity
     497         * though, we just do page cache reading when there are writable
     498         * mappings around with any kind of pages loaded.
     499         */
     500        if (   mapping
     501            && mapping->nrpages > 0
     502            && mapping_writably_mapped(mapping)
     503            && !(file->f_flags & O_DIRECT)
     504            && 1 /** @todo make this behaviour configurable */ )
     505                return sf_reg_read_mapped(file, buf, size, off);
    432506
    433507        /*
     
    894968}
    895969
    896 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
    897 static int sf_reg_fault(struct vm_fault *vmf)
    898 #elif LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    899 static int sf_reg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    900 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    901 static struct page *sf_reg_nopage(struct vm_area_struct *vma,
    902                                   unsigned long vaddr, int *type)
    903 # define SET_TYPE(t) *type = (t)
    904 #else  /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
    905 static struct page *sf_reg_nopage(struct vm_area_struct *vma,
    906                                   unsigned long vaddr, int unused)
    907 # define SET_TYPE(t)
     970/**
     971 * Wrapper around generic/default seek function that ensures that we've got
     972 * the up-to-date file size when doing anything relative to EOF.
     973 *
     974 * The issue is that the host may extend the file while we weren't looking and
     975 * if the caller wishes to append data, it may end up overwriting existing data
     976 * if we operate with a stale size.  So, we always retrieve the file size on EOF
     977 * relative seeks.
     978 */
     979static loff_t sf_reg_llseek(struct file *file, loff_t off, int whence)
     980{
     981        switch (whence) {
     982#ifdef SEEK_HOLE
     983                case SEEK_HOLE:
     984                case SEEK_DATA:
    908985#endif
    909 {
    910         struct page *page;
    911         char *buf;
    912         loff_t off;
    913         uint32_t nread = PAGE_SIZE;
    914         int err;
    915 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
    916         struct vm_area_struct *vma = vmf->vma;
     986                case SEEK_END: {
     987                        struct sf_reg_info *sf_r = file->private_data;
     988                        int rc = sf_inode_revalidate_with_handle(GET_F_DENTRY(file), sf_r->handle, true /*fForce*/);
     989                        if (rc == 0)
     990                                break;
     991                        return rc;
     992                }
     993        }
     994
     995#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 8)
     996        return generic_file_llseek(file, off, whence);
     997#else
     998        return default_llseek(file, off, whence);
    917999#endif
    918         struct file *file = vma->vm_file;
    919         struct inode *inode = GET_F_DENTRY(file)->d_inode;
    920         struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    921         struct sf_reg_info *sf_r = file->private_data;
    922 
    923         TRACE();
    924 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    925         if (vmf->pgoff > vma->vm_end)
    926                 return VM_FAULT_SIGBUS;
    927 #else
    928         if (vaddr > vma->vm_end) {
    929                 SET_TYPE(VM_FAULT_SIGBUS);
    930                 return NOPAGE_SIGBUS;
    931         }
    932 #endif
    933 
    934         /* Don't use GFP_HIGHUSER as long as sf_reg_read_aux() calls VbglR0SfRead()
    935          * which works on virtual addresses. On Linux cannot reliably determine the
    936          * physical address for high memory, see rtR0MemObjNativeLockKernel(). */
    937         page = alloc_page(GFP_USER);
    938         if (!page) {
    939                 LogRelFunc(("failed to allocate page\n"));
    940 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    941                 return VM_FAULT_OOM;
    942 #else
    943                 SET_TYPE(VM_FAULT_OOM);
    944                 return NOPAGE_OOM;
    945 #endif
    946         }
    947 
    948         buf = kmap(page);
    949 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    950         off = (vmf->pgoff << PAGE_SHIFT);
    951 #else
    952         off = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
    953 #endif
    954         err = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
    955         if (err) {
    956                 kunmap(page);
    957                 put_page(page);
    958 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    959                 return VM_FAULT_SIGBUS;
    960 #else
    961                 SET_TYPE(VM_FAULT_SIGBUS);
    962                 return NOPAGE_SIGBUS;
    963 #endif
    964         }
    965 
    966         BUG_ON(nread > PAGE_SIZE);
    967         if (!nread) {
    968 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    969                 clear_user_page(page_address(page), vmf->pgoff, page);
    970 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    971                 clear_user_page(page_address(page), vaddr, page);
    972 #else
    973                 clear_user_page(page_address(page), vaddr);
    974 #endif
    975         } else
    976                 memset(buf + nread, 0, PAGE_SIZE - nread);
    977 
    978         flush_dcache_page(page);
    979         kunmap(page);
    980 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    981         vmf->page = page;
    982         return 0;
    983 #else
    984         SET_TYPE(VM_FAULT_MAJOR);
    985         return page;
    986 #endif
    987 }
    988 
    989 static struct vm_operations_struct sf_vma_ops = {
    990 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    991         .fault = sf_reg_fault
    992 #else
    993         .nopage = sf_reg_nopage
    994 #endif
    995 };
    996 
    997 static int sf_reg_mmap(struct file *file, struct vm_area_struct *vma)
    998 {
    999         TRACE();
    1000         if (vma->vm_flags & VM_SHARED) {
    1001                 LogFunc(("shared mmapping not available\n"));
    1002                 return -EINVAL;
    1003         }
    1004 
    1005         vma->vm_ops = &sf_vma_ops;
    1006         return 0;
    1007 }
     1000}
     1001
     1002/**
     1003 * Flush region of file - chiefly mmap/msync.
     1004 *
     1005 * We cannot use the noop_fsync / simple_sync_file here as that means
     1006 * msync(,,MS_SYNC) will return before the data hits the host, thereby
     1007 * causing coherency issues with O_DIRECT access to the same file as
     1008 * well as any host interaction with the file.
     1009 */
     1010#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
     1011static int sf_reg_fsync(struct file *file, loff_t start, loff_t end, int datasync)
     1012{
     1013# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
     1014        return __generic_file_fsync(file, start, end, datasync);
     1015# else
     1016        return generic_file_fsync(file, start, end, datasync);
     1017# endif
     1018}
     1019#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
     1020static int sf_reg_fsync(struct file *file, int datasync)
     1021{
     1022        return generic_file_fsync(file, datasync);
     1023}
     1024#else /* < 2.6.35 */
     1025static int sf_reg_fsync(struct file *file, struct dentry *dentry, int datasync)
     1026{
     1027# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
     1028        return simple_fsync(file, dentry, datasync);
     1029# else
     1030        int rc;
     1031        struct inode *inode = dentry->d_inode;
     1032        AssertReturn(inode, -EINVAL);
     1033
     1034        /** @todo What about file_fsync()? (<= 2.5.11) */
     1035
     1036#  if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12)
     1037        rc = sync_mapping_buffers(inode->i_mapping);
     1038        if (   rc == 0
     1039            && (inode->i_state & I_DIRTY)
     1040            && ((inode->i_state & I_DIRTY_DATASYNC) || !datasync)
     1041           ) {
     1042                struct writeback_control wbc = {
     1043                        .sync_mode = WB_SYNC_ALL,
     1044                        .nr_to_write = 0
     1045                };
     1046                rc = sync_inode(inode, &wbc);
     1047        }
     1048#  else  /* < 2.5.12 */
     1049        rc  = fsync_inode_buffers(inode);
     1050#   if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 10)
     1051        rc |= fsync_inode_data_buffers(inode);
     1052#   endif
     1053        /** @todo probably need to do more here... */
     1054#  endif /* < 2.5.12 */
     1055        return rc;
     1056# endif
     1057}
     1058#endif /* < 2.6.35 */
     1059
    10081060
    10091061struct file_operations sf_reg_fops = {
     
    10121064        .write = sf_reg_write,
    10131065        .release = sf_reg_release,
    1014         .mmap = sf_reg_mmap,
     1066        .mmap = generic_file_mmap,
    10151067#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    10161068# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
     
    10251077        .aio_write = generic_file_aio_write,
    10261078# endif
    1027 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
    1028         .fsync = noop_fsync,
    1029 # else
    1030         .fsync = simple_sync_file,
    1031 # endif
    1032         .llseek = generic_file_llseek,
    10331079#endif
     1080        .llseek = sf_reg_llseek,
     1081        .fsync = sf_reg_fsync,
    10341082};
    10351083
     
    10901138        TRACE();
    10911139
    1092 /** @todo rig up a FsPerf testcase for this code! */
    1093 
    10941140        if (page->index >= end_index)
    10951141                nwritten = inode->i_size & (PAGE_SIZE - 1);
     
    11241170{
    11251171        TRACE();
     1172/** @todo rig up a FsPerf testcase for this code! */
    11261173
    11271174        return simple_write_begin(file, mapping, pos, len, flags, pagep,
     
    11411188
    11421189        TRACE();
    1143 
    11441190/** @todo rig up a FsPerf testcase for this code! */
    11451191
     
    12021248        .readpage = sf_readpage,
    12031249        .writepage = sf_writepage,
     1250# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12)
     1251        .set_page_dirty = __set_page_dirty_buffers,
     1252# endif
    12041253# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
    12051254        .write_begin = sf_write_begin,
  • trunk/src/VBox/Additions/linux/sharedfolders/utils.c

    r77303 r77419  
    4949
    5050#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
    51 static void sf_ftime_from_timespec(time_t * time, RTTIMESPEC * ts)
     51
     52DECLINLINE(void) sf_ftime_from_timespec(time_t * time, RTTIMESPEC *ts)
    5253{
    5354        int64_t t = RTTimeSpecGetNano(ts);
    54 
    55         do_div(t, 1000000000);
     55        do_div(t, RT_NS_1SEC);
    5656        *time = t;
    5757}
    5858
    59 static void sf_timespec_from_ftime(RTTIMESPEC * ts, time_t * time)
    60 {
    61         int64_t t = 1000000000 * *time;
    62         RTTimeSpecSetNano(ts, t);
    63 }
    64 #else                           /* >= 2.6.0 */
    65 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)
    66 static void sf_ftime_from_timespec(struct timespec *tv, RTTIMESPEC *ts)
    67 #else
    68 static void sf_ftime_from_timespec(struct timespec64 *tv, RTTIMESPEC *ts)
    69 #endif
     59DECLINLINE(void) sf_timespec_from_ftime(RTTIMESPEC * ts, time_t *time)
     60{
     61        RTTimeSpecSetNano(ts, RT_NS_1SEC_64 * *time);
     62}
     63
     64#else   /* >= 2.6.0 */
     65
     66# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)
     67DECLINLINE(void) sf_ftime_from_timespec(struct timespec *tv, RTTIMESPEC *ts)
     68# else
     69DECLINLINE(void) sf_ftime_from_timespec(struct timespec64 *tv, RTTIMESPEC *ts)
     70# endif
    7071{
    7172        int64_t t = RTTimeSpecGetNano(ts);
    72         int64_t nsec;
    73 
    74         nsec = do_div(t, 1000000000);
    75         tv->tv_sec = t;
    76         tv->tv_nsec = nsec;
    77 }
    78 
    79 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)
    80 static void sf_timespec_from_ftime(RTTIMESPEC *ts, struct timespec *tv)
    81 #else
    82 static void sf_timespec_from_ftime(RTTIMESPEC *ts, struct timespec64 *tv)
    83 #endif
    84 {
    85         int64_t t = (int64_t) tv->tv_nsec + (int64_t) tv->tv_sec * 1000000000;
    86         RTTimeSpecSetNano(ts, t);
    87 }
    88 #endif                          /* >= 2.6.0 */
     73        tv->tv_nsec = do_div(t, RT_NS_1SEC);
     74        tv->tv_sec  = t;
     75}
     76
     77# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)
     78DECLINLINE(void) sf_timespec_from_ftime(RTTIMESPEC *ts, struct timespec *tv)
     79# else
     80DECLINLINE(void) sf_timespec_from_ftime(RTTIMESPEC *ts, struct timespec64 *tv)
     81# endif
     82{
     83        RTTimeSpecSetNano(ts, tv->tv_nsec + tv->tv_sec * (int64_t)RT_NS_1SEC);
     84}
     85
     86#endif  /* >= 2.6.0 */
    8987
    9088/* set [inode] attributes based on [info], uid/gid based on [sf_g] */
     
    117115        inode->i_mapping->a_ops = &sf_reg_aops;
    118116# if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 19, 0)
    119         /* XXX Was this ever necessary? */
    120         inode->i_mapping->backing_dev_info = &sf_g->bdi;
     117        inode->i_mapping->backing_dev_info = &sf_g->bdi; /* This is needed for mmap. */
    121118# endif
    122119#endif
     
    136133#endif
    137134        }
    138 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
     135#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 8)
    139136        else if (RTFS_IS_SYMLINK(attr->fMode)) {
    140137                inode->i_mode = sf_g->fmode != ~0 ? (sf_g->fmode & 0777) : mode;
     
    241238        TRACE();
    242239        if (!dentry || !dentry->d_inode) {
    243                 LogFunc(("no dentry(%p) or inode(%p)\n", dentry,
    244                          dentry->d_inode));
     240                LogFunc(("no dentry(%p) or inode(%p)\n", dentry, dentry ? dentry->d_inode : NULL));
    245241                return -EINVAL;
    246242        }
     
    268264
    269265        dentry->d_time = jiffies;
     266/** @todo bird has severe inode locking / rcu concerns here:  */
    270267        sf_init_inode(sf_g, dentry->d_inode, &info);
    271268        return 0;
     269}
     270
     271/**
     272 * Similar to sf_inode_revalidate, but uses associated host file handle as that
     273 * is quite a bit faster.
     274 */
     275int sf_inode_revalidate_with_handle(struct dentry *dentry, SHFLHANDLE hHostFile, bool fForced)
     276{
     277        int err;
     278        struct inode *pInode = dentry ? dentry->d_inode : NULL;
     279        if (!pInode) {
     280                LogFunc(("no dentry(%p) or inode(%p)\n", dentry, pInode));
     281                err = -EINVAL;
     282        } else {
     283                struct sf_inode_info *sf_i = GET_INODE_INFO(pInode);
     284                struct sf_glob_info  *sf_g = GET_GLOB_INFO(pInode->i_sb);
     285                AssertReturn(sf_i, -EINVAL);
     286                AssertReturn(sf_g, -EINVAL);
     287
     288                /*
     289                 * Can we get away without any action here?
     290                 */
     291                if (   !fForced
     292                    && !sf_i->force_restat
     293                    && jiffies - dentry->d_time < sf_g->ttl)
     294                        err = 0;
     295                else {
     296                        /*
     297                         * No, we have to query the file info from the host.
     298                         */
     299                        VBOXSFOBJINFOREQ *pReq = (VBOXSFOBJINFOREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
     300                        if (pReq) {
     301                                RT_ZERO(*pReq);
     302                                err = VbglR0SfHostReqQueryObjInfo(sf_g->map.root, pReq, hHostFile);
     303                                if (RT_SUCCESS(err)) {
     304                                        /*
     305                                         * Reset the TTL and copy the info over into the inode structure.
     306                                         */
     307                                        dentry->d_time = jiffies;
     308/** @todo bird has severe inode locking / rcu concerns here:  */
     309                                        sf_init_inode(sf_g, pInode, &pReq->ObjInfo);
     310                                } else {
     311                                        LogFunc(("VbglR0SfHostReqQueryObjInfo failed on %#RX64: %Rrc\n", hHostFile, err));
     312                                        err = -RTErrConvertToErrno(err);
     313                                }
     314                                VbglR0PhysHeapFree(pReq);
     315                        } else
     316                                err = -ENOMEM;
     317                }
     318        }
     319        return err;
    272320}
    273321
     
    895943};
    896944
    897 int sf_init_backing_dev(struct sf_glob_info *sf_g)
     945int sf_init_backing_dev(struct super_block *sb, struct sf_glob_info *sf_g)
    898946{
    899947        int rc = 0;
    900 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) && LINUX_VERSION_CODE <= KERNEL_VERSION(3, 19, 0)
     948/** @todo this needs sorting out between 3.19 and 4.11   */
     949#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) //&& LINUX_VERSION_CODE <= KERNEL_VERSION(3, 19, 0)
    901950        /* Each new shared folder map gets a new uint64_t identifier,
    902951         * allocated in sequence.  We ASSUME the sequence will not wrap. */
    903952        static uint64_t s_u64Sequence = 0;
    904953        uint64_t u64CurrentSequence = ASMAtomicIncU64(&s_u64Sequence);
    905 
    906         sf_g->bdi.ra_pages = 0; /* No readahead */
     954        struct backing_dev_info *bdi;
     955
     956#  if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
     957        rc = super_setup_bdi_name(sb, "vboxsf-%llu", (unsigned long long)u64CurrentSequence);
     958        if (!rc)
     959                bdi = sb->s_bdi;
     960        else
     961                return rc;
     962#  else
     963        bdi = &sf_g->bdi;
     964#  endif
     965
     966        bdi->ra_pages = 0;                      /* No readahead */
     967
    907968# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 12)
    908         sf_g->bdi.capabilities = BDI_CAP_MAP_DIRECT     /* MAP_SHARED */
    909             | BDI_CAP_MAP_COPY  /* MAP_PRIVATE */
    910             | BDI_CAP_READ_MAP  /* can be mapped for reading */
    911             | BDI_CAP_WRITE_MAP /* can be mapped for writing */
    912             | BDI_CAP_EXEC_MAP; /* can be mapped for execution */
    913 # endif                         /* >= 2.6.12 */
    914 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
     969        bdi->capabilities = 0
     970#  ifdef BDI_CAP_MAP_DIRECT
     971                          | BDI_CAP_MAP_DIRECT  /* MAP_SHARED */
     972#  endif
     973#  ifdef BDI_CAP_MAP_COPY
     974                          | BDI_CAP_MAP_COPY    /* MAP_PRIVATE */
     975#  endif
     976#  ifdef BDI_CAP_READ_MAP
     977                          | BDI_CAP_READ_MAP    /* can be mapped for reading */
     978#  endif
     979#  ifdef BDI_CAP_WRITE_MAP
     980                          | BDI_CAP_WRITE_MAP   /* can be mapped for writing */
     981#  endif
     982#  ifdef BDI_CAP_EXEC_MAP
     983                          | BDI_CAP_EXEC_MAP    /* can be mapped for execution */
     984#  endif
     985#  ifdef BDI_CAP_STRICTLIMIT
     986                          | BDI_CAP_STRICTLIMIT;
     987#  endif
     988                          ;
     989#  ifdef BDI_CAP_STRICTLIMIT
      990        /* Smallest possible amount of dirty pages: 1% of RAM */
     991        bdi_set_max_ratio(bdi, 1);
     992#  endif
     993# endif /* >= 2.6.12 */
     994
     995# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
    915996        rc = bdi_init(&sf_g->bdi);
    916997#  if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
     
    9201001#  endif /* >= 2.6.26 */
    9211002# endif  /* >= 2.6.24 */
    922 #endif   /* >= 2.6.0 && <= 3.19.0 */
     1003#endif   /* >= 2.6.0 */
    9231004        return rc;
    9241005}
    9251006
    926 void sf_done_backing_dev(struct sf_glob_info *sf_g)
     1007void sf_done_backing_dev(struct super_block *sb, struct sf_glob_info *sf_g)
    9271008{
    9281009#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) && LINUX_VERSION_CODE <= KERNEL_VERSION(3, 19, 0)
  • trunk/src/VBox/Additions/linux/sharedfolders/vfsmod.c

    r77303 r77419  
    328328        }
    329329
    330         if (sf_init_backing_dev(sf_g)) {
     330        if (sf_init_backing_dev(sb, sf_g)) {
    331331                err = -EINVAL;
    332332                LogFunc(("could not init bdi\n"));
     
    363363
    364364 fail5:
    365         sf_done_backing_dev(sf_g);
     365        sf_done_backing_dev(sb, sf_g);
    366366
    367367 fail4:
     
    456456        sf_g = GET_GLOB_INFO(sb);
    457457        BUG_ON(!sf_g);
    458         sf_done_backing_dev(sf_g);
     458        sf_done_backing_dev(sb, sf_g);
    459459        sf_glob_free(sf_g);
    460460}
  • trunk/src/VBox/Additions/linux/sharedfolders/vfsmod.h

    r77303 r77419  
    6363         * This applies to read and write operations.  */
    6464        uint32_t cMaxIoPages;
    65 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
     65#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
    6666        struct backing_dev_info bdi;
    6767#endif
     
    117117                   SHFLSTRING * path, PSHFLFSOBJINFO result, int ok_to_fail);
    118118extern int sf_inode_revalidate(struct dentry *dentry);
     119int sf_inode_revalidate_with_handle(struct dentry *dentry, SHFLHANDLE hHostFile, bool fForced);
    119120#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
    120121# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
     
    139140                           struct sf_inode_info *sf_i, struct sf_dir_info *sf_d,
    140141                           SHFLHANDLE handle);
    141 extern int sf_init_backing_dev(struct sf_glob_info *sf_g);
    142 extern void sf_done_backing_dev(struct sf_glob_info *sf_g);
     142extern int sf_init_backing_dev(struct super_block *sb, struct sf_glob_info *sf_g);
     143extern void sf_done_backing_dev(struct super_block *sb, struct sf_glob_info *sf_g);
    143144
    144145#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
Note: See TracChangeset for help on using the changeset viewer.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette