Timestamp:
    Feb 21, 2019 11:53:28 PM (6 years ago)
Location:
    trunk/src/VBox/Additions/linux/sharedfolders
Files:
    5 edited
trunk/src/VBox/Additions/linux/sharedfolders/lnkops.c (r76939 → r77419)

 #include "vfsmod.h"

-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 8) /* no generic_readlink() before 2.6.8 */

 # if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
…
 };

-#endif /* LINUX_VERSION_CODE >= 2.6.0 */
+#endif /* LINUX_VERSION_CODE >= 2.6.8 */
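For context on why the guard moved: generic_readlink() first shipped in 2.6.8, so any inode_operations table referencing it must not be compiled on older kernels. A minimal illustrative sketch of the dependency — demo_lnk_iops is a hypothetical table, not the one in lnkops.c:

#include <linux/fs.h>
#include <linux/version.h>

/* Hypothetical example: referencing generic_readlink() below 2.6.8 would
 * fail to build/link, hence the raised version guard in lnkops.c. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 8)
static struct inode_operations demo_lnk_iops = {
	.readlink = generic_readlink, /* symbol only exists from 2.6.8 on */
};
#endif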
trunk/src/VBox/Additions/linux/sharedfolders/regops.c (r77303 → r77419)

-/*
- * Limitations: only COW memory mapping is supported
- */
-
 #include "vfsmod.h"
+#include <linux/uio.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 32)
+# include <linux/aio.h> /* struct kiocb before 4.1 */
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12)
+# include <linux/buffer_head.h>
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) \
+ && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12)
+# include <linux/writeback.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) \
+ && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
+# include <linux/splice.h>
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
+# define SEEK_END 2
+#endif

 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
…
 #endif /* < 2.6.0 */
+

 /* fops */
…
 };

-#define LOCK_PIPE(pipe) \
-	if (pipe->inode) \
-		mutex_lock(&pipe->inode->i_mutex);
-
-#define UNLOCK_PIPE(pipe) \
-	if (pipe->inode) \
-		mutex_unlock(&pipe->inode->i_mutex);
+# define LOCK_PIPE(pipe)   do { if (pipe->inode) mutex_lock(&pipe->inode->i_mutex); } while (0)
+# define UNLOCK_PIPE(pipe) do { if (pipe->inode) mutex_unlock(&pipe->inode->i_mutex); } while (0)

 ssize_t
…
 /**
+ * Read function used when accessing files that are memory mapped.
+ *
+ * We read from the page cache here to present a coherent picture of the
+ * file content.
+ */
+static ssize_t sf_reg_read_mapped(struct file *file, char /*__user*/ *buf, size_t size, loff_t *off)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+	struct iovec iov = { .iov_base = buf, .iov_len = size };
+	struct iov_iter iter;
+	struct kiocb kiocb;
+	ssize_t cbRet;
+
+	init_sync_kiocb(&kiocb, file);
+	kiocb.ki_pos = *off;
+	iov_iter_init(&iter, READ, &iov, 1, size);
+
+	cbRet = generic_file_read_iter(&kiocb, &iter);
+
+	*off = kiocb.ki_pos;
+	return cbRet;
+
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
+	struct iovec iov = { .iov_base = buf, .iov_len = size };
+	struct kiocb kiocb;
+	ssize_t cbRet;
+
+	init_sync_kiocb(&kiocb, file);
+	kiocb.ki_pos = *off;
+
+	cbRet = generic_file_aio_read(&kiocb, &iov, 1, *off);
+	if (cbRet == -EIOCBQUEUED)
+		cbRet = wait_on_sync_kiocb(&kiocb);
+
+	*off = kiocb.ki_pos;
+	return cbRet;
+
+#else /* 2.6.18 or earlier: */
+	return generic_file_read(file, buf, size, off);
+#endif
+}
+
+
+/**
  * Fallback case of sf_reg_read() that locks the user buffers and lets the host
  * write directly to them.
…
 	struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
 	struct sf_reg_info *sf_r = file->private_data;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 2)
+	struct address_space *mapping = file->f_mapping;
+#else
+	struct address_space *mapping = inode->i_mapping;
+#endif

 	TRACE();
…
 	if (!size)
 		return 0;
+
+	/*
+	 * If there is a mapping and O_DIRECT isn't in effect, we must heed
+	 * dirty pages in the mapping and read from them.  For simplicity
+	 * though, we just do page cache reading when there are writable
+	 * mappings around with any kind of pages loaded.
+	 */
+	if (   mapping
+	    && mapping->nrpages > 0
+	    && mapping_writably_mapped(mapping)
+	    && !(file->f_flags & O_DIRECT)
+	    && 1 /** @todo make this behaviour configurable */ )
+		return sf_reg_read_mapped(file, buf, size, off);

 	/*
…
 }

-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
-static int sf_reg_fault(struct vm_fault *vmf)
-#elif LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
-static int sf_reg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
-static struct page *sf_reg_nopage(struct vm_area_struct *vma,
-				  unsigned long vaddr, int *type)
-# define SET_TYPE(t) *type = (t)
-#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */
-static struct page *sf_reg_nopage(struct vm_area_struct *vma,
-				  unsigned long vaddr, int unused)
-# define SET_TYPE(t)
-#endif
-{
-	struct page *page;
-	char *buf;
-	loff_t off;
-	uint32_t nread = PAGE_SIZE;
-	int err;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
-	struct vm_area_struct *vma = vmf->vma;
-#endif
-	struct file *file = vma->vm_file;
-	struct inode *inode = GET_F_DENTRY(file)->d_inode;
-	struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
-	struct sf_reg_info *sf_r = file->private_data;
-
-	TRACE();
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
-	if (vmf->pgoff > vma->vm_end)
-		return VM_FAULT_SIGBUS;
-#else
-	if (vaddr > vma->vm_end) {
-		SET_TYPE(VM_FAULT_SIGBUS);
-		return NOPAGE_SIGBUS;
-	}
-#endif
-
-	/* Don't use GFP_HIGHUSER as long as sf_reg_read_aux() calls VbglR0SfRead()
-	 * which works on virtual addresses.  On Linux we cannot reliably determine
-	 * the physical address for high memory, see rtR0MemObjNativeLockKernel(). */
-	page = alloc_page(GFP_USER);
-	if (!page) {
-		LogRelFunc(("failed to allocate page\n"));
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
-		return VM_FAULT_OOM;
-#else
-		SET_TYPE(VM_FAULT_OOM);
-		return NOPAGE_OOM;
-#endif
-	}
-
-	buf = kmap(page);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
-	off = (vmf->pgoff << PAGE_SHIFT);
-#else
-	off = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
-#endif
-	err = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
-	if (err) {
-		kunmap(page);
-		put_page(page);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
-		return VM_FAULT_SIGBUS;
-#else
-		SET_TYPE(VM_FAULT_SIGBUS);
-		return NOPAGE_SIGBUS;
-#endif
-	}
-
-	BUG_ON(nread > PAGE_SIZE);
-	if (!nread) {
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
-		clear_user_page(page_address(page), vmf->pgoff, page);
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
-		clear_user_page(page_address(page), vaddr, page);
-#else
-		clear_user_page(page_address(page), vaddr);
-#endif
-	} else
-		memset(buf + nread, 0, PAGE_SIZE - nread);
-
-	flush_dcache_page(page);
-	kunmap(page);
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
-	vmf->page = page;
-	return 0;
-#else
-	SET_TYPE(VM_FAULT_MAJOR);
-	return page;
-#endif
-}
-
-static struct vm_operations_struct sf_vma_ops = {
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
-	.fault = sf_reg_fault
-#else
-	.nopage = sf_reg_nopage
-#endif
-};
-
-static int sf_reg_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	TRACE();
-	if (vma->vm_flags & VM_SHARED) {
-		LogFunc(("shared mmapping not available\n"));
-		return -EINVAL;
-	}
-
-	vma->vm_ops = &sf_vma_ops;
-	return 0;
-}
+/**
+ * Wrapper around the generic/default seek function that ensures that we've
+ * got the up-to-date file size when doing anything relative to EOF.
+ *
+ * The issue is that the host may extend the file while we weren't looking and
+ * if the caller wishes to append data, it may end up overwriting existing data
+ * if we operate with a stale size.  So, we always retrieve the file size on
+ * EOF-relative seeks.
+ */
+static loff_t sf_reg_llseek(struct file *file, loff_t off, int whence)
+{
+	switch (whence) {
+#ifdef SEEK_HOLE
+	case SEEK_HOLE:
+	case SEEK_DATA:
+#endif
+	case SEEK_END: {
+		struct sf_reg_info *sf_r = file->private_data;
+		int rc = sf_inode_revalidate_with_handle(GET_F_DENTRY(file), sf_r->handle, true /*fForce*/);
+		if (rc == 0)
+			break;
+		return rc;
+	}
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 8)
+	return generic_file_llseek(file, off, whence);
+#else
+	return default_llseek(file, off, whence);
+#endif
+}
+
+/**
+ * Flush region of file - chiefly mmap/msync.
+ *
+ * We cannot use the noop_fsync / simple_sync_file here as that means
+ * msync(,,MS_SYNC) will return before the data hits the host, thereby
+ * causing coherency issues with O_DIRECT access to the same file as
+ * well as any host interaction with the file.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
+static int sf_reg_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+{
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
+	return __generic_file_fsync(file, start, end, datasync);
+# else
+	return generic_file_fsync(file, start, end, datasync);
+# endif
+}
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
+static int sf_reg_fsync(struct file *file, int datasync)
+{
+	return generic_file_fsync(file, datasync);
+}
+#else /* < 2.6.35 */
+static int sf_reg_fsync(struct file *file, struct dentry *dentry, int datasync)
+{
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
+	return simple_fsync(file, dentry, datasync);
+# else
+	int rc;
+	struct inode *inode = dentry->d_inode;
+	AssertReturn(inode, -EINVAL);
+
+	/** @todo What about file_fsync()? (<= 2.5.11) */
+
+#  if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12)
+	rc = sync_mapping_buffers(inode->i_mapping);
+	if (   rc == 0
+	    && (inode->i_state & I_DIRTY)
+	    && ((inode->i_state & I_DIRTY_DATASYNC) || !datasync)
+	   ) {
+		struct writeback_control wbc = {
+			.sync_mode = WB_SYNC_ALL,
+			.nr_to_write = 0
+		};
+		rc = sync_inode(inode, &wbc);
+	}
+#  else /* < 2.5.12 */
+	rc  = fsync_inode_buffers(inode);
+#   if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 10)
+	rc |= fsync_inode_data_buffers(inode);
+#   endif
+	/** @todo probably need to do more here... */
+#  endif /* < 2.5.12 */
+	return rc;
+# endif
+}
+#endif /* < 2.6.35 */
+

 struct file_operations sf_reg_fops = {
…
 	.write = sf_reg_write,
 	.release = sf_reg_release,
-	.mmap = sf_reg_mmap,
+	.mmap = generic_file_mmap,
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
 # if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
…
 	.aio_write = generic_file_aio_write,
 # endif
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
-	.fsync = noop_fsync,
-# else
-	.fsync = simple_sync_file,
-# endif
-	.llseek = generic_file_llseek,
 #endif
+	.llseek = sf_reg_llseek,
+	.fsync = sf_reg_fsync,
 };
…
 	TRACE();

-	/** @todo rig up a FsPerf testcase for this code! */
-
 	if (page->index >= end_index)
 		nwritten = inode->i_size & (PAGE_SIZE - 1);
…
 {
 	TRACE();
+	/** @todo rig up a FsPerf testcase for this code! */

 	return simple_write_begin(file, mapping, pos, len, flags, pagep,
…
 	TRACE();
-
 	/** @todo rig up a FsPerf testcase for this code! */
…
 	.readpage = sf_readpage,
 	.writepage = sf_writepage,
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12)
+	.set_page_dirty = __set_page_dirty_buffers,
+# endif
 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
 	.write_begin = sf_write_begin,
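Taken together, the regops.c changes replace the private COW-only fault handler with generic_file_mmap (so MAP_SHARED now works), route read() through the page cache whenever writable mappings exist, and make fsync/msync actually push data to the host. A minimal userland sketch of the behaviour this enables — the mount path and file name are hypothetical and this test is not part of the changeset:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/mnt/shared/coherency.tst"; /* hypothetical vboxsf mount */
	char buf[5] = { 0 };
	int fd = open(path, O_RDWR | O_CREAT, 0644);
	if (fd < 0 || ftruncate(fd, 4096) != 0)
		return 1;

	/* MAP_SHARED succeeds now that .mmap is generic_file_mmap; the old
	 * sf_reg_mmap rejected VM_SHARED with -EINVAL. */
	char *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return 1;

	memcpy(map, "ping", 4);

	/* Reaches sf_reg_fsync; with the old noop_fsync this could return
	 * before the host ever saw the data. */
	if (msync(map, 4096, MS_SYNC) != 0)
		return 1;

	/* sf_reg_read detects the writable mapping and reads through the page
	 * cache (sf_reg_read_mapped), so this sees "ping". */
	if (pread(fd, buf, 4, 0) != 4)
		return 1;

	printf("read back: %s\n", buf);
	munmap(map, 4096);
	close(fd);
	return 0;
}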
trunk/src/VBox/Additions/linux/sharedfolders/utils.c (r77303 → r77419)

 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
-static void sf_ftime_from_timespec(time_t * time, RTTIMESPEC * ts)
+
+DECLINLINE(void) sf_ftime_from_timespec(time_t *time, RTTIMESPEC *ts)
 {
 	int64_t t = RTTimeSpecGetNano(ts);
-	do_div(t, 1000000000);
+	do_div(t, RT_NS_1SEC);
 	*time = t;
 }

-static void sf_timespec_from_ftime(RTTIMESPEC * ts, time_t * time)
-{
-	int64_t t = 1000000000 * *time;
-	RTTimeSpecSetNano(ts, t);
-}
-#else /* >= 2.6.0 */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)
-static void sf_ftime_from_timespec(struct timespec *tv, RTTIMESPEC *ts)
-#else
-static void sf_ftime_from_timespec(struct timespec64 *tv, RTTIMESPEC *ts)
-#endif
+DECLINLINE(void) sf_timespec_from_ftime(RTTIMESPEC *ts, time_t *time)
+{
+	RTTimeSpecSetNano(ts, RT_NS_1SEC_64 * *time);
+}
+
+#else /* >= 2.6.0 */
+
+# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)
+DECLINLINE(void) sf_ftime_from_timespec(struct timespec *tv, RTTIMESPEC *ts)
+# else
+DECLINLINE(void) sf_ftime_from_timespec(struct timespec64 *tv, RTTIMESPEC *ts)
+# endif
 {
 	int64_t t = RTTimeSpecGetNano(ts);
-	int64_t nsec;
-
-	nsec = do_div(t, 1000000000);
-	tv->tv_sec = t;
-	tv->tv_nsec = nsec;
-}
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)
-static void sf_timespec_from_ftime(RTTIMESPEC *ts, struct timespec *tv)
-#else
-static void sf_timespec_from_ftime(RTTIMESPEC *ts, struct timespec64 *tv)
-#endif
-{
-	int64_t t = (int64_t) tv->tv_nsec + (int64_t) tv->tv_sec * 1000000000;
-	RTTimeSpecSetNano(ts, t);
-}
-#endif /* >= 2.6.0 */
+	tv->tv_nsec = do_div(t, RT_NS_1SEC);
+	tv->tv_sec  = t;
+}
+
+# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0)
+DECLINLINE(void) sf_timespec_from_ftime(RTTIMESPEC *ts, struct timespec *tv)
+# else
+DECLINLINE(void) sf_timespec_from_ftime(RTTIMESPEC *ts, struct timespec64 *tv)
+# endif
+{
+	RTTimeSpecSetNano(ts, tv->tv_nsec + tv->tv_sec * (int64_t)RT_NS_1SEC);
+}
+
+#endif /* >= 2.6.0 */

 /* set [inode] attributes based on [info], uid/gid based on [sf_g] */
…
 		inode->i_mapping->a_ops = &sf_reg_aops;
 # if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 19, 0)
-		/* XXX Was this ever necessary? */
-		inode->i_mapping->backing_dev_info = &sf_g->bdi;
+		inode->i_mapping->backing_dev_info = &sf_g->bdi; /* This is needed for mmap. */
 # endif
 #endif
…
 	}
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 8)
 	else if (RTFS_IS_SYMLINK(attr->fMode)) {
 		inode->i_mode = sf_g->fmode != ~0 ? (sf_g->fmode & 0777) : mode;
…
 	TRACE();
 	if (!dentry || !dentry->d_inode) {
-		LogFunc(("no dentry(%p) or inode(%p)\n", dentry,
-			 dentry->d_inode));
+		LogFunc(("no dentry(%p) or inode(%p)\n", dentry, dentry ? dentry->d_inode : NULL));
 		return -EINVAL;
 	}
…
 	dentry->d_time = jiffies;
+	/** @todo bird has severe inode locking / rcu concerns here: */
 	sf_init_inode(sf_g, dentry->d_inode, &info);
 	return 0;
 }
+
+/**
+ * Similar to sf_inode_revalidate, but uses the associated host file handle as
+ * that is quite a bit faster.
+ */
+int sf_inode_revalidate_with_handle(struct dentry *dentry, SHFLHANDLE hHostFile, bool fForced)
+{
+	int err;
+	struct inode *pInode = dentry ? dentry->d_inode : NULL;
+	if (!pInode) {
+		LogFunc(("no dentry(%p) or inode(%p)\n", dentry, pInode));
+		err = -EINVAL;
+	} else {
+		struct sf_inode_info *sf_i = GET_INODE_INFO(pInode);
+		struct sf_glob_info *sf_g = GET_GLOB_INFO(pInode->i_sb);
+		AssertReturn(sf_i, -EINVAL);
+		AssertReturn(sf_g, -EINVAL);
+
+		/*
+		 * Can we get away without any action here?
+		 */
+		if (   !fForced
+		    && !sf_i->force_restat
+		    && jiffies - dentry->d_time < sf_g->ttl)
+			err = 0;
+		else {
+			/*
+			 * No, we have to query the file info from the host.
+			 */
+			VBOXSFOBJINFOREQ *pReq = (VBOXSFOBJINFOREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
+			if (pReq) {
+				RT_ZERO(*pReq);
+				err = VbglR0SfHostReqQueryObjInfo(sf_g->map.root, pReq, hHostFile);
+				if (RT_SUCCESS(err)) {
+					/*
+					 * Reset the TTL and copy the info over into the inode structure.
+					 */
+					dentry->d_time = jiffies;
+					/** @todo bird has severe inode locking / rcu concerns here: */
+					sf_init_inode(sf_g, pInode, &pReq->ObjInfo);
+				} else {
+					LogFunc(("VbglR0SfHostReqQueryObjInfo failed on %#RX64: %Rrc\n", hHostFile, err));
+					err = -RTErrConvertToErrno(err);
+				}
+				VbglR0PhysHeapFree(pReq);
+			} else
+				err = -ENOMEM;
+		}
+	}
+	return err;
+}
…
 };

-int sf_init_backing_dev(struct sf_glob_info *sf_g)
+int sf_init_backing_dev(struct super_block *sb, struct sf_glob_info *sf_g)
 {
 	int rc = 0;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) && LINUX_VERSION_CODE <= KERNEL_VERSION(3, 19, 0)
+/** @todo this needs sorting out between 3.19 and 4.11 */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) //&& LINUX_VERSION_CODE <= KERNEL_VERSION(3, 19, 0)
 	/* Each new shared folder map gets a new uint64_t identifier,
 	 * allocated in sequence.  We ASSUME the sequence will not wrap. */
 	static uint64_t s_u64Sequence = 0;
 	uint64_t u64CurrentSequence = ASMAtomicIncU64(&s_u64Sequence);
-
-	sf_g->bdi.ra_pages = 0;	/* No readahead */
+	struct backing_dev_info *bdi;
+
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+	rc = super_setup_bdi_name(sb, "vboxsf-%llu", (unsigned long long)u64CurrentSequence);
+	if (!rc)
+		bdi = sb->s_bdi;
+	else
+		return rc;
+# else
+	bdi = &sf_g->bdi;
+# endif
+
+	bdi->ra_pages = 0; /* No readahead */
+
 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 12)
-	sf_g->bdi.capabilities = BDI_CAP_MAP_DIRECT	/* MAP_SHARED */
-	    | BDI_CAP_MAP_COPY	/* MAP_PRIVATE */
-	    | BDI_CAP_READ_MAP	/* can be mapped for reading */
-	    | BDI_CAP_WRITE_MAP	/* can be mapped for writing */
-	    | BDI_CAP_EXEC_MAP;	/* can be mapped for execution */
+	bdi->capabilities = 0
+#  ifdef BDI_CAP_MAP_DIRECT
+			  | BDI_CAP_MAP_DIRECT	/* MAP_SHARED */
+#  endif
+#  ifdef BDI_CAP_MAP_COPY
+			  | BDI_CAP_MAP_COPY	/* MAP_PRIVATE */
+#  endif
+#  ifdef BDI_CAP_READ_MAP
+			  | BDI_CAP_READ_MAP	/* can be mapped for reading */
+#  endif
+#  ifdef BDI_CAP_WRITE_MAP
+			  | BDI_CAP_WRITE_MAP	/* can be mapped for writing */
+#  endif
+#  ifdef BDI_CAP_EXEC_MAP
+			  | BDI_CAP_EXEC_MAP	/* can be mapped for execution */
+#  endif
+#  ifdef BDI_CAP_STRICTLIMIT
+			  | BDI_CAP_STRICTLIMIT
+#  endif
+			  ;
+#  ifdef BDI_CAP_STRICTLIMIT
+	/* Smallest possible amount of dirty pages: 1% of RAM. */
+	bdi_set_max_ratio(bdi, 1);
+#  endif
 # endif /* >= 2.6.12 */
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
+
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
 	rc = bdi_init(&sf_g->bdi);
 #  if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
…
 #  endif /* >= 2.6.26 */
 # endif /* >= 2.6.24 */
-#endif /* >= 2.6.0 && <= 3.19.0 */
+#endif /* >= 2.6.0 */
 	return rc;
 }

-void sf_done_backing_dev(struct sf_glob_info *sf_g)
+void sf_done_backing_dev(struct super_block *sb, struct sf_glob_info *sf_g)
 {
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) && LINUX_VERSION_CODE <= KERNEL_VERSION(3, 19, 0)
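The rewritten time helpers lean on do_div(), which divides a 64-bit value in place and returns the remainder; that is why tv_nsec takes the do_div() result while tv_sec takes what is left in t. A standalone plain-C illustration of the same split and its inverse — a userland stand-in, not kernel code, assuming a positive nanosecond count since the kernel's do_div() operates on unsigned values:

#include <stdint.h>
#include <stdio.h>

#define RT_NS_1SEC INT64_C(1000000000)

int main(void)
{
	int64_t ns = INT64_C(1550792008123456789); /* hypothetical file timestamp in ns */

	/* sf_ftime_from_timespec: tv_nsec = do_div(t, RT_NS_1SEC); tv_sec = t; */
	int64_t sec  = ns / RT_NS_1SEC;   /* what do_div leaves in t (quotient) */
	int64_t nsec = ns % RT_NS_1SEC;   /* what do_div returns (remainder) */

	/* sf_timespec_from_ftime: RTTimeSpecSetNano(ts, tv_nsec + tv_sec * RT_NS_1SEC) */
	int64_t back = nsec + sec * RT_NS_1SEC;

	printf("sec=%lld nsec=%lld roundtrip_ok=%d\n",
	       (long long)sec, (long long)nsec, back == ns);
	return 0;
}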
trunk/src/VBox/Additions/linux/sharedfolders/vfsmod.c (r77303 → r77419)

 	}

-	if (sf_init_backing_dev(sf_g)) {
+	if (sf_init_backing_dev(sb, sf_g)) {
 		err = -EINVAL;
 		LogFunc(("could not init bdi\n"));
…

 fail5:
-	sf_done_backing_dev(sf_g);
+	sf_done_backing_dev(sb, sf_g);

 fail4:
…
 	sf_g = GET_GLOB_INFO(sb);
 	BUG_ON(!sf_g);
-	sf_done_backing_dev(sf_g);
+	sf_done_backing_dev(sb, sf_g);
 	sf_glob_free(sf_g);
 }
trunk/src/VBox/Additions/linux/sharedfolders/vfsmod.h (r77303 → r77419)

 	 * This applies to read and write operations. */
 	uint32_t cMaxIoPages;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
 	struct backing_dev_info bdi;
 #endif
…
 		   SHFLSTRING *path, PSHFLFSOBJINFO result, int ok_to_fail);
 extern int sf_inode_revalidate(struct dentry *dentry);
+int sf_inode_revalidate_with_handle(struct dentry *dentry, SHFLHANDLE hHostFile, bool fForced);
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
 # if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
…
 		       struct sf_inode_info *sf_i, struct sf_dir_info *sf_d,
 		       SHFLHANDLE handle);
-extern int sf_init_backing_dev(struct sf_glob_info *sf_g);
-extern void sf_done_backing_dev(struct sf_glob_info *sf_g);
+extern int sf_init_backing_dev(struct super_block *sb, struct sf_glob_info *sf_g);
+extern void sf_done_backing_dev(struct super_block *sb, struct sf_glob_info *sf_g);

 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)