VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/sharedfolders/regops.c@ 39837

Last change on this file since 39837 was 39789, checked in by vboxsync, 13 years ago

Additions/linux/sharedfolders: make sure all data is written out when closing a file. Only use this on kernels where the required API is available. And indent correctly.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 19.4 KB
Line 
1/** @file
2 *
3 * vboxsf -- VirtualBox Guest Additions for Linux:
4 * Regular file inode and file operations
5 */
6
7/*
8 * Copyright (C) 2006-2010 Oracle Corporation
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 */
18
19/*
20 * Limitations: only COW memory mapping is supported
21 */
22
23#include "vfsmod.h"
24
25static void *alloc_bounch_buffer(size_t *tmp_sizep, PRTCCPHYS physp, size_t
26 xfer_size, const char *caller)
27{
28 size_t tmp_size;
29 void *tmp;
30
31 /* try for big first. */
32 tmp_size = RT_ALIGN_Z(xfer_size, PAGE_SIZE);
33 if (tmp_size > 16U*_1K)
34 tmp_size = 16U*_1K;
35 tmp = kmalloc(tmp_size, GFP_KERNEL);
36 if (!tmp)
37 {
38 /* fall back on a page sized buffer. */
39 tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
40 if (!tmp)
41 {
42 LogRel(("%s: could not allocate bounce buffer for xfer_size=%zu %s\n", caller, xfer_size));
43 return NULL;
44 }
45 tmp_size = PAGE_SIZE;
46 }
47
48 *tmp_sizep = tmp_size;
49 *physp = virt_to_phys(tmp);
50 return tmp;
51}
52
/** Counterpart of alloc_bounch_buffer(): returns the buffer to the kernel. */
static void free_bounch_buffer(void *tmp)
{
    kfree(tmp);
}
57
58
59/* fops */
60static int sf_reg_read_aux(const char *caller, struct sf_glob_info *sf_g,
61 struct sf_reg_info *sf_r, void *buf,
62 uint32_t *nread, uint64_t pos)
63{
64 /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
65 * contiguous in physical memory (kmalloc or single page), we should
66 * use a physical address here to speed things up. */
67 int rc = vboxCallRead(&client_handle, &sf_g->map, sf_r->handle,
68 pos, nread, buf, false /* already locked? */);
69 if (RT_FAILURE(rc))
70 {
71 LogFunc(("vboxCallRead failed. caller=%s, rc=%Rrc\n", caller, rc));
72 return -EPROTO;
73 }
74 return 0;
75}
76
77static int sf_reg_write_aux(const char *caller, struct sf_glob_info *sf_g,
78 struct sf_reg_info *sf_r, void *buf,
79 uint32_t *nwritten, uint64_t pos)
80{
81 /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
82 * contiguous in physical memory (kmalloc or single page), we should
83 * use a physical address here to speed things up. */
84 int rc = vboxCallWrite(&client_handle, &sf_g->map, sf_r->handle,
85 pos, nwritten, buf, false /* already locked? */);
86 if (RT_FAILURE(rc))
87 {
88 LogFunc(("vboxCallWrite failed. caller=%s, rc=%Rrc\n",
89 caller, rc));
90 return -EPROTO;
91 }
92 return 0;
93}
94
/**
 * Read from a regular file.
 *
 * Copies data host -> bounce buffer -> userspace in chunks of at most the
 * bounce-buffer size, advancing the file position as it goes.
 *
 * @param file the file
 * @param buf the buffer (userspace)
 * @param size length of the buffer
 * @param off offset within the file
 * @returns the number of read bytes on success, Linux error code otherwise
 */
static ssize_t sf_reg_read(struct file *file, char *buf, size_t size, loff_t *off)
{
    int err;
    void *tmp;                   /* kernel bounce buffer */
    RTCCPHYS tmp_phys;           /* physical address of tmp (currently unused here) */
    size_t tmp_size;             /* actual size of the bounce buffer */
    size_t left = size;          /* bytes still to read */
    ssize_t total_bytes_read = 0;
    struct inode *inode = file->f_dentry->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    loff_t pos = *off;

    TRACE();
    if (!S_ISREG(inode->i_mode))
    {
        LogFunc(("read from non regular file %d\n", inode->i_mode));
        return -EINVAL;
    }

    /** XXX Check read permission according to inode->i_mode! */

    if (!size)
        return 0;

    tmp = alloc_bounch_buffer(&tmp_size, &tmp_phys, size, __PRETTY_FUNCTION__);
    if (!tmp)
        return -ENOMEM;

    while (left)
    {
        uint32_t to_read, nread;

        /* Clamp the chunk to the bounce-buffer size. */
        to_read = tmp_size;
        if (to_read > left)
            to_read = (uint32_t) left;

        nread = to_read;

        err = sf_reg_read_aux(__func__, sf_g, sf_r, tmp, &nread, pos);
        if (err)
            goto fail;

        if (copy_to_user(buf, tmp, nread))
        {
            err = -EFAULT;
            goto fail;
        }

        pos += nread;
        left -= nread;
        buf += nread;
        total_bytes_read += nread;
        /* Short read from the host means end of file: stop. */
        if (nread != to_read)
            break;
    }

    *off += total_bytes_read;
    free_bounch_buffer(tmp);
    return total_bytes_read;

fail:
    /* On error nothing that was already copied is reported to the caller. */
    free_bounch_buffer(tmp);
    return err;
}
169
/**
 * Write to a regular file.
 *
 * Copies data userspace -> bounce buffer -> host in chunks of at most the
 * bounce-buffer size. For O_APPEND files the position is reset to the
 * cached inode size first, and the cached size is grown after the write.
 *
 * @param file the file
 * @param buf the buffer (userspace)
 * @param size length of the buffer
 * @param off offset within the file
 * @returns the number of written bytes on success, Linux error code otherwise
 */
static ssize_t sf_reg_write(struct file *file, const char *buf, size_t size, loff_t *off)
{
    int err;
    void *tmp;                   /* kernel bounce buffer */
    RTCCPHYS tmp_phys;           /* physical address of tmp, for the phys-page-list path */
    size_t tmp_size;             /* actual size of the bounce buffer */
    size_t left = size;          /* bytes still to write */
    ssize_t total_bytes_written = 0;
    struct inode *inode = file->f_dentry->d_inode;
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    loff_t pos;

    TRACE();
    BUG_ON(!sf_i);
    BUG_ON(!sf_g);
    BUG_ON(!sf_r);

    if (!S_ISREG(inode->i_mode))
    {
        LogFunc(("write to non regular file %d\n", inode->i_mode));
        return -EINVAL;
    }

    pos = *off;
    if (file->f_flags & O_APPEND)
    {
        /* Append always starts at the (cached) end of the file; *off is
         * updated now so the final "*off += total" below lands correctly. */
        pos = inode->i_size;
        *off = pos;
    }

    /** XXX Check write permission according to inode->i_mode! */

    if (!size)
        return 0;

    tmp = alloc_bounch_buffer(&tmp_size, &tmp_phys, size, __PRETTY_FUNCTION__);
    if (!tmp)
        return -ENOMEM;

    while (left)
    {
        uint32_t to_write, nwritten;

        /* Clamp the chunk to the bounce-buffer size. */
        to_write = tmp_size;
        if (to_write > left)
            to_write = (uint32_t) left;

        nwritten = to_write;

        if (copy_from_user(tmp, buf, to_write))
        {
            err = -EFAULT;
            goto fail;
        }

#if 1
        /* Prefer the faster physically-addressed host call when available. */
        if (VbglR0CanUsePhysPageList())
        {
            err = VbglR0SfWritePhysCont(&client_handle, &sf_g->map, sf_r->handle,
                                        pos, &nwritten, tmp_phys);
            err = RT_FAILURE(err) ? -EPROTO : 0;
        }
        else
#endif
            err = sf_reg_write_aux(__func__, sf_g, sf_r, tmp, &nwritten, pos);
        if (err)
            goto fail;

        pos += nwritten;
        left -= nwritten;
        buf += nwritten;
        total_bytes_written += nwritten;
        /* Short write from the host: stop and report what went through. */
        if (nwritten != to_write)
            break;
    }

    *off += total_bytes_written;
    /* Grow the cached file size if we wrote past it. */
    if (*off > inode->i_size)
        inode->i_size = *off;

    /* Force a fresh stat from the host next time the attributes are needed. */
    sf_i->force_restat = 1;
    free_bounch_buffer(tmp);
    return total_bytes_written;

fail:
    free_bounch_buffer(tmp);
    return err;
}
269
/**
 * Open a regular file.
 *
 * Translates the Linux open flags (O_CREAT, O_TRUNC, O_APPEND, access mode)
 * into shared-folder SHFL_CF_* create flags and asks the host to open or
 * create the file. If sf_create_aux() already opened a host handle for this
 * inode, that handle is adopted instead of opening a second one.
 *
 * @param inode the inode
 * @param file the file
 * @returns 0 on success, Linux error code otherwise
 */
static int sf_reg_open(struct inode *inode, struct file *file)
{
    int rc, rc_linux = 0;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct sf_reg_info *sf_r;
    SHFLCREATEPARMS params;

    TRACE();
    BUG_ON(!sf_g);
    BUG_ON(!sf_i);

    LogFunc(("open %s\n", sf_i->path->String.utf8));

    sf_r = kmalloc(sizeof(*sf_r), GFP_KERNEL);
    if (!sf_r)
    {
        LogRelFunc(("could not allocate reg info\n"));
        return -ENOMEM;
    }

    /* Already open? */
    if (sf_i->handle != SHFL_HANDLE_NIL)
    {
        /*
         * This inode was created with sf_create_aux(). Check the CreateFlags:
         * O_CREAT, O_TRUNC: inherent true (file was just created). Not sure
         * about the access flags (SHFL_CF_ACCESS_*).
         */
        sf_i->force_restat = 1;
        sf_r->handle = sf_i->handle;
        sf_i->handle = SHFL_HANDLE_NIL;
        sf_i->file = file;
        file->private_data = sf_r;
        return 0;
    }

    RT_ZERO(params);
    params.Handle = SHFL_HANDLE_NIL;
    /* We check the value of params.Handle afterwards to find out if
     * the call succeeded or failed, as the API does not seem to cleanly
     * distinguish error and informational messages.
     *
     * Furthermore, we must set params.Handle to SHFL_HANDLE_NIL to
     * make the shared folders host service use our fMode parameter */

    if (file->f_flags & O_CREAT)
    {
        LogFunc(("O_CREAT set\n"));
        params.CreateFlags |= SHFL_CF_ACT_CREATE_IF_NEW;
        /* We ignore O_EXCL, as the Linux kernel seems to call create
           beforehand itself, so O_EXCL should always fail. */
        if (file->f_flags & O_TRUNC)
        {
            LogFunc(("O_TRUNC set\n"));
            params.CreateFlags |= (  SHFL_CF_ACT_OVERWRITE_IF_EXISTS
                                   | SHFL_CF_ACCESS_WRITE);
        }
        else
            params.CreateFlags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
    }
    else
    {
        params.CreateFlags |= SHFL_CF_ACT_FAIL_IF_NEW;
        if (file->f_flags & O_TRUNC)
        {
            LogFunc(("O_TRUNC set\n"));
            /* Truncation implies write access on the host side. */
            params.CreateFlags |= (  SHFL_CF_ACT_OVERWRITE_IF_EXISTS
                                   | SHFL_CF_ACCESS_WRITE);
        }
    }

    /* Map the Linux access mode unless O_TRUNC already forced write access. */
    if (!(params.CreateFlags & SHFL_CF_ACCESS_READWRITE))
    {
        switch (file->f_flags & O_ACCMODE)
        {
            case O_RDONLY:
                params.CreateFlags |= SHFL_CF_ACCESS_READ;
                break;

            case O_WRONLY:
                params.CreateFlags |= SHFL_CF_ACCESS_WRITE;
                break;

            case O_RDWR:
                params.CreateFlags |= SHFL_CF_ACCESS_READWRITE;
                break;

            default:
                BUG ();
        }
    }

    if (file->f_flags & O_APPEND)
    {
        LogFunc(("O_APPEND set\n"));
        params.CreateFlags |= SHFL_CF_ACCESS_APPEND;
    }

    params.Info.Attr.fMode = inode->i_mode;
    LogFunc(("sf_reg_open: calling vboxCallCreate, file %s, flags=%#x, %#x\n",
             sf_i->path->String.utf8 , file->f_flags, params.CreateFlags));
    rc = vboxCallCreate(&client_handle, &sf_g->map, sf_i->path, &params);
    if (RT_FAILURE(rc))
    {
        LogFunc(("vboxCallCreate failed flags=%d,%#x rc=%Rrc\n",
                 file->f_flags, params.CreateFlags, rc));
        kfree(sf_r);
        return -RTErrConvertToErrno(rc);
    }

    /* A NIL handle with a "successful" rc means the open failed for a
     * reason reported in params.Result; map it to a Linux errno. */
    if (SHFL_HANDLE_NIL == params.Handle)
    {
        switch (params.Result)
        {
            case SHFL_PATH_NOT_FOUND:
            case SHFL_FILE_NOT_FOUND:
                rc_linux = -ENOENT;
                break;
            case SHFL_FILE_EXISTS:
                rc_linux = -EEXIST;
                break;
            default:
                break;
        }
    }

    sf_i->force_restat = 1;
    sf_r->handle = params.Handle;
    sf_i->file = file;
    file->private_data = sf_r;
    return rc_linux;
}
410
/**
 * Close a regular file.
 *
 * Flushes any dirty page-cache pages for the inode (mmap can dirty pages
 * that would otherwise be written after the host handle is closed), then
 * closes the host handle and frees the per-open bookkeeping.
 *
 * @param inode the inode
 * @param file the file
 * @returns 0 on success, Linux error code otherwise
 */
static int sf_reg_release(struct inode *inode, struct file *file)
{
    int rc;
    struct sf_reg_info *sf_r;
    struct sf_glob_info *sf_g;
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);

    TRACE();
    sf_g = GET_GLOB_INFO(inode->i_sb);
    sf_r = file->private_data;

    BUG_ON(!sf_g);
    BUG_ON(!sf_r);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
    /* See the smbfs source (file.c). mmap in particular can cause data to be
     * written to the file after it is closed, which we can't cope with. We
     * copy and paste the body of filemap_write_and_wait() here as it was not
     * defined before 2.6.6 and not exported until quite a bit later. */
    /* filemap_write_and_wait(inode->i_mapping); */
    if (   inode->i_mapping->nrpages
        && filemap_fdatawrite(inode->i_mapping) != -EIO)
        filemap_fdatawait(inode->i_mapping);
#endif
    rc = vboxCallClose(&client_handle, &sf_g->map, sf_r->handle);
    if (RT_FAILURE(rc))
        /* Log only; release must not fail from the kernel's point of view. */
        LogFunc(("vboxCallClose failed rc=%Rrc\n", rc));

    kfree(sf_r);
    sf_i->file = NULL;
    sf_i->handle = SHFL_HANDLE_NIL;
    file->private_data = NULL;
    return 0;
}
452
/**
 * Page-fault / nopage handler for private (COW) mappings of vboxsf files.
 *
 * Allocates a fresh page, fills it by reading from the host at the faulting
 * file offset, and zero-fills any tail the host did not provide. The
 * callback signature and return convention depend on the kernel version,
 * hence the three variants below (SET_TYPE is a no-op or *type assignment).
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
static int sf_reg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
static struct page *sf_reg_nopage(struct vm_area_struct *vma, unsigned long vaddr, int *type)
# define SET_TYPE(t) *type = (t)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION (2, 6, 0) */
static struct page *sf_reg_nopage(struct vm_area_struct *vma, unsigned long vaddr, int unused)
# define SET_TYPE(t)
#endif
{
    struct page *page;
    char *buf;
    loff_t off;
    uint32_t nread = PAGE_SIZE;
    int err;
    struct file *file = vma->vm_file;
    struct inode *inode = file->f_dentry->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;

    TRACE();
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    /* NOTE(review): this compares a page offset (pgoff) with an address
     * (vm_end); it looks like a pgoff-vs-address mixup — confirm intent. */
    if (vmf->pgoff > vma->vm_end)
        return VM_FAULT_SIGBUS;
#else
    if (vaddr > vma->vm_end)
    {
        SET_TYPE(VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
    }
#endif

    /* Don't use GFP_HIGHUSER as long as sf_reg_read_aux() calls vboxCallRead()
     * which works on virtual addresses. On Linux cannot reliably determine the
     * physical address for high memory, see rtR0MemObjNativeLockKernel(). */
    page = alloc_page(GFP_USER);
    if (!page) {
        LogRelFunc(("failed to allocate page\n"));
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        return VM_FAULT_OOM;
#else
        SET_TYPE(VM_FAULT_OOM);
        return NOPAGE_OOM;
#endif
    }

    buf = kmap(page);
    /* Compute the file offset of the faulting page. */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    off = (vmf->pgoff << PAGE_SHIFT);
#else
    off = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
#endif
    err = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
    if (err)
    {
        kunmap(page);
        put_page(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        return VM_FAULT_SIGBUS;
#else
        SET_TYPE(VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
#endif
    }

    BUG_ON (nread > PAGE_SIZE);
    if (!nread)
    {
        /* Nothing read (fault beyond EOF): hand back a zeroed page. */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
        clear_user_page(page_address(page), vmf->pgoff, page);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
        clear_user_page(page_address(page), vaddr, page);
#else
        clear_user_page(page_address(page), vaddr);
#endif
    }
    else
        /* Zero the tail past what the host returned. */
        memset(buf + nread, 0, PAGE_SIZE - nread);

    flush_dcache_page(page);
    kunmap(page);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    vmf->page = page;
    return 0;
#else
    SET_TYPE(VM_FAULT_MAJOR);
    return page;
#endif
}
542
/** VM operations for mmap'ed vboxsf files: only the fault/nopage callback
 *  is provided (the field name depends on the kernel version). */
static struct vm_operations_struct sf_vma_ops =
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
    .fault = sf_reg_fault
#else
    .nopage = sf_reg_nopage
#endif
};
551
552static int sf_reg_mmap(struct file *file, struct vm_area_struct *vma)
553{
554 TRACE();
555 if (vma->vm_flags & VM_SHARED)
556 {
557 LogFunc(("shared mmapping not available\n"));
558 return -EINVAL;
559 }
560
561 vma->vm_ops = &sf_vma_ops;
562 return 0;
563}
564
/** File operations for regular files on a shared folder. */
struct file_operations sf_reg_fops =
{
    .read = sf_reg_read,
    .open = sf_reg_open,
    .write = sf_reg_write,
    .release = sf_reg_release,
    .mmap = sf_reg_mmap,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
    .splice_read = generic_file_splice_read,
# else
    .sendfile = generic_file_sendfile,
# endif
    .aio_read = generic_file_aio_read,
    .aio_write = generic_file_aio_write,
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
    /* noop_fsync: writes go straight through, nothing extra to sync. */
    .fsync = noop_fsync,
# else
    .fsync = simple_sync_file,
# endif
    .llseek = generic_file_llseek,
#endif
};
588
589
/** Inode operations for regular files on a shared folder. */
struct inode_operations sf_reg_iops =
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
    .revalidate = sf_inode_revalidate
#else
    .getattr = sf_getattr,
    .setattr = sf_setattr
#endif
};
599
600
601#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
/**
 * Fill a page-cache page by reading the corresponding range from the host.
 *
 * @param file the file the page belongs to
 * @param page the (locked) page to fill
 * @returns 0 on success, negative error code otherwise
 */
static int sf_readpage(struct file *file, struct page *page)
{
    struct inode *inode = file->f_dentry->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    uint32_t nread = PAGE_SIZE;
    char *buf;
    /* File offset covered by this page. */
    loff_t off = ((loff_t)page->index) << PAGE_SHIFT;
    int ret;

    TRACE();

    buf = kmap(page);
    ret = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off);
    if (ret)
    {
        kunmap(page);
        if (PageLocked(page))
            unlock_page(page);
        return ret;
    }
    BUG_ON(nread > PAGE_SIZE);
    /* Zero the tail past what the host returned (short read near EOF). */
    memset(&buf[nread], 0, PAGE_SIZE - nread);
    flush_dcache_page(page);
    kunmap(page);
    SetPageUptodate(page);
    unlock_page(page);
    return 0;
}
631
/**
 * Write a dirty page-cache page back to the host.
 *
 * @param page the (locked) page to write out
 * @param wbc  writeback control (unused)
 * @returns 0 on success, negative error code otherwise
 */
static int
sf_writepage(struct page *page, struct writeback_control *wbc)
{
    struct address_space *mapping = page->mapping;
    struct inode *inode = mapping->host;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    /* The open file this inode was last accessed through; its host handle
     * is used for the write. */
    struct file *file = sf_i->file;
    struct sf_reg_info *sf_r = file->private_data;
    char *buf;
    uint32_t nwritten = PAGE_SIZE;
    /* NOTE(review): end_index is an int while page->index is unsigned long;
     * very large files could overflow this — confirm. */
    int end_index = inode->i_size >> PAGE_SHIFT;
    loff_t off = ((loff_t) page->index) << PAGE_SHIFT;
    int err;

    TRACE();

    /* For the last (partial) page only write up to i_size.
     * NOTE(review): if i_size is exactly page-aligned this yields
     * nwritten == 0 for the page at end_index — verify that is intended. */
    if (page->index >= end_index)
        nwritten = inode->i_size & (PAGE_SIZE-1);

    buf = kmap(page);

    err = sf_reg_write_aux(__func__, sf_g, sf_r, buf, &nwritten, off);
    if (err < 0)
    {
        ClearPageUptodate(page);
        goto out;
    }

    if (off > inode->i_size)
        inode->i_size = off;

    if (PageError(page))
        ClearPageError(page);
    err = 0;

out:
    kunmap(page);

    unlock_page(page);
    return err;
}
674
675# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
676int sf_write_begin(struct file *file, struct address_space *mapping, loff_t pos,
677 unsigned len, unsigned flags, struct page **pagep, void **fsdata)
678{
679 TRACE();
680
681 return simple_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
682}
683
684int sf_write_end(struct file *file, struct address_space *mapping, loff_t pos,
685 unsigned len, unsigned copied, struct page *page, void *fsdata)
686{
687 struct inode *inode = mapping->host;
688 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
689 struct sf_reg_info *sf_r = file->private_data;
690 void *buf;
691 unsigned from = pos & (PAGE_SIZE - 1);
692 uint32_t nwritten = len;
693 int err;
694
695 TRACE();
696
697 buf = kmap(page);
698 err = sf_reg_write_aux(__func__, sf_g, sf_r, buf+from, &nwritten, pos);
699 kunmap(page);
700
701 if (!PageUptodate(page) && err == PAGE_SIZE)
702 SetPageUptodate(page);
703
704 if (err >= 0) {
705 pos += nwritten;
706 if (pos > inode->i_size)
707 inode->i_size = pos;
708 }
709
710 unlock_page(page);
711 page_cache_release(page);
712
713 return nwritten;
714}
715
716# endif /* KERNEL_VERSION >= 2.6.24 */
717
/** Address-space operations for regular files on a shared folder. */
struct address_space_operations sf_reg_aops =
{
    .readpage = sf_readpage,
    .writepage = sf_writepage,
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
    .write_begin = sf_write_begin,
    .write_end = sf_write_end,
# else
    .prepare_write = simple_prepare_write,
    .commit_write = simple_commit_write,
# endif
};
730#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette