VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/sharedfolders/regops.c@ 28253

Last change on this file since 28253 was 28253, checked in by vboxsync, 15 years ago

SharedFolders/linux: Don't close an inode for a regular file after it was created with sf_create(). We assume that this inode will be opened with sf_reg_open() and later closed with sf_reg_close(). Fixes copying of read-only files on shared folders.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 20.1 KB
Line 
1/** @file
2 *
3 * vboxvfs -- VirtualBox Guest Additions for Linux:
4 * Regular file inode and file operations
5 */
6
7/*
8 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 *
18 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
19 * Clara, CA 95054 USA or visit http://www.sun.com if you need
20 * additional information or have any questions.
21 */
22
23/*
24 * Limitations: only COW memory mapping is supported
25 */
26
27#include "vfsmod.h"
28
29
30static void *alloc_bounch_buffer (size_t *tmp_sizep, PRTCCPHYS physp, size_t xfer_size, const char *caller)
31{
32 size_t tmp_size;
33 void *tmp;
34
35 /* try for big first. */
36 tmp_size = RT_ALIGN_Z(xfer_size, PAGE_SIZE);
37 if (tmp_size > 16U*_1K)
38 tmp_size = 16U*_1K;
39 tmp = kmalloc (tmp_size, GFP_KERNEL);
40 if (!tmp) {
41
42 /* fall back on a page sized buffer. */
43 tmp = kmalloc (PAGE_SIZE, GFP_KERNEL);
44 if (!tmp) {
45 LogRel(("%s: could not allocate bounce buffer for xfer_size=%zu %s\n", caller, xfer_size));
46 return NULL;
47 }
48 tmp_size = PAGE_SIZE;
49 }
50
51 *tmp_sizep = tmp_size;
52 *physp = virt_to_phys(tmp);
53 return tmp;
54}
55
/** Counterpart to alloc_bounch_buffer(): frees the bounce buffer. */
static void free_bounch_buffer (void *tmp)
{
    kfree (tmp);
}
60
61
62/* fops */
63static int
64sf_reg_read_aux (const char *caller, struct sf_glob_info *sf_g,
65 struct sf_reg_info *sf_r, void *buf, uint32_t *nread,
66 uint64_t pos)
67{
68 /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
69 * contiguous in physical memory (kmalloc or single page), we should
70 * use a physical address here to speed things up. */
71 int rc = vboxCallRead (&client_handle, &sf_g->map, sf_r->handle,
72 pos, nread, buf, false /* already locked? */);
73 if (RT_FAILURE (rc)) {
74 LogFunc(("vboxCallRead failed. caller=%s, rc=%Rrc\n",
75 caller, rc));
76 return -EPROTO;
77 }
78 return 0;
79}
80
81static int
82sf_reg_write_aux (const char *caller, struct sf_glob_info *sf_g,
83 struct sf_reg_info *sf_r, void *buf, uint32_t *nwritten,
84 uint64_t pos)
85{
86 /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
87 * contiguous in physical memory (kmalloc or single page), we should
88 * use a physical address here to speed things up. */
89 int rc = vboxCallWrite (&client_handle, &sf_g->map, sf_r->handle,
90 pos, nwritten, buf, false /* already locked? */);
91 if (RT_FAILURE (rc)) {
92 LogFunc(("vboxCallWrite failed. caller=%s, rc=%Rrc\n",
93 caller, rc));
94 return -EPROTO;
95 }
96 return 0;
97}
98
/**
 * Read from a regular file on a shared folder
 * (file_operations::read).
 *
 * Data is shuttled through a physically contiguous bounce buffer and
 * copied out to user space one chunk at a time.
 *
 * @param file  File to read from; private_data holds the sf_reg_info.
 * @param buf   User-space destination buffer.
 * @param size  Number of bytes requested.
 * @param off   File offset to read from; advanced by the byte count read.
 * @returns Number of bytes read, or a negative errno on failure.
 */
static ssize_t
sf_reg_read (struct file *file, char *buf, size_t size, loff_t *off)
{
    int err;
    void *tmp;
    RTCCPHYS tmp_phys;
    size_t tmp_size;
    size_t left = size;
    ssize_t total_bytes_read = 0;
    struct inode *inode = file->f_dentry->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    loff_t pos = *off;

    TRACE ();
    if (!S_ISREG (inode->i_mode)) {
        LogFunc(("read from non regular file %d\n", inode->i_mode));
        return -EINVAL;
    }

    /** XXX Check read permission according to inode->i_mode! */

    if (!size) {
        return 0;
    }

    tmp = alloc_bounch_buffer (&tmp_size, &tmp_phys, size, __PRETTY_FUNCTION__);
    if (!tmp)
        return -ENOMEM;

    while (left) {
        uint32_t to_read, nread;

        /* Read at most one bounce buffer's worth per host call. */
        to_read = tmp_size;
        if (to_read > left) {
            to_read = (uint32_t) left;
        }
        nread = to_read;

        err = sf_reg_read_aux (__func__, sf_g, sf_r, tmp, &nread, pos);
        if (err)
            goto fail;

        if (copy_to_user (buf, tmp, nread)) {
            err = -EFAULT;
            goto fail;
        }

        pos += nread;
        left -= nread;
        buf += nread;
        total_bytes_read += nread;
        /* A short read from the host means end of file: stop. */
        if (nread != to_read) {
            break;
        }
    }

    *off += total_bytes_read;
    free_bounch_buffer (tmp);
    return total_bytes_read;

 fail:
    free_bounch_buffer (tmp);
    return err;
}
164
/**
 * Write to a regular file on a shared folder
 * (file_operations::write).
 *
 * User data is copied into a physically contiguous bounce buffer and
 * handed to the host one chunk at a time.
 *
 * @param file  File to write to; private_data holds the sf_reg_info.
 * @param buf   User-space source buffer.
 * @param size  Number of bytes to write.
 * @param off   File offset to write at; advanced by the byte count written.
 * @returns Number of bytes written, or a negative errno on failure.
 */
static ssize_t
sf_reg_write (struct file *file, const char *buf, size_t size, loff_t *off)
{
    int err;
    void *tmp;
    RTCCPHYS tmp_phys;
    size_t tmp_size;
    size_t left = size;
    ssize_t total_bytes_written = 0;
    struct inode *inode = file->f_dentry->d_inode;
    struct sf_inode_info *sf_i = GET_INODE_INFO (inode);
    struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    loff_t pos;

    TRACE ();
    BUG_ON (!sf_i);
    BUG_ON (!sf_g);
    BUG_ON (!sf_r);

    if (!S_ISREG (inode->i_mode)) {
        LogFunc(("write to non regular file %d\n", inode->i_mode));
        return -EINVAL;
    }

    pos = *off;
    /* O_APPEND: always write at the current end of file. */
    if (file->f_flags & O_APPEND)
    {
        pos = inode->i_size;
        *off = pos;
    }

    /** XXX Check write permission according to inode->i_mode! */

    if (!size)
        return 0;

    tmp = alloc_bounch_buffer (&tmp_size, &tmp_phys, size, __PRETTY_FUNCTION__);
    if (!tmp)
        return -ENOMEM;

    while (left) {
        uint32_t to_write, nwritten;

        /* Write at most one bounce buffer's worth per host call. */
        to_write = tmp_size;
        if (to_write > left) {
            to_write = (uint32_t) left;
        }
        nwritten = to_write;

        if (copy_from_user (tmp, buf, to_write)) {
            err = -EFAULT;
            goto fail;
        }

#if 1
        /* Fast path: pass the buffer's physical address when the host
         * supports physical page lists. */
        if (VbglR0CanUsePhysPageList()) {
            err = VbglR0SfWritePhysCont (&client_handle, &sf_g->map, sf_r->handle,
                                         pos, &nwritten, tmp_phys);
            err = RT_FAILURE(err) ? -EPROTO : 0;
        } else
#endif
            err = sf_reg_write_aux (__func__, sf_g, sf_r, tmp, &nwritten, pos);
        if (err)
            goto fail;

        pos += nwritten;
        left -= nwritten;
        buf += nwritten;
        total_bytes_written += nwritten;
        /* Short write: the host could not take more, stop. */
        if (nwritten != to_write)
            break;
    }

    *off += total_bytes_written;
    if (*off > inode->i_size)
        inode->i_size = *off;

    /* Host-side attributes changed; force a re-stat on next lookup. */
    sf_i->force_restat = 1;
    free_bounch_buffer (tmp);
    return total_bytes_written;

 fail:
    free_bounch_buffer (tmp);
    return err;
}
251
/**
 * Open a regular file on a shared folder (file_operations::open).
 *
 * Translates the Linux open flags into SHFL_CF_* create flags and asks
 * the host to open/create the file.  If the inode already carries a
 * host handle (left there by sf_create_aux() when the file was just
 * created), that handle is reused instead of opening a second time.
 *
 * @param inode  Inode being opened.
 * @param file   File structure; private_data receives the sf_reg_info.
 * @returns 0 on success, a negative errno on failure.
 */
static int
sf_reg_open (struct inode *inode, struct file *file)
{
    int rc, rc_linux = 0;
    struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
    struct sf_inode_info *sf_i = GET_INODE_INFO (inode);
    struct sf_reg_info *sf_r;
    SHFLCREATEPARMS params;

    TRACE ();
    BUG_ON (!sf_g);
    BUG_ON (!sf_i);

    LogFunc(("open %s\n", sf_i->path->String.utf8));

    sf_r = kmalloc (sizeof (*sf_r), GFP_KERNEL);
    if (!sf_r) {
        LogRelFunc(("could not allocate reg info\n"));
        return -ENOMEM;
    }

    /* Already open? */
    if (sf_i->handle != SHFL_HANDLE_NIL)
    {
        /*
         * This inode was created with sf_create_aux(). Check the CreateFlags:
         * O_CREAT, O_TRUNC: inherent true (file was just created). Not sure
         * about the access flags (SHFL_CF_ACCESS_*).
         */
        sf_i->force_restat = 1;
        sf_r->handle = sf_i->handle;
        sf_i->handle = SHFL_HANDLE_NIL;
        sf_i->file = file;
        file->private_data = sf_r;
        return 0;
    }

    RT_ZERO(params);
    params.Handle = SHFL_HANDLE_NIL;
    /* We check the value of params.Handle afterwards to find out if
     * the call succeeded or failed, as the API does not seem to cleanly
     * distinguish error and informational messages.
     *
     * Furthermore, we must set params.Handle to SHFL_HANDLE_NIL to
     * make the shared folders host service use our fMode parameter */

    if (file->f_flags & O_CREAT) {
        LogFunc(("O_CREAT set\n"));
        params.CreateFlags |= SHFL_CF_ACT_CREATE_IF_NEW;
        /* We ignore O_EXCL, as the Linux kernel seems to call create
           beforehand itself, so O_EXCL should always fail. */
        if (file->f_flags & O_TRUNC) {
            LogFunc(("O_TRUNC set\n"));
            params.CreateFlags |= (  SHFL_CF_ACT_OVERWRITE_IF_EXISTS
                                   | SHFL_CF_ACCESS_WRITE);
        }
        else {
            params.CreateFlags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
        }
    }
    else {
        params.CreateFlags |= SHFL_CF_ACT_FAIL_IF_NEW;
        if (file->f_flags & O_TRUNC) {
            LogFunc(("O_TRUNC set\n"));
            params.CreateFlags |= (  SHFL_CF_ACT_OVERWRITE_IF_EXISTS
                                   | SHFL_CF_ACCESS_WRITE);
        }
    }

    /* Map O_RDONLY/O_WRONLY/O_RDWR onto SHFL access flags unless
     * truncation above already requested write access. */
    if (!(params.CreateFlags & SHFL_CF_ACCESS_READWRITE)) {
        switch (file->f_flags & O_ACCMODE) {
            case O_RDONLY:
                params.CreateFlags |= SHFL_CF_ACCESS_READ;
                break;

            case O_WRONLY:
                params.CreateFlags |= SHFL_CF_ACCESS_WRITE;
                break;

            case O_RDWR:
                params.CreateFlags |= SHFL_CF_ACCESS_READWRITE;
                break;

            default:
                BUG ();
        }
    }

    if (file->f_flags & O_APPEND) {
        LogFunc(("O_APPEND set\n"));
        params.CreateFlags |= SHFL_CF_ACCESS_APPEND;
    }

    params.Info.Attr.fMode = inode->i_mode;
    LogFunc(("sf_reg_open: calling vboxCallCreate, file %s, flags=%#x, %#x\n",
             sf_i->path->String.utf8 , file->f_flags, params.CreateFlags));
    rc = vboxCallCreate (&client_handle, &sf_g->map, sf_i->path, &params);

    if (RT_FAILURE (rc)) {
        LogFunc(("vboxCallCreate failed flags=%d,%#x rc=%Rrc\n",
                 file->f_flags, params.CreateFlags, rc));
        kfree (sf_r);
        return -RTErrConvertToErrno(rc);
    }

    /* A NIL handle with a successful status means the host reported an
     * informational result; map the interesting ones to errnos. */
    if (SHFL_HANDLE_NIL == params.Handle) {
        switch (params.Result) {
            case SHFL_PATH_NOT_FOUND:
            case SHFL_FILE_NOT_FOUND:
                rc_linux = -ENOENT;
                break;
            case SHFL_FILE_EXISTS:
                rc_linux = -EEXIST;
                break;
            default:
                break;
        }
    }

    sf_i->force_restat = 1;
    sf_r->handle = params.Handle;
    sf_i->file = file;
    file->private_data = sf_r;
    return rc_linux;
}
377
378static int
379sf_reg_release (struct inode *inode, struct file *file)
380{
381 int rc;
382 struct sf_reg_info *sf_r;
383 struct sf_glob_info *sf_g;
384 struct sf_inode_info *sf_i = GET_INODE_INFO (inode);
385
386 TRACE ();
387 sf_g = GET_GLOB_INFO (inode->i_sb);
388 sf_r = file->private_data;
389
390 BUG_ON (!sf_g);
391 BUG_ON (!sf_r);
392
393 rc = vboxCallClose (&client_handle, &sf_g->map, sf_r->handle);
394 if (RT_FAILURE (rc)) {
395 LogFunc(("vboxCallClose failed rc=%Rrc\n", rc));
396 }
397
398 kfree (sf_r);
399 sf_i->file = NULL;
400 sf_i->handle = SHFL_HANDLE_NIL;
401 file->private_data = NULL;
402 return 0;
403}
404
/**
 * Page fault handler for private (COW) mappings of shared folder files.
 *
 * Allocates a fresh page, fills it by reading the host file at the
 * faulting offset, and hands it back to the VM.  Depending on kernel
 * version this is either a ->fault (> 2.6.25) or ->nopage handler.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
static int
sf_reg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 0)
static struct page *
sf_reg_nopage (struct vm_area_struct *vma, unsigned long vaddr, int *type)
# define SET_TYPE(t) *type = (t)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION (2, 6, 0) */
static struct page *
sf_reg_nopage (struct vm_area_struct *vma, unsigned long vaddr, int unused)
# define SET_TYPE(t)
#endif
{
    struct page *page;
    char *buf;
    loff_t off;
    uint32_t nread = PAGE_SIZE;
    int err;
    struct file *file = vma->vm_file;
    struct inode *inode = file->f_dentry->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;

    TRACE ();
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
    /* NOTE(review): this compares a page offset (pgoff) against an end
     * address (vm_end); it looks like it should be checked against the
     * vma's page range instead — confirm before changing. */
    if (vmf->pgoff > vma->vm_end)
        return VM_FAULT_SIGBUS;
#else
    if (vaddr > vma->vm_end) {
        SET_TYPE (VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
    }
#endif

    page = alloc_page (GFP_HIGHUSER);
    if (!page) {
        LogRelFunc(("failed to allocate page\n"));
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
        return VM_FAULT_OOM;
#else
        SET_TYPE (VM_FAULT_OOM);
        return NOPAGE_OOM;
#endif
    }

    buf = kmap (page);
    /* Compute the file offset of the faulting page. */
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
    off = (vmf->pgoff << PAGE_SHIFT);
#else
    off = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
#endif
    err = sf_reg_read_aux (__func__, sf_g, sf_r, buf, &nread, off);
    if (err) {
        kunmap (page);
        put_page (page);
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
        return VM_FAULT_SIGBUS;
#else
        SET_TYPE (VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
#endif
    }

    BUG_ON (nread > PAGE_SIZE);
    if (!nread) {
        /* Nothing read (beyond EOF): hand back a zeroed page. */
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
        clear_user_page (page_address (page), vmf->pgoff, page);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 0)
        clear_user_page (page_address (page), vaddr, page);
#else
        clear_user_page (page_address (page), vaddr);
#endif
    }
    else {
        /* Zero the tail of a partially filled page. */
        memset (buf + nread, 0, PAGE_SIZE - nread);
    }

    flush_dcache_page (page);
    kunmap (page);
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
    vmf->page = page;
    return 0;
#else
    SET_TYPE (VM_FAULT_MAJOR);
    return page;
#endif
}
492
/** VM operations installed by sf_reg_mmap(); pages are populated by the
 *  fault/nopage handler reading directly from the host. */
static struct vm_operations_struct sf_vma_ops = {
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
    .fault = sf_reg_fault
#else
    .nopage = sf_reg_nopage
#endif
};
500
501static int
502sf_reg_mmap (struct file *file, struct vm_area_struct *vma)
503{
504 TRACE ();
505 if (vma->vm_flags & VM_SHARED) {
506 LogFunc(("shared mmapping not available\n"));
507 return -EINVAL;
508 }
509
510 vma->vm_ops = &sf_vma_ops;
511 return 0;
512}
513
/** File operations for regular files on shared folders. */
struct file_operations sf_reg_fops = {
    .read = sf_reg_read,
    .open = sf_reg_open,
    .write = sf_reg_write,
    .release = sf_reg_release,
    .mmap = sf_reg_mmap,
#if LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 0)
# if LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 23)
    .splice_read = generic_file_splice_read,
# else
    .sendfile = generic_file_sendfile,
# endif
    .aio_read = generic_file_aio_read,
    .aio_write = generic_file_aio_write,
    .fsync = simple_sync_file,
    .llseek = generic_file_llseek,
#endif
};
532
533
/** Inode operations for regular files on shared folders. */
struct inode_operations sf_reg_iops = {
#if LINUX_VERSION_CODE < KERNEL_VERSION (2, 6, 0)
    .revalidate = sf_inode_revalidate
#else
    .getattr = sf_getattr,
    .setattr = sf_setattr
#endif
};
542
543
544#if LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 0)
/**
 * Fill one page-cache page from the host file
 * (address_space_operations::readpage).
 *
 * Reads up to PAGE_SIZE bytes at the page's file offset, zeroes the
 * tail beyond EOF, marks the page up to date and unlocks it.
 *
 * @returns 0 on success, negative errno from the host read on failure.
 */
static int
sf_readpage(struct file *file, struct page *page)
{
    struct inode *inode = file->f_dentry->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;
    uint32_t nread = PAGE_SIZE;
    char *buf;
    loff_t off = ((loff_t)page->index) << PAGE_SHIFT;
    int ret;

    TRACE ();

    buf = kmap(page);
    ret = sf_reg_read_aux (__func__, sf_g, sf_r, buf, &nread, off);
    if (ret) {
        /* NOTE(review): the error path neither sets PageError nor
         * clears PageUptodate — confirm callers handle this. */
        kunmap (page);
        if (PageLocked(page))
            unlock_page(page);
        return ret;
    }
    BUG_ON (nread > PAGE_SIZE);
    /* Zero the tail of the page beyond what the host returned. */
    memset(&buf[nread], 0, PAGE_SIZE - nread);
    flush_dcache_page (page);
    kunmap (page);
    SetPageUptodate(page);
    unlock_page(page);
    return 0;
}
574
/**
 * Write one dirty page-cache page back to the host file
 * (address_space_operations::writepage).
 *
 * Uses the struct file cached in the inode info (sf_i->file), since
 * writepage has no file argument but a host handle is required.
 *
 * @returns 0 on success, negative errno on failure; the page is
 *          unlocked in all cases.
 */
static int
sf_writepage(struct page *page, struct writeback_control *wbc)
{
    struct address_space *mapping = page->mapping;
    struct inode *inode = mapping->host;
    struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
    struct sf_inode_info *sf_i = GET_INODE_INFO (inode);
    struct file *file = sf_i->file;
    struct sf_reg_info *sf_r = file->private_data;
    char *buf;
    uint32_t nwritten = PAGE_SIZE;
    int end_index = inode->i_size >> PAGE_SHIFT;
    loff_t off = ((loff_t) page->index) << PAGE_SHIFT;
    int err;

    TRACE ();

    /* Last (partial) page: only write up to EOF.
     * NOTE(review): if i_size is an exact multiple of PAGE_SIZE this
     * yields nwritten = 0 for the page at end_index — confirm that
     * case cannot occur with a dirty page. */
    if (page->index >= end_index)
        nwritten = inode->i_size & (PAGE_SIZE-1);

    buf = kmap(page);

    err = sf_reg_write_aux (__func__, sf_g, sf_r, buf, &nwritten, off);
    if (err < 0) {
        ClearPageUptodate(page);
        goto out;
    }

    if (off > inode->i_size)
        inode->i_size = off;

    if (PageError(page))
        ClearPageError(page);
    err = 0;
out:
    kunmap(page);

    unlock_page(page);
    return err;
}
615
616# if LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 24)
617int
618sf_write_begin(struct file *file, struct address_space *mapping, loff_t pos,
619 unsigned len, unsigned flags, struct page **pagep, void **fsdata)
620{
621 TRACE ();
622
623 return simple_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
624}
625
626int
627sf_write_end(struct file *file, struct address_space *mapping, loff_t pos,
628 unsigned len, unsigned copied, struct page *page, void *fsdata)
629{
630 struct inode *inode = mapping->host;
631 struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
632 struct sf_reg_info *sf_r = file->private_data;
633 void *buf;
634 unsigned from = pos & (PAGE_SIZE - 1);
635 uint32_t nwritten = len;
636 int err;
637
638 TRACE ();
639
640 buf = kmap(page);
641 err = sf_reg_write_aux (__func__, sf_g, sf_r, buf+from, &nwritten, pos);
642 kunmap(page);
643
644 if (!PageUptodate(page) && err == PAGE_SIZE)
645 SetPageUptodate(page);
646
647 if (err >= 0) {
648 pos += nwritten;
649 if (pos > inode->i_size)
650 inode->i_size = pos;
651 }
652
653 unlock_page(page);
654 page_cache_release(page);
655
656 return nwritten;
657}
658
659# endif /* KERNEL_VERSION >= 2.6.24 */
660
/** Address space (page cache) operations for regular shared folder files. */
struct address_space_operations sf_reg_aops = {
    .readpage = sf_readpage,
    .writepage = sf_writepage,
# if LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 24)
    .write_begin = sf_write_begin,
    .write_end = sf_write_end,
# else
    .prepare_write = simple_prepare_write,
    .commit_write = simple_commit_write,
# endif
};
672#endif
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette