VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/sharedfolders/regops.c@ 21325

Last change on this file since 21325 was 21233, checked in by vboxsync, 16 years ago

linux sf: two @todos.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 18.3 KB
Line 
1/** @file
2 *
3 * vboxvfs -- VirtualBox Guest Additions for Linux:
4 * Regular file inode and file operations
5 */
6
7/*
8 * Copyright (C) 2006-2007 Sun Microsystems, Inc.
9 *
10 * This file is part of VirtualBox Open Source Edition (OSE), as
11 * available from http://www.virtualbox.org. This file is free software;
12 * you can redistribute it and/or modify it under the terms of the GNU
13 * General Public License (GPL) as published by the Free Software
14 * Foundation, in version 2 as it comes in the "COPYING" file of the
15 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
16 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
17 *
18 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
19 * Clara, CA 95054 USA or visit http://www.sun.com if you need
20 * additional information or have any questions.
21 */
22
23/*
24 * Limitations: only COW memory mapping is supported
25 */
26
27#include "vfsmod.h"
28
29#define CHUNK_SIZE 4096
30
31/* fops */
32static int
33sf_reg_read_aux (const char *caller, struct sf_glob_info *sf_g,
34 struct sf_reg_info *sf_r, void *buf, uint32_t *nread,
35 uint64_t pos)
36{
37 /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
38 * contiguous in physical memory (kmalloc or single page), we should
39 * use a physical address here to speed things up. */
40 int rc = vboxCallRead (&client_handle, &sf_g->map, sf_r->handle,
41 pos, nread, buf, false /* already locked? */);
42 if (RT_FAILURE (rc)) {
43 LogFunc(("vboxCallRead failed. caller=%s, rc=%Rrc\n",
44 caller, rc));
45 return -EPROTO;
46 }
47 return 0;
48}
49
50static int
51sf_reg_write_aux (const char *caller, struct sf_glob_info *sf_g,
52 struct sf_reg_info *sf_r, void *buf, uint32_t *nwritten,
53 uint64_t pos)
54{
55 /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is
56 * contiguous in physical memory (kmalloc or single page), we should
57 * use a physical address here to speed things up. */
58 int rc = vboxCallWrite (&client_handle, &sf_g->map, sf_r->handle,
59 pos, nwritten, buf, false /* already locked? */);
60 if (RT_FAILURE (rc)) {
61 LogFunc(("vboxCallWrite failed. caller=%s, rc=%Rrc\n",
62 caller, rc));
63 return -EPROTO;
64 }
65 return 0;
66}
67
68static ssize_t
69sf_reg_read (struct file *file, char *buf, size_t size, loff_t *off)
70{
71 int err;
72 void *tmp;
73 size_t left = size;
74 ssize_t total_bytes_read = 0;
75 struct inode *inode = file->f_dentry->d_inode;
76 struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
77 struct sf_reg_info *sf_r = file->private_data;
78 loff_t pos = *off;
79
80 TRACE ();
81 if (!S_ISREG (inode->i_mode)) {
82 LogFunc(("read from non regular file %d\n", inode->i_mode));
83 return -EINVAL;
84 }
85
86 /** XXX Check read permission accoring to inode->i_mode! */
87
88 if (!size) {
89 return 0;
90 }
91
92 tmp = kmalloc (CHUNK_SIZE, GFP_KERNEL);
93 if (!tmp) {
94 LogRelFunc(("could not allocate bounce buffer memory %d bytes\n", CHUNK_SIZE));
95 return -ENOMEM;
96 }
97
98 while (left) {
99 uint32_t to_read, nread;
100
101 to_read = CHUNK_SIZE;
102 if (to_read > left) {
103 to_read = (uint32_t) left;
104 }
105 nread = to_read;
106
107 err = sf_reg_read_aux (__func__, sf_g, sf_r, tmp, &nread, pos);
108 if (err)
109 goto fail;
110
111 if (copy_to_user (buf, tmp, nread)) {
112 err = -EFAULT;
113 goto fail;
114 }
115
116 pos += nread;
117 left -= nread;
118 buf += nread;
119 total_bytes_read += nread;
120 if (nread != to_read) {
121 break;
122 }
123 }
124
125 *off += total_bytes_read;
126 kfree (tmp);
127 return total_bytes_read;
128
129 fail:
130 kfree (tmp);
131 return err;
132}
133
134static ssize_t
135sf_reg_write (struct file *file, const char *buf, size_t size, loff_t *off)
136{
137 int err;
138 void *tmp;
139 size_t left = size;
140 ssize_t total_bytes_written = 0;
141 struct inode *inode = file->f_dentry->d_inode;
142 struct sf_inode_info *sf_i = GET_INODE_INFO (inode);
143 struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
144 struct sf_reg_info *sf_r = file->private_data;
145 loff_t pos;
146
147 TRACE ();
148 BUG_ON (!sf_i);
149 BUG_ON (!sf_g);
150 BUG_ON (!sf_r);
151
152 if (!S_ISREG (inode->i_mode)) {
153 LogFunc(("write to non regular file %d\n", inode->i_mode));
154 return -EINVAL;
155 }
156
157 pos = *off;
158 if (file->f_flags & O_APPEND)
159 pos += inode->i_size;
160
161 /** XXX Check write permission accoring to inode->i_mode! */
162
163 if (!size)
164 return 0;
165
166 tmp = kmalloc (CHUNK_SIZE, GFP_KERNEL);
167 if (!tmp) {
168 LogRelFunc(("could not allocate bounce buffer memory %d bytes\n", CHUNK_SIZE));
169 return -ENOMEM;
170 }
171
172 while (left) {
173 uint32_t to_write, nwritten;
174
175 to_write = CHUNK_SIZE;
176 if (to_write > left) {
177 to_write = (uint32_t) left;
178 }
179 nwritten = to_write;
180
181 if (copy_from_user (tmp, buf, to_write)) {
182 err = -EFAULT;
183 goto fail;
184 }
185
186 err = sf_reg_write_aux (__func__, sf_g, sf_r, tmp, &nwritten, pos);
187 if (err)
188 goto fail;
189
190 pos += nwritten;
191 left -= nwritten;
192 buf += nwritten;
193 total_bytes_written += nwritten;
194 if (nwritten != to_write)
195 break;
196 }
197
198#if 1 /* XXX: which way is correct? */
199 *off += total_bytes_written;
200#else
201 file->f_pos += total_bytes_written;
202#endif
203 sf_i->force_restat = 1;
204 kfree (tmp);
205 return total_bytes_written;
206
207 fail:
208 kfree (tmp);
209 return err;
210}
211
212static int
213sf_reg_open (struct inode *inode, struct file *file)
214{
215 int rc, rc_linux = 0;
216 struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
217 struct sf_inode_info *sf_i = GET_INODE_INFO (inode);
218 struct sf_reg_info *sf_r;
219 SHFLCREATEPARMS params;
220
221 TRACE ();
222 BUG_ON (!sf_g);
223 BUG_ON (!sf_i);
224
225 LogFunc(("open %s\n", sf_i->path->String.utf8));
226
227 sf_r = kmalloc (sizeof (*sf_r), GFP_KERNEL);
228 if (!sf_r) {
229 LogRelFunc(("could not allocate reg info\n"));
230 return -ENOMEM;
231 }
232
233 memset(&params, 0, sizeof(params));
234 params.Handle = SHFL_HANDLE_NIL;
235 /* We check the value of params.Handle afterwards to find out if
236 * the call succeeded or failed, as the API does not seem to cleanly
237 * distinguish error and informational messages.
238 *
239 * Furthermore, we must set params.Handle to SHFL_HANDLE_NIL to
240 * make the shared folders host service use our fMode parameter */
241
242 if (file->f_flags & O_CREAT) {
243 LogFunc(("O_CREAT set\n"));
244 params.CreateFlags |= SHFL_CF_ACT_CREATE_IF_NEW;
245 /* We ignore O_EXCL, as the Linux kernel seems to call create
246 beforehand itself, so O_EXCL should always fail. */
247 if (file->f_flags & O_TRUNC) {
248 LogFunc(("O_TRUNC set\n"));
249 params.CreateFlags |= ( SHFL_CF_ACT_OVERWRITE_IF_EXISTS
250 | SHFL_CF_ACCESS_WRITE);
251 }
252 else {
253 params.CreateFlags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
254 }
255 }
256 else {
257 params.CreateFlags |= SHFL_CF_ACT_FAIL_IF_NEW;
258 if (file->f_flags & O_TRUNC) {
259 LogFunc(("O_TRUNC set\n"));
260 params.CreateFlags |= ( SHFL_CF_ACT_OVERWRITE_IF_EXISTS
261 | SHFL_CF_ACCESS_WRITE);
262 }
263 }
264
265 if (!(params.CreateFlags & SHFL_CF_ACCESS_READWRITE)) {
266 switch (file->f_flags & O_ACCMODE) {
267 case O_RDONLY:
268 params.CreateFlags |= SHFL_CF_ACCESS_READ;
269 break;
270
271 case O_WRONLY:
272 params.CreateFlags |= SHFL_CF_ACCESS_WRITE;
273 break;
274
275 case O_RDWR:
276 params.CreateFlags |= SHFL_CF_ACCESS_READWRITE;
277 break;
278
279 default:
280 BUG ();
281 }
282 }
283
284 params.Info.Attr.fMode = inode->i_mode;
285 LogFunc(("sf_reg_open: calling vboxCallCreate, file %s, flags=%d, %#x\n",
286 sf_i->path->String.utf8 , file->f_flags, params.CreateFlags));
287 rc = vboxCallCreate (&client_handle, &sf_g->map, sf_i->path, &params);
288
289 if (RT_FAILURE (rc)) {
290 LogFunc(("vboxCallCreate failed flags=%d,%#x rc=%Rrc\n",
291 file->f_flags, params.CreateFlags, rc));
292 kfree (sf_r);
293 return -RTErrConvertToErrno(rc);
294 }
295
296 if (SHFL_HANDLE_NIL == params.Handle) {
297 switch (params.Result) {
298 case SHFL_PATH_NOT_FOUND:
299 case SHFL_FILE_NOT_FOUND:
300 rc_linux = -ENOENT;
301 break;
302 case SHFL_FILE_EXISTS:
303 rc_linux = -EEXIST;
304 break;
305 default:
306 break;
307 }
308 }
309
310 sf_i->force_restat = 1;
311 sf_r->handle = params.Handle;
312 sf_i->file = file;
313 file->private_data = sf_r;
314 return rc_linux;
315}
316
317static int
318sf_reg_release (struct inode *inode, struct file *file)
319{
320 int rc;
321 struct sf_reg_info *sf_r;
322 struct sf_glob_info *sf_g;
323 struct sf_inode_info *sf_i = GET_INODE_INFO (inode);
324
325 TRACE ();
326 sf_g = GET_GLOB_INFO (inode->i_sb);
327 sf_r = file->private_data;
328
329 BUG_ON (!sf_g);
330 BUG_ON (!sf_r);
331
332 rc = vboxCallClose (&client_handle, &sf_g->map, sf_r->handle);
333 if (RT_FAILURE (rc)) {
334 LogFunc(("vboxCallClose failed rc=%Rrc\n", rc));
335 }
336
337 kfree (sf_r);
338 sf_i->file = NULL;
339 file->private_data = NULL;
340 return 0;
341}
342
/* Page-fault handler for private (COW) mappings of shared-folder files.
 * One body is compiled into three kernel-API shapes:
 *  - > 2.6.25:      .fault, takes a struct vm_fault, returns VM_FAULT_* codes;
 *  - 2.6.0..2.6.25: .nopage with an out-parameter 'type', returns a page;
 *  - < 2.6.0:       .nopage with an unused third argument, returns a page.
 * It allocates a fresh page, fills it by reading from the host, and hands
 * it to the MM layer. */
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
static int
sf_reg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 0)
static struct page *
sf_reg_nopage (struct vm_area_struct *vma, unsigned long vaddr, int *type)
# define SET_TYPE(t) *type = (t)
#else /* LINUX_VERSION_CODE < KERNEL_VERSION (2, 6, 0) */
static struct page *
sf_reg_nopage (struct vm_area_struct *vma, unsigned long vaddr, int unused)
# define SET_TYPE(t)
#endif
{
    struct page *page;
    char *buf;
    loff_t off;
    uint32_t nread = PAGE_SIZE;        /* in/out: bytes requested/read */
    int err;
    struct file *file = vma->vm_file;
    struct inode *inode = file->f_dentry->d_inode;
    struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
    struct sf_reg_info *sf_r = file->private_data;

    TRACE ();
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
    /* NOTE(review): this compares a page offset (pgoff) against an end
     * ADDRESS (vm_end); the units differ, so the bound looks wrong —
     * confirm the intended range check. */
    if (vmf->pgoff > vma->vm_end)
        return VM_FAULT_SIGBUS;
#else
    if (vaddr > vma->vm_end) {
        SET_TYPE (VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
    }
#endif

    page = alloc_page (GFP_HIGHUSER);
    if (!page) {
        LogRelFunc(("failed to allocate page\n"));
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
        return VM_FAULT_OOM;
#else
        SET_TYPE (VM_FAULT_OOM);
        return NOPAGE_OOM;
#endif
    }

    buf = kmap (page);
    /* Compute the file offset backing this page. */
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
    off = (vmf->pgoff << PAGE_SHIFT);
#else
    off = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
#endif
    err = sf_reg_read_aux (__func__, sf_g, sf_r, buf, &nread, off);
    if (err) {
        kunmap (page);
        put_page (page);
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
        return VM_FAULT_SIGBUS;
#else
        SET_TYPE (VM_FAULT_SIGBUS);
        return NOPAGE_SIGBUS;
#endif
    }

    BUG_ON (nread > PAGE_SIZE);
    if (!nread) {
        /* Nothing read (past EOF): hand out a zeroed page. */
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
        clear_user_page (page_address (page), vmf->pgoff, page);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 0)
        clear_user_page (page_address (page), vaddr, page);
#else
        clear_user_page (page_address (page), vaddr);
#endif
    }
    else {
        /* Zero-fill the tail of a short read so no stale data leaks. */
        memset (buf + nread, 0, PAGE_SIZE - nread);
    }

    flush_dcache_page (page);
    kunmap (page);
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
    vmf->page = page;
    return 0;
#else
    SET_TYPE (VM_FAULT_MAJOR);
    return page;
#endif
}
430
/* VM operations for mmap'ed shared-folder files: only the demand-paging
 * hook is provided, under whichever name this kernel generation uses. */
static struct vm_operations_struct sf_vma_ops = {
#if LINUX_VERSION_CODE > KERNEL_VERSION (2, 6, 25)
    .fault = sf_reg_fault
#else
    .nopage = sf_reg_nopage
#endif
};
438
439static int
440sf_reg_mmap (struct file *file, struct vm_area_struct *vma)
441{
442 TRACE ();
443 if (vma->vm_flags & VM_SHARED) {
444 LogFunc(("shared mmapping not available\n"));
445 return -EINVAL;
446 }
447
448 vma->vm_ops = &sf_vma_ops;
449 return 0;
450}
451
/* File operations for shared-folder regular files.
 * NOTE(review): read/write go straight to the host via bounce buffers,
 * while the generic aio/splice helpers below presumably go through the
 * page cache — verify the two paths stay coherent. */
struct file_operations sf_reg_fops = {
    .read        = sf_reg_read,
    .open        = sf_reg_open,
    .write       = sf_reg_write,
    .release     = sf_reg_release,
    .mmap        = sf_reg_mmap,
#if LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 0)
# if LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 23)
    /* sendfile was replaced by splice_read in 2.6.23. */
    .splice_read = generic_file_splice_read,
# else
    .sendfile    = generic_file_sendfile,
# endif
    .aio_read    = generic_file_aio_read,
    .aio_write   = generic_file_aio_write,
    .fsync       = simple_sync_file,
    .llseek      = generic_file_llseek,
#endif
};
470
471
/* Inode operations for shared-folder regular files: attribute handling
 * only (2.4 kernels use revalidate; 2.6+ use getattr/setattr). */
struct inode_operations sf_reg_iops = {
#if LINUX_VERSION_CODE < KERNEL_VERSION (2, 6, 0)
    .revalidate = sf_inode_revalidate
#else
    .getattr    = sf_getattr,
    .setattr    = sf_setattr
#endif
};
480
481
482#if LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 0)
483static int
484sf_readpage(struct file *file, struct page *page)
485{
486 struct inode *inode = file->f_dentry->d_inode;
487 struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
488 struct sf_reg_info *sf_r = file->private_data;
489 uint32_t nread = PAGE_SIZE;
490 char *buf;
491 loff_t off = ((loff_t)page->index) << PAGE_SHIFT;
492 int ret;
493
494 TRACE ();
495
496 buf = kmap(page);
497 ret = sf_reg_read_aux (__func__, sf_g, sf_r, buf, &nread, off);
498 if (ret) {
499 kunmap (page);
500 if (PageLocked(page))
501 unlock_page(page);
502 return ret;
503 }
504 BUG_ON (nread > PAGE_SIZE);
505 memset(&buf[nread], 0, PAGE_SIZE - nread);
506 flush_dcache_page (page);
507 kunmap (page);
508 SetPageUptodate(page);
509 unlock_page(page);
510 return 0;
511}
512
513static int
514sf_writepage(struct page *page, struct writeback_control *wbc)
515{
516 struct address_space *mapping = page->mapping;
517 struct inode *inode = mapping->host;
518 struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
519 struct sf_inode_info *sf_i = GET_INODE_INFO (inode);
520 struct file *file = sf_i->file;
521 struct sf_reg_info *sf_r = file->private_data;
522 char *buf;
523 uint32_t nwritten = PAGE_SIZE;
524 int end_index = inode->i_size >> PAGE_SHIFT;
525 loff_t off = ((loff_t) page->index) << PAGE_SHIFT;
526 int err;
527
528 TRACE ();
529
530 if (page->index >= end_index)
531 nwritten = inode->i_size & (PAGE_SIZE-1);
532
533 buf = kmap(page);
534
535 err = sf_reg_write_aux (__func__, sf_g, sf_r, buf, &nwritten, off);
536 if (err < 0) {
537 ClearPageUptodate(page);
538 goto out;
539 }
540
541 if (off > inode->i_size)
542 inode->i_size = off;
543
544 if (PageError(page))
545 ClearPageError(page);
546 err = 0;
547out:
548 kunmap(page);
549
550 unlock_page(page);
551 return err;
552}
553
554# if LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 24)
555int
556sf_write_begin(struct file *file, struct address_space *mapping, loff_t pos,
557 unsigned len, unsigned flags, struct page **pagep, void **fsdata)
558{
559 TRACE ();
560
561 return simple_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
562}
563
564int
565sf_write_end(struct file *file, struct address_space *mapping, loff_t pos,
566 unsigned len, unsigned copied, struct page *page, void *fsdata)
567{
568 struct inode *inode = mapping->host;
569 struct sf_glob_info *sf_g = GET_GLOB_INFO (inode->i_sb);
570 struct sf_reg_info *sf_r = file->private_data;
571 void *buf;
572 unsigned from = pos & (PAGE_SIZE - 1);
573 uint32_t nwritten = len;
574 int err;
575
576 TRACE ();
577
578 buf = kmap(page);
579 err = sf_reg_write_aux (__func__, sf_g, sf_r, buf+from, &nwritten, pos);
580 kunmap(page);
581
582 if (!PageUptodate(page) && err == PAGE_SIZE)
583 SetPageUptodate(page);
584
585 if (err >= 0) {
586 pos += nwritten;
587 if (pos > inode->i_size)
588 inode->i_size = pos;
589 }
590
591 unlock_page(page);
592 page_cache_release(page);
593
594 return nwritten;
595}
596
597# endif /* KERNEL_VERSION >= 2.6.24 */
598
/* Address-space operations for shared-folder files: page-cache read/write
 * back ends, with the write_begin/write_end pair on 2.6.24+ and the older
 * prepare_write/commit_write helpers before that. */
struct address_space_operations sf_reg_aops = {
    .readpage      = sf_readpage,
    .writepage     = sf_writepage,
# if LINUX_VERSION_CODE >= KERNEL_VERSION (2, 6, 24)
    .write_begin   = sf_write_begin,
    .write_end     = sf_write_end,
# else
    .prepare_write = simple_prepare_write,
    .commit_write  = simple_commit_write,
# endif
};
610#endif
Note: See TracBrowser for help on using the repository browser.

© 2025 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette