VirtualBox

source: vbox/trunk/src/VBox/Additions/linux/sharedfolders/regops.c@ 77873

Last change on this file was r77873, checked in by vboxsync on 2019-03-26:

linux/vboxsf: Kicked out generic_file_aio_read and generic_file_aio_write for 2.6.23-2.6.31 as they broke mmap coherency, using the read_iter/write_iter code with added glue instead and setting .aio_read/write for 2.6.19-3.16 (when they changed name to _iter). Also did some dir entry cache TTL tweaking. bugref:9172
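To illustrate the wiring the change description refers to, here is a minimal sketch (handler names are placeholders, not necessarily the ones used further down in this file) of how a file_operations table can select the read_iter/write_iter entry points on 3.16+ kernels while hooking the same code up via .aio_read/.aio_write glue on 2.6.19-3.15:

/* Illustrative sketch only; the handler names below are placeholders. */
#include <linux/fs.h>
#include <linux/version.h>

static struct file_operations vbsf_example_reg_fops = {
    .read  = vbsf_reg_read,               /* classic entry points, all kernel versions */
    .write = vbsf_reg_write,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
    .read_iter  = vbsf_reg_read_iter,     /* 3.16+: iterator based I/O */
    .write_iter = vbsf_reg_write_iter,
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
    .aio_read   = vbsf_reg_aio_read,      /* 2.6.19-3.15: same iter code reached via aio glue */
    .aio_write  = vbsf_reg_aio_write,
#endif
};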

1/* $Id: regops.c 77873 2019-03-26 01:36:36Z vboxsync $ */
2/** @file
3 * vboxsf - VBox Linux Shared Folders VFS, regular file inode and file operations.
4 */
5
6/*
7 * Copyright (C) 2006-2019 Oracle Corporation
8 *
9 * Permission is hereby granted, free of charge, to any person
10 * obtaining a copy of this software and associated documentation
11 * files (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use,
13 * copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the
15 * Software is furnished to do so, subject to the following
16 * conditions:
17 *
18 * The above copyright notice and this permission notice shall be
19 * included in all copies or substantial portions of the Software.
20 *
21 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
22 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
23 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
24 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
25 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
26 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 * OTHER DEALINGS IN THE SOFTWARE.
29 */
30
31
32/*********************************************************************************************************************************
33* Header Files *
34*********************************************************************************************************************************/
35#include "vfsmod.h"
36#include <linux/uio.h>
37#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 32)
38# include <linux/aio.h> /* struct kiocb before 4.1 */
39#endif
40#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12)
41# include <linux/buffer_head.h>
42#endif
43#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) \
44 && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12)
45# include <linux/writeback.h>
46#endif
47#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) \
48 && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
49# include <linux/splice.h>
50#endif
51#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 10)
52# include <linux/swap.h> /* for mark_page_accessed */
53#endif
54#include <iprt/err.h>
55
56#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
57# define SEEK_END 2
58#endif
59
60#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
61# define iter_is_iovec(a_pIter) ( !((a_pIter)->type & ITER_KVEC) )
62#elif LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)
63# define iter_is_iovec(a_pIter) ( !((a_pIter)->type & (ITER_KVEC | ITER_BVEC)) )
64#endif
65
66#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)
67# define vm_fault_t int
68#endif
69
70#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 20)
71# define pgoff_t unsigned long
72#endif
73
74
75/*********************************************************************************************************************************
76* Structures and Typedefs *
77*********************************************************************************************************************************/
78#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
79struct vbsf_iov_iter {
80 unsigned int type;
81 unsigned int v_write : 1;
82 size_t iov_offset;
83 size_t nr_segs;
84 struct iovec const *iov;
85# ifdef VBOX_STRICT
86 struct iovec const * const iov_org;
87 size_t const nr_segs_org;
88# endif
89};
90# ifdef VBOX_STRICT
91# define VBSF_IOV_ITER_INITIALIZER(a_cSegs, a_pIov, a_fWrite) { 0, a_fWrite, 0, a_cSegs, a_pIov, a_pIov, a_cSegs }
92# else
93# define VBSF_IOV_ITER_INITIALIZER(a_cSegs, a_pIov, a_fWrite) { 0, a_fWrite, 0, a_cSegs, a_pIov }
94# endif
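/* Note: kernels older than 3.16 lack the modern iov_iter API, so the defines below
   alias the iov_iter name (plus a minimal ITER_KVEC flag) to the vbsf_iov_iter
   stand-in above, letting the iterator-based code further down compile unchanged. */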
95# define ITER_KVEC 1
96# define iov_iter vbsf_iov_iter
97#endif
98
99#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
100/** Used by vbsf_iter_lock_pages() to keep the first page of the next segment. */
101struct vbsf_iter_stash {
102 struct page *pPage;
103 size_t off;
104 size_t cb;
105# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
106 size_t offFromEnd;
107 struct iov_iter Copy;
108# endif
109};
110#endif /* >= 2.6.19 */
111/** Initializer for struct vbsf_iter_stash. */
112#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
113# define VBSF_ITER_STASH_INITIALIZER { NULL, 0 }
114#else
115# define VBSF_ITER_STASH_INITIALIZER { NULL, 0, ~(size_t)0 }
116#endif
117
118
119/*********************************************************************************************************************************
120* Internal Functions *
121*********************************************************************************************************************************/
122DECLINLINE(void) vbsf_unlock_user_pages(struct page **papPages, size_t cPages, bool fSetDirty, bool fLockPgHack);
123
124
125/*********************************************************************************************************************************
126* Provide more recent uio.h functionality to older kernels. *
127*********************************************************************************************************************************/
128#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
129
130# undef iov_iter_count
131# define iov_iter_count(a_pIter) vbsf_iov_iter_count(a_pIter)
132static size_t vbsf_iov_iter_count(struct vbsf_iov_iter const *iter)
133{
134 size_t cbRet = 0;
135 size_t cLeft = iter->nr_segs;
136 struct iovec const *iov = iter->iov;
137 while (cLeft-- > 0) {
138 cbRet += iov->iov_len;
139 iov++;
140 }
141 return cbRet - iter->iov_offset;
142}
143
144
145# undef iov_iter_single_seg_count
146# define iov_iter_single_seg_count(a_pIter) vbsf_iov_iter_single_seg_count(a_pIter)
147static size_t vbsf_iov_iter_single_seg_count(struct vbsf_iov_iter const *iter)
148{
149 if (iter->nr_segs > 0)
150 return iter->iov->iov_len - iter->iov_offset;
151 return 0;
152}
153
154
155# undef iov_iter_advance
156# define iov_iter_advance(a_pIter, a_cbSkip) vbsf_iov_iter_advance(a_pIter, a_cbSkip)
157static void vbsf_iov_iter_advance(struct vbsf_iov_iter *iter, size_t cbSkip)
158{
159 SFLOG2(("vbsf_iov_iter_advance: cbSkip=%#zx\n", cbSkip));
160 if (iter->nr_segs > 0) {
161 size_t const cbLeftCur = iter->iov->iov_len - iter->iov_offset;
162 Assert(iter->iov_offset <= iter->iov->iov_len);
163 if (cbLeftCur > cbSkip) {
164 iter->iov_offset += cbSkip;
165 } else {
166 cbSkip -= cbLeftCur;
167 iter->iov_offset = 0;
168 iter->iov++;
169 iter->nr_segs--;
170 while (iter->nr_segs > 0) {
171 size_t const cbSeg = iter->iov->iov_len;
172 if (cbSeg > cbSkip) {
173 iter->iov_offset = cbSkip;
174 break;
175 }
176 cbSkip -= cbSeg;
177 iter->iov++;
178 iter->nr_segs--;
179 }
180 }
181 }
182}
183
184
185# undef iov_iter_get_pages
186# define iov_iter_get_pages(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0) \
187 vbsf_iov_iter_get_pages(a_pIter, a_papPages, a_cbMax, a_cMaxPages, a_poffPg0)
188static ssize_t vbsf_iov_iter_get_pages(struct vbsf_iov_iter *iter, struct page **papPages,
189 size_t cbMax, unsigned cMaxPages, size_t *poffPg0)
190{
191 while (iter->nr_segs > 0) {
192 size_t const cbLeft = iter->iov->iov_len - iter->iov_offset;
193 Assert(iter->iov->iov_len >= iter->iov_offset);
194 if (cbLeft > 0) {
195 uintptr_t uPtrFrom = (uintptr_t)iter->iov->iov_base + iter->iov_offset;
196 size_t offPg0 = *poffPg0 = uPtrFrom & PAGE_OFFSET_MASK;
197 size_t cPagesLeft = RT_ALIGN_Z(offPg0 + cbLeft, PAGE_SIZE) >> PAGE_SHIFT;
198 size_t cPages = RT_MIN(cPagesLeft, cMaxPages);
199 struct task_struct *pTask = current;
200 size_t cPagesLocked;
201
202 down_read(&pTask->mm->mmap_sem);
203 cPagesLocked = get_user_pages(pTask, pTask->mm, uPtrFrom, cPages, iter->v_write, 1 /*force*/, papPages, NULL);
204 up_read(&pTask->mm->mmap_sem);
205 if (cPagesLocked == cPages) {
206 size_t cbRet = (cPages << PAGE_SHIFT) - offPg0;
207 if (cPages == cPagesLeft) {
208 size_t offLastPg = (uPtrFrom + cbLeft) & PAGE_OFFSET_MASK;
209 if (offLastPg)
210 cbRet -= PAGE_SIZE - offLastPg;
211 }
212 Assert(cbRet <= cbLeft);
213 return cbRet;
214 }
215 if (cPagesLocked > 0)
216 vbsf_unlock_user_pages(papPages, cPagesLocked, false /*fSetDirty*/, false /*fLockPgHack*/);
217 return -EFAULT;
218 }
219 iter->iov_offset = 0;
220 iter->iov++;
221 iter->nr_segs--;
222 }
223 AssertFailed();
224 return 0;
225}
226
227
228# undef iov_iter_truncate
229# define iov_iter_truncate(iter, cbNew) vbsf_iov_iter_truncate(iter, cbNew)
230static void vbsf_iov_iter_truncate(struct vbsf_iov_iter *iter, size_t cbNew)
231{
232 /* we have no counter or stuff, so it's a no-op. */
233 RT_NOREF(iter, cbNew);
234}
235
236
237# undef iov_iter_revert
238# define iov_iter_revert(a_pIter, a_cbRewind) vbsf_iov_iter_revert(a_pIter, a_cbRewind)
239void vbsf_iov_iter_revert(struct vbsf_iov_iter *iter, size_t cbRewind)
240{
241 SFLOG2(("vbsf_iov_iter_revert: cbRewind=%#zx\n", cbRewind));
242 if (iter->iov_offset > 0) {
243 if (cbRewind <= iter->iov_offset) {
244 iter->iov_offset -= cbRewind;
245 return;
246 }
247 cbRewind -= iter->iov_offset;
248 iter->iov_offset = 0;
249 }
250
251 while (cbRewind > 0) {
252 struct iovec const *pIov = --iter->iov;
253 size_t const cbSeg = pIov->iov_len;
254 iter->nr_segs++;
255
256 Assert((uintptr_t)pIov >= (uintptr_t)iter->iov_org);
257 Assert(iter->nr_segs <= iter->nr_segs_org);
258
259 if (cbRewind <= cbSeg) {
260 iter->iov_offset = cbSeg - cbRewind;
261 break;
262 }
263 cbRewind -= cbSeg;
264 }
265}
266
267#endif /* 2.6.19 <= linux < 3.16.0 */
268#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) && LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
269
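/* These provide copy_from_iter()/copy_to_iter() equivalents for the kernel range
   covered by the #if above (before 3.18): the 3.16+ ITER_BVEC case is copied page by
   page via copy_page_from_iter()/copy_page_to_iter(), while plain iovec/kvec segments
   are handled with copy_from_user()/copy_to_user() or memcpy(). */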
270static size_t copy_from_iter(uint8_t *pbDst, size_t cbToCopy, struct iov_iter *pSrcIter)
271{
272 size_t const cbTotal = cbToCopy;
273 Assert(iov_iter_count(pSrcIter) >= cbToCopy);
274# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
275 if (pSrcIter->type & ITER_BVEC) {
276 while (cbToCopy > 0) {
277 size_t const offPage = (uintptr_t)pbDst & PAGE_OFFSET_MASK;
278 size_t const cbThisCopy = RT_MIN(PAGE_SIZE - offPage, cbToCopy);
279 struct page *pPage = rtR0MemObjLinuxVirtToPage(pbDst);
280 size_t cbCopied = copy_page_from_iter(pPage, offPage, cbThisCopy, pSrcIter);
281 AssertStmt(cbCopied <= cbThisCopy, cbCopied = cbThisCopy);
282 pbDst += cbCopied;
283 cbToCopy -= cbCopied;
284 if (cbCopied != cbThisCopy)
285 break;
286 }
287 } else
288# endif
289 {
290 while (cbToCopy > 0) {
291 size_t cbThisCopy = iov_iter_single_seg_count(pSrcIter);
292 if (cbThisCopy > 0) {
293 if (cbThisCopy > cbToCopy)
294 cbThisCopy = cbToCopy;
295 if (pSrcIter->type & ITER_KVEC)
296 memcpy(pbDst, (void *)pSrcIter->iov->iov_base + pSrcIter->iov_offset, cbThisCopy);
297 else if (copy_from_user(pbDst, pSrcIter->iov->iov_base + pSrcIter->iov_offset, cbThisCopy) != 0)
298 break;
299 pbDst += cbThisCopy;
300 cbToCopy -= cbThisCopy;
301 }
302 iov_iter_advance(pSrcIter, cbThisCopy);
303 }
304 }
305 return cbTotal - cbToCopy;
306}
307
308
309static size_t copy_to_iter(uint8_t const *pbSrc, size_t cbToCopy, struct iov_iter *pDstIter)
310{
311 size_t const cbTotal = cbToCopy;
312 Assert(iov_iter_count(pDstIter) >= cbToCopy);
313# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
314 if (pDstIter->type & ITER_BVEC) {
315 while (cbToCopy > 0) {
316 size_t const offPage = (uintptr_t)pbSrc & PAGE_OFFSET_MASK;
317 size_t const cbThisCopy = RT_MIN(PAGE_SIZE - offPage, cbToCopy);
318 struct page *pPage = rtR0MemObjLinuxVirtToPage((void *)pbSrc);
319 size_t cbCopied = copy_page_to_iter(pPage, offPage, cbThisCopy, pDstIter);
320 AssertStmt(cbCopied <= cbThisCopy, cbCopied = cbThisCopy);
321 pbSrc += cbCopied;
322 cbToCopy -= cbCopied;
323 if (cbCopied != cbThisCopy)
324 break;
325 }
326 } else
327# endif
328 {
329 while (cbToCopy > 0) {
330 size_t cbThisCopy = iov_iter_single_seg_count(pDstIter);
331 if (cbThisCopy > 0) {
332 if (cbThisCopy > cbToCopy)
333 cbThisCopy = cbToCopy;
334 if (pDstIter->type & ITER_KVEC)
335 memcpy((void *)pDstIter->iov->iov_base + pDstIter->iov_offset, pbSrc, cbThisCopy);
336 else if (copy_to_user(pDstIter->iov->iov_base + pDstIter->iov_offset, pbSrc, cbThisCopy) != 0) {
337 break;
338 }
339 pbSrc += cbThisCopy;
340 cbToCopy -= cbThisCopy;
341 }
342 iov_iter_advance(pDstIter, cbThisCopy);
343 }
344 }
345 return cbTotal - cbToCopy;
346}
347
348#endif /* 2.6.19 <= linux < 3.18.0 */
349
350
351
352/*********************************************************************************************************************************
353* Handle management *
354*********************************************************************************************************************************/
355
356/**
357 * Called when an inode is released to unlink all handles that might possibly
358 * still be associated with it.
359 *
360 * @param pInodeInfo The inode whose handles to drop.
361 */
362void vbsf_handle_drop_chain(struct vbsf_inode_info *pInodeInfo)
363{
364 struct vbsf_handle *pCur, *pNext;
365 unsigned long fSavedFlags;
366 SFLOGFLOW(("vbsf_handle_drop_chain: %p\n", pInodeInfo));
367 spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
368
369 RTListForEachSafe(&pInodeInfo->HandleList, pCur, pNext, struct vbsf_handle, Entry) {
370 AssertMsg( (pCur->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST))
371 == (VBSF_HANDLE_F_MAGIC | VBSF_HANDLE_F_ON_LIST), ("%p %#x\n", pCur, pCur->fFlags));
372 pCur->fFlags &= ~VBSF_HANDLE_F_ON_LIST;
373 RTListNodeRemove(&pCur->Entry);
374 }
375
376 spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
377}
378
379
380/**
381 * Locates a handle that matches all the flags in @a fFlags.
382 *
383 * @returns Pointer to handle on success (retained), use vbsf_handle_release() to
384 * release it. NULL if no suitable handle was found.
385 * @param pInodeInfo The inode info to search.
386 * @param fFlagsSet The flags that must be set.
387 * @param fFlagsClear The flags that must be clear.
388 */
389struct vbsf_handle *vbsf_handle_find(struct vbsf_inode_info *pInodeInfo, uint32_t fFlagsSet, uint32_t fFlagsClear)
390{
391 struct vbsf_handle *pCur;
392 unsigned long fSavedFlags;
393 spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
394
395 RTListForEach(&pInodeInfo->HandleList, pCur, struct vbsf_handle, Entry) {
396 AssertMsg( (pCur->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST))
397 == (VBSF_HANDLE_F_MAGIC | VBSF_HANDLE_F_ON_LIST), ("%p %#x\n", pCur, pCur->fFlags));
398 if ((pCur->fFlags & (fFlagsSet | fFlagsClear)) == fFlagsSet) {
399 uint32_t cRefs = ASMAtomicIncU32(&pCur->cRefs);
400 if (cRefs > 1) {
401 spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
402 SFLOGFLOW(("vbsf_handle_find: returns %p\n", pCur));
403 return pCur;
404 }
405 /* Oops, already being closed (safe as it's only ever increased here). */
406 ASMAtomicDecU32(&pCur->cRefs);
407 }
408 }
409
410 spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
411 SFLOGFLOW(("vbsf_handle_find: returns NULL!\n"));
412 return NULL;
413}
414
415
416/**
417 * Slow worker for vbsf_handle_release() that does the freeing.
418 *
419 * @returns 0 (ref count).
420 * @param pHandle The handle to release.
421 * @param sf_g The info structure for the shared folder associated
422 * with the handle.
423 * @param pszCaller The caller name (for logging failures).
424 */
425uint32_t vbsf_handle_release_slow(struct vbsf_handle *pHandle, struct vbsf_super_info *sf_g, const char *pszCaller)
426{
427 int rc;
428 unsigned long fSavedFlags;
429
430 SFLOGFLOW(("vbsf_handle_release_slow: %p (%s)\n", pHandle, pszCaller));
431
432 /*
433 * Remove from the list.
434 */
435 spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
436
437 AssertMsg((pHandle->fFlags & VBSF_HANDLE_F_MAGIC_MASK) == VBSF_HANDLE_F_MAGIC, ("%p %#x\n", pHandle, pHandle->fFlags));
438 Assert(pHandle->pInodeInfo);
439 Assert(pHandle->pInodeInfo && pHandle->pInodeInfo->u32Magic == SF_INODE_INFO_MAGIC);
440
441 if (pHandle->fFlags & VBSF_HANDLE_F_ON_LIST) {
442 pHandle->fFlags &= ~VBSF_HANDLE_F_ON_LIST;
443 RTListNodeRemove(&pHandle->Entry);
444 }
445
446 spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
447
448 /*
449 * Actually destroy it.
450 */
451 rc = VbglR0SfHostReqCloseSimple(sf_g->map.root, pHandle->hHost);
452 if (RT_FAILURE(rc))
453 LogFunc(("Caller %s: VbglR0SfHostReqCloseSimple %#RX64 failed with rc=%Rrc\n", pszCaller, pHandle->hHost, rc));
454 pHandle->hHost = SHFL_HANDLE_NIL;
455 pHandle->fFlags = VBSF_HANDLE_F_MAGIC_DEAD;
456 kfree(pHandle);
457 return 0;
458}
459
460
461/**
462 * Appends a handle to a handle list.
463 *
464 * @param pInodeInfo The inode to add it to.
465 * @param pHandle The handle to add.
466 */
467void vbsf_handle_append(struct vbsf_inode_info *pInodeInfo, struct vbsf_handle *pHandle)
468{
469#ifdef VBOX_STRICT
470 struct vbsf_handle *pCur;
471#endif
472 unsigned long fSavedFlags;
473
474 SFLOGFLOW(("vbsf_handle_append: %p (to %p)\n", pHandle, pInodeInfo));
475 AssertMsg((pHandle->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST)) == VBSF_HANDLE_F_MAGIC,
476 ("%p %#x\n", pHandle, pHandle->fFlags));
477 Assert(pInodeInfo->u32Magic == SF_INODE_INFO_MAGIC);
478
479 spin_lock_irqsave(&g_SfHandleLock, fSavedFlags);
480
481 AssertMsg((pHandle->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST)) == VBSF_HANDLE_F_MAGIC,
482 ("%p %#x\n", pHandle, pHandle->fFlags));
483#ifdef VBOX_STRICT
484 RTListForEach(&pInodeInfo->HandleList, pCur, struct vbsf_handle, Entry) {
485 Assert(pCur != pHandle);
486 AssertMsg( (pCur->fFlags & (VBSF_HANDLE_F_MAGIC_MASK | VBSF_HANDLE_F_ON_LIST))
487 == (VBSF_HANDLE_F_MAGIC | VBSF_HANDLE_F_ON_LIST), ("%p %#x\n", pCur, pCur->fFlags));
488 }
489 pHandle->pInodeInfo = pInodeInfo;
490#endif
491
492 pHandle->fFlags |= VBSF_HANDLE_F_ON_LIST;
493 RTListAppend(&pInodeInfo->HandleList, &pHandle->Entry);
494
495 spin_unlock_irqrestore(&g_SfHandleLock, fSavedFlags);
496}
497
498
499
500/*********************************************************************************************************************************
501* Pipe / splice stuff for 2.6.23 <= linux < 2.6.31 (figure out why we need this) *
502*********************************************************************************************************************************/
503
504#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) \
505 && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
506
507/*
508 * Some pipe stuff we apparently need for 2.6.23-2.6.30.
509 */
510
511static void vbsf_free_pipebuf(struct page *kpage)
512{
513 kunmap(kpage);
514 __free_pages(kpage, 0);
515}
516
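/* The pipe buffer callbacks below are mostly stubs: the pages pushed into the pipe by
   vbsf_splice_read() are ordinary kernel pages that only need to be freed again in the
   ->release callback (via vbsf_free_pipebuf()). */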
517static void *vbsf_pipe_buf_map(struct pipe_inode_info *pipe, struct pipe_buffer *pipe_buf, int atomic)
518{
519 return 0;
520}
521
522static void vbsf_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *pipe_buf)
523{
524}
525
526static void vbsf_pipe_buf_unmap(struct pipe_inode_info *pipe, struct pipe_buffer *pipe_buf, void *map_data)
527{
528}
529
530static int vbsf_pipe_buf_steal(struct pipe_inode_info *pipe, struct pipe_buffer *pipe_buf)
531{
532 return 0;
533}
534
535static void vbsf_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *pipe_buf)
536{
537 vbsf_free_pipebuf(pipe_buf->page);
538}
539
540static int vbsf_pipe_buf_confirm(struct pipe_inode_info *info, struct pipe_buffer *pipe_buf)
541{
542 return 0;
543}
544
545static struct pipe_buf_operations vbsf_pipe_buf_ops = {
546 .can_merge = 0,
547 .map = vbsf_pipe_buf_map,
548 .unmap = vbsf_pipe_buf_unmap,
549 .confirm = vbsf_pipe_buf_confirm,
550 .release = vbsf_pipe_buf_release,
551 .steal = vbsf_pipe_buf_steal,
552 .get = vbsf_pipe_buf_get,
553};
554
555static int vbsf_reg_read_aux(const char *caller, struct vbsf_super_info *sf_g, struct vbsf_reg_info *sf_r,
556 void *buf, uint32_t *nread, uint64_t pos)
557{
558 int rc = VbglR0SfRead(&g_SfClient, &sf_g->map, sf_r->Handle.hHost, pos, nread, buf, false /* already locked? */ );
559 if (RT_FAILURE(rc)) {
560 LogFunc(("VbglR0SfRead failed. caller=%s, rc=%Rrc\n", caller,
561 rc));
562 return -EPROTO;
563 }
564 return 0;
565}
566
567# define LOCK_PIPE(pipe) do { if (pipe->inode) mutex_lock(&pipe->inode->i_mutex); } while (0)
568# define UNLOCK_PIPE(pipe) do { if (pipe->inode) mutex_unlock(&pipe->inode->i_mutex); } while (0)
569
570ssize_t vbsf_splice_read(struct file *in, loff_t * poffset, struct pipe_inode_info *pipe, size_t len, unsigned int flags)
571{
572 size_t bytes_remaining = len;
573 loff_t orig_offset = *poffset;
574 loff_t offset = orig_offset;
575 struct inode *inode = VBSF_GET_F_DENTRY(in)->d_inode;
576 struct vbsf_super_info *sf_g = VBSF_GET_SUPER_INFO(inode->i_sb);
577 struct vbsf_reg_info *sf_r = in->private_data;
578 ssize_t retval;
579 struct page *kpage = 0;
580 size_t nsent = 0;
581
582/** @todo rig up a FsPerf test for this code */
583 TRACE();
584 if (!S_ISREG(inode->i_mode)) {
585 LogFunc(("read from non regular file %d\n", inode->i_mode));
586 return -EINVAL;
587 }
588 if (!len) {
589 return 0;
590 }
591
592 LOCK_PIPE(pipe);
593
594 uint32_t req_size = 0;
595 while (bytes_remaining > 0) {
596 kpage = alloc_page(GFP_KERNEL);
597 if (unlikely(kpage == NULL)) {
598 UNLOCK_PIPE(pipe);
599 return -ENOMEM;
600 }
601 req_size = 0;
602 uint32_t nread = req_size = (uint32_t) min(bytes_remaining, (size_t) PAGE_SIZE);
603 uint32_t chunk = 0;
604 void *kbuf = kmap(kpage);
605 while (chunk < req_size) {
606 retval = vbsf_reg_read_aux(__func__, sf_g, sf_r, kbuf + chunk, &nread, offset);
607 if (retval < 0)
608 goto err;
609 if (nread == 0)
610 break;
611 chunk += nread;
612 offset += nread;
613 nread = req_size - chunk;
614 }
615 if (!pipe->readers) {
616 send_sig(SIGPIPE, current, 0);
617 retval = -EPIPE;
618 goto err;
619 }
620 if (pipe->nrbufs < PIPE_BUFFERS) {
621 struct pipe_buffer *pipebuf = pipe->bufs + ((pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1));
622 pipebuf->page = kpage;
623 pipebuf->ops = &vbsf_pipe_buf_ops;
624 pipebuf->len = req_size;
625 pipebuf->offset = 0;
626 pipebuf->private = 0;
627 pipebuf->flags = 0;
628 pipe->nrbufs++;
629 nsent += req_size;
630 bytes_remaining -= req_size;
631 if (signal_pending(current))
632 break;
633 } else { /* pipe full */
634
635 if (flags & SPLICE_F_NONBLOCK) {
636 retval = -EAGAIN;
637 goto err;
638 }
639 vbsf_free_pipebuf(kpage);
640 break;
641 }
642 }
643 UNLOCK_PIPE(pipe);
644 if (!nsent && signal_pending(current))
645 return -ERESTARTSYS;
646 *poffset += nsent;
647 return offset - orig_offset;
648
649 err:
650 UNLOCK_PIPE(pipe);
651 vbsf_free_pipebuf(kpage);
652 return retval;
653}
654
655#endif /* 2.6.23 <= LINUX_VERSION_CODE < 2.6.31 */
656
657
658/*********************************************************************************************************************************
659* File operations on regular files *
660*********************************************************************************************************************************/
661
662/**
663 * Helper for deciding whether we should do a read via the page cache or not.
664 *
665 * By default we will only use the page cache if there is a writable memory
666 * mapping of the file with a chance that it may have modified any of the pages
667 * already.
668 */
669DECLINLINE(bool) vbsf_should_use_cached_read(struct file *file, struct address_space *mapping, struct vbsf_super_info *sf_g)
670{
671 return mapping
672 && mapping->nrpages > 0
673 && mapping_writably_mapped(mapping)
674 && !(file->f_flags & O_DIRECT)
675 && 1 /** @todo make this behaviour configurable at mount time (sf_g) */;
676}
677
678/** Wrapper around put_page / page_cache_release. */
679DECLINLINE(void) vbsf_put_page(struct page *pPage)
680{
681#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
682 put_page(pPage);
683#else
684 page_cache_release(pPage);
685#endif
686}
687
688
689/** Wrapper around get_page / page_cache_get. */
690DECLINLINE(void) vbsf_get_page(struct page *pPage)
691{
692#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
693 get_page(pPage);
694#else
695 page_cache_get(pPage);
696#endif
697}
698
699
700/** Companion to vbsf_lock_user_pages(). */
701DECLINLINE(void) vbsf_unlock_user_pages(struct page **papPages, size_t cPages, bool fSetDirty, bool fLockPgHack)
702{
703 /* We don't mark kernel pages dirty: */
704 if (fLockPgHack)
705 fSetDirty = false;
706
707 while (cPages-- > 0)
708 {
709 struct page *pPage = papPages[cPages];
710 if (fSetDirty && !PageReserved(pPage))
711 SetPageDirty(pPage);
712 vbsf_put_page(pPage);
713 }
714}
715
716
717/**
718 * Worker for vbsf_lock_user_pages_failed_check_kernel() and
719 * vbsf_iter_lock_pages().
720 */
721static int vbsf_lock_kernel_pages(uint8_t *pbStart, bool fWrite, size_t cPages, struct page **papPages)
722{
723 uintptr_t const uPtrFrom = (uintptr_t)pbStart;
724 uintptr_t const uPtrLast = (uPtrFrom & ~(uintptr_t)PAGE_OFFSET_MASK) + (cPages << PAGE_SHIFT) - 1;
725 uint8_t *pbPage = (uint8_t *)uPtrLast;
726 size_t iPage = cPages;
727
728 /*
729 * Touch the pages first (paranoia^2).
730 */
731 if (fWrite) {
732 uint8_t volatile *pbProbe = (uint8_t volatile *)uPtrFrom;
733 while (iPage-- > 0) {
734 *pbProbe = *pbProbe;
735 pbProbe += PAGE_SIZE;
736 }
737 } else {
738 uint8_t const *pbProbe = (uint8_t const *)uPtrFrom;
739 while (iPage-- > 0) {
740 ASMProbeReadByte(pbProbe);
741 pbProbe += PAGE_SIZE;
742 }
743 }
744
745 /*
746 * Get the pages.
747 * Note! Fixes here probably applies to rtR0MemObjNativeLockKernel as well.
748 */
749 iPage = cPages;
750 if ( uPtrFrom >= (unsigned long)__va(0)
751 && uPtrLast < (unsigned long)high_memory) {
752 /* The physical page mapping area: */
753 while (iPage-- > 0) {
754 struct page *pPage = papPages[iPage] = virt_to_page(pbPage);
755 vbsf_get_page(pPage);
756 pbPage -= PAGE_SIZE;
757 }
758 } else {
759 /* This is vmalloc or some such thing, so go thru page tables: */
760 while (iPage-- > 0) {
761 struct page *pPage = rtR0MemObjLinuxVirtToPage(pbPage);
762 if (pPage) {
763 papPages[iPage] = pPage;
764 vbsf_get_page(pPage);
765 pbPage -= PAGE_SIZE;
766 } else {
767 while (++iPage < cPages) {
768 pPage = papPages[iPage];
769 vbsf_put_page(pPage);
770 }
771 return -EFAULT;
772 }
773 }
774 }
775 return 0;
776}
777
778
779/**
780 * Catches kernel_read() and kernel_write() calls and works around them.
781 *
782 * The file_operations::read and file_operations::write callbacks supposedly
783 * hand us the user buffers to read into and write out of. To allow the kernel
784 * to read and write without allocating buffers in userland, kernel_read()
785 * and kernel_write() increase the user space address limit before calling us
786 * so that copyin/copyout won't reject it. Our problem is that get_user_pages()
787 * works on the userspace address space structures and will not be fooled by an
788 * increased addr_limit.
789 *
790 * This code tries to detect this situation and fake the page locking for the
791 * kernel buffer.
792 */
793static int vbsf_lock_user_pages_failed_check_kernel(uintptr_t uPtrFrom, size_t cPages, bool fWrite, int rcFailed,
794 struct page **papPages, bool *pfLockPgHack)
795{
796 /*
797 * Check that this is valid user memory that is actually in the kernel range.
798 */
799#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
800 if ( access_ok((void *)uPtrFrom, cPages << PAGE_SHIFT)
801 && uPtrFrom >= USER_DS.seg)
802#else
803 if ( access_ok(fWrite ? VERIFY_WRITE : VERIFY_READ, (void *)uPtrFrom, cPages << PAGE_SHIFT)
804 && uPtrFrom >= USER_DS.seg)
805#endif
806 {
807 int rc = vbsf_lock_kernel_pages((uint8_t *)uPtrFrom, fWrite, cPages, papPages);
808 if (rc == 0) {
809 *pfLockPgHack = true;
810 return 0;
811 }
812 }
813
814 return rcFailed;
815}
816
817
818/** Wrapper around get_user_pages. */
819DECLINLINE(int) vbsf_lock_user_pages(uintptr_t uPtrFrom, size_t cPages, bool fWrite, struct page **papPages, bool *pfLockPgHack)
820{
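    /* The get_user_pages_unlocked() signature differs across the kernel versions handled
       below: 4.9+ takes a gup_flags mask, 4.6+ dropped the task/mm arguments, 4.0+ has the
       unlocked variant with task/mm, and older kernels fall back to get_user_pages() while
       holding mmap_sem for reading. */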
821# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
822 ssize_t cPagesLocked = get_user_pages_unlocked(uPtrFrom, cPages, papPages,
823 fWrite ? FOLL_WRITE | FOLL_FORCE : FOLL_FORCE);
824# elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
825 ssize_t cPagesLocked = get_user_pages_unlocked(uPtrFrom, cPages, fWrite, 1 /*force*/, papPages);
826# elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)
827 ssize_t cPagesLocked = get_user_pages_unlocked(current, current->mm, uPtrFrom, cPages, fWrite, 1 /*force*/, papPages);
828# else
829 struct task_struct *pTask = current;
830 size_t cPagesLocked;
831 down_read(&pTask->mm->mmap_sem);
832 cPagesLocked = get_user_pages(pTask, pTask->mm, uPtrFrom, cPages, fWrite, 1 /*force*/, papPages, NULL);
833 up_read(&pTask->mm->mmap_sem);
834# endif
835 *pfLockPgHack = false;
836 if (cPagesLocked == cPages)
837 return 0;
838
839 /*
840 * It failed.
841 */
842 if (cPagesLocked < 0)
843 return vbsf_lock_user_pages_failed_check_kernel(uPtrFrom, cPages, fWrite, (int)cPagesLocked, papPages, pfLockPgHack);
844
845 vbsf_unlock_user_pages(papPages, cPagesLocked, false /*fSetDirty*/, false /*fLockPgHack*/);
846
847 /* We could use uPtrFrom + cPagesLocked to get the correct status here... */
848 return -EFAULT;
849}
850
851
852/**
853 * Read function used when accessing files that are memory mapped.
854 *
855 * We read from the page cache here to present a coherent picture of the
856 * file content.
857 */
858static ssize_t vbsf_reg_read_mapped(struct file *file, char /*__user*/ *buf, size_t size, loff_t *off)
859{
860#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
861 struct iovec iov = { .iov_base = buf, .iov_len = size };
862 struct iov_iter iter;
863 struct kiocb kiocb;
864 ssize_t cbRet;
865
866 init_sync_kiocb(&kiocb, file);
867 kiocb.ki_pos = *off;
868 iov_iter_init(&iter, READ, &iov, 1, size);
869
870 cbRet = generic_file_read_iter(&kiocb, &iter);
871
872 *off = kiocb.ki_pos;
873 return cbRet;
874
875#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
876 struct iovec iov = { .iov_base = buf, .iov_len = size };
877 struct kiocb kiocb;
878 ssize_t cbRet;
879
880 init_sync_kiocb(&kiocb, file);
881 kiocb.ki_pos = *off;
882
883 cbRet = generic_file_aio_read(&kiocb, &iov, 1, *off);
884 if (cbRet == -EIOCBQUEUED)
885 cbRet = wait_on_sync_kiocb(&kiocb);
886
887 *off = kiocb.ki_pos;
888 return cbRet;
889
890#else /* 2.6.18 or earlier: */
891 return generic_file_read(file, buf, size, off);
892#endif
893}
894
895
896/**
897 * Fallback case of vbsf_reg_read() that locks the user buffers and lets the host
898 * write directly to them.
899 */
900static ssize_t vbsf_reg_read_locking(struct file *file, char /*__user*/ *buf, size_t size, loff_t *off,
901 struct vbsf_super_info *sf_g, struct vbsf_reg_info *sf_r)
902{
903 /*
904 * Lock pages and execute the read, taking care not to pass the host
905 * more than it can handle in one go or more than we care to allocate
906 * page arrays for. The latter limit is set at just short of 32KB due
907 * to how the physical heap works.
908 */
909 struct page *apPagesStack[16];
910 struct page **papPages = &apPagesStack[0];
911 struct page **papPagesFree = NULL;
912 VBOXSFREADPGLSTREQ *pReq;
913 loff_t offFile = *off;
914 ssize_t cbRet = -ENOMEM;
915 size_t cPages = (((uintptr_t)buf & PAGE_OFFSET_MASK) + size + PAGE_OFFSET_MASK) >> PAGE_SHIFT;
916 size_t cMaxPages = RT_MIN(RT_MAX(sf_g->cMaxIoPages, 1), cPages);
917 bool fLockPgHack;
918
919 pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages]));
920 while (!pReq && cMaxPages > 4) {
921 cMaxPages /= 2;
922 pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages]));
923 }
924 if (pReq && cMaxPages > RT_ELEMENTS(apPagesStack))
925 papPagesFree = papPages = kmalloc(cMaxPages * sizeof(papPages[0]), GFP_KERNEL);
926 if (pReq && papPages) {
927 cbRet = 0;
928 for (;;) {
929 /*
930 * Figure out how much to process now and lock the user pages.
931 */
932 int rc;
933 size_t cbChunk = (uintptr_t)buf & PAGE_OFFSET_MASK;
934 pReq->PgLst.offFirstPage = (uint16_t)cbChunk;
935 cPages = RT_ALIGN_Z(cbChunk + size, PAGE_SIZE) >> PAGE_SHIFT;
936 if (cPages <= cMaxPages)
937 cbChunk = size;
938 else {
939 cPages = cMaxPages;
940 cbChunk = (cMaxPages << PAGE_SHIFT) - cbChunk;
941 }
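            /* Example: with 4 KiB pages, buf at page offset 0x123 and cMaxPages = 16, a large
               request is clipped to cbChunk = 16*4K - 0x123 bytes, so the chunk ends exactly on
               a page boundary and (assuming the whole chunk is transferred) the next iteration
               starts page aligned. */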
942
943 rc = vbsf_lock_user_pages((uintptr_t)buf, cPages, true /*fWrite*/, papPages, &fLockPgHack);
944 if (rc == 0) {
945 size_t iPage = cPages;
946 while (iPage-- > 0)
947 pReq->PgLst.aPages[iPage] = page_to_phys(papPages[iPage]);
948 } else {
949 cbRet = rc;
950 break;
951 }
952
953 /*
954 * Issue the request and unlock the pages.
955 */
956 rc = VbglR0SfHostReqReadPgLst(sf_g->map.root, pReq, sf_r->Handle.hHost, offFile, cbChunk, cPages);
957
958 vbsf_unlock_user_pages(papPages, cPages, true /*fSetDirty*/, fLockPgHack);
959
960 if (RT_SUCCESS(rc)) {
961 /*
962 * Success, advance position and buffer.
963 */
964 uint32_t cbActual = pReq->Parms.cb32Read.u.value32;
965 AssertStmt(cbActual <= cbChunk, cbActual = cbChunk);
966 cbRet += cbActual;
967 offFile += cbActual;
968 buf = (uint8_t *)buf + cbActual;
969 size -= cbActual;
970
971 /*
972 * Are we done already? If so commit the new file offset.
973 */
974 if (!size || cbActual < cbChunk) {
975 *off = offFile;
976 break;
977 }
978 } else if (rc == VERR_NO_MEMORY && cMaxPages > 4) {
979 /*
980 * The host probably doesn't have enough heap to handle the
981 * request, reduce the page count and retry.
982 */
983 cMaxPages /= 4;
984 Assert(cMaxPages > 0);
985 } else {
986 /*
987 * If we've successfully read stuff, return it rather than
988 * the error. (Not sure if this is such a great idea...)
989 */
990 if (cbRet > 0)
991 *off = offFile;
992 else
993 cbRet = -EPROTO;
994 break;
995 }
996 }
997 }
998 if (papPagesFree)
999 kfree(papPages);
1000 if (pReq)
1001 VbglR0PhysHeapFree(pReq);
1002 return cbRet;
1003}
1004
1005
1006/**
1007 * Read from a regular file.
1008 *
1009 * @param file the file
1010 * @param buf the buffer
1011 * @param size length of the buffer
1012 * @param off offset within the file (in/out).
1013 * @returns the number of read bytes on success, Linux error code otherwise
1014 */
1015static ssize_t vbsf_reg_read(struct file *file, char /*__user*/ *buf, size_t size, loff_t *off)
1016{
1017 struct inode *inode = VBSF_GET_F_DENTRY(file)->d_inode;
1018 struct vbsf_super_info *sf_g = VBSF_GET_SUPER_INFO(inode->i_sb);
1019 struct vbsf_reg_info *sf_r = file->private_data;
1020 struct address_space *mapping = inode->i_mapping;
1021
1022 SFLOGFLOW(("vbsf_reg_read: inode=%p file=%p buf=%p size=%#zx off=%#llx\n", inode, file, buf, size, *off));
1023
1024 if (!S_ISREG(inode->i_mode)) {
1025 LogFunc(("read from non regular file %d\n", inode->i_mode));
1026 return -EINVAL;
1027 }
1028
1029 /** @todo XXX Check read permission according to inode->i_mode! */
1030
1031 if (!size)
1032 return 0;
1033
1034 /*
1035 * If there is a mapping and O_DIRECT isn't in effect, we must
1036 * heed dirty pages in the mapping and read from them. For simplicity
1037 * though, we just do page cache reading when there are writable
1038 * mappings around with any kind of pages loaded.
1039 */
1040 if (vbsf_should_use_cached_read(file, mapping, sf_g))
1041 return vbsf_reg_read_mapped(file, buf, size, off);
1042
1043 /*
1044 * For small requests, try to use an embedded buffer provided we get a heap block
1045 * that does not cross page boundaries (see host code).
1046 */
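    /* With 4 KiB pages this embedded-copy path covers requests up to a bit under 3 KiB
       (PAGE_SIZE / 4 * 3 minus the request header); anything larger falls through to the
       page-locking path below. */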
1047 if (size <= PAGE_SIZE / 4 * 3 - RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0]) /* see allocator */) {
1048 uint32_t const cbReq = RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0]) + size;
1049 VBOXSFREADEMBEDDEDREQ *pReq = (VBOXSFREADEMBEDDEDREQ *)VbglR0PhysHeapAlloc(cbReq);
1050 if (pReq) {
1051 if ((PAGE_SIZE - ((uintptr_t)pReq & PAGE_OFFSET_MASK)) >= cbReq) {
1052 ssize_t cbRet;
1053 int vrc = VbglR0SfHostReqReadEmbedded(sf_g->map.root, pReq, sf_r->Handle.hHost, *off, (uint32_t)size);
1054 if (RT_SUCCESS(vrc)) {
1055 cbRet = pReq->Parms.cb32Read.u.value32;
1056 AssertStmt(cbRet <= (ssize_t)size, cbRet = size);
1057 if (copy_to_user(buf, pReq->abData, cbRet) == 0)
1058 *off += cbRet;
1059 else
1060 cbRet = -EFAULT;
1061 } else
1062 cbRet = -EPROTO;
1063 VbglR0PhysHeapFree(pReq);
1064 return cbRet;
1065 }
1066 VbglR0PhysHeapFree(pReq);
1067 }
1068 }
1069
1070#if 0 /* Turns out this is slightly slower than locking the pages even for 4KB reads (4.19/amd64). */
1071 /*
1072 * For medium sized requests try to use a bounce buffer.
1073 */
1074 if (size <= _64K /** @todo make this configurable? */) {
1075 void *pvBounce = kmalloc(size, GFP_KERNEL);
1076 if (pvBounce) {
1077 VBOXSFREADPGLSTREQ *pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
1078 if (pReq) {
1079 ssize_t cbRet;
1080 int vrc = VbglR0SfHostReqReadContig(sf_g->map.root, pReq, sf_r->Handle.hHost, *off,
1081 (uint32_t)size, pvBounce, virt_to_phys(pvBounce));
1082 if (RT_SUCCESS(vrc)) {
1083 cbRet = pReq->Parms.cb32Read.u.value32;
1084 AssertStmt(cbRet <= (ssize_t)size, cbRet = size);
1085 if (copy_to_user(buf, pvBounce, cbRet) == 0)
1086 *off += cbRet;
1087 else
1088 cbRet = -EFAULT;
1089 } else
1090 cbRet = -EPROTO;
1091 VbglR0PhysHeapFree(pReq);
1092 kfree(pvBounce);
1093 return cbRet;
1094 }
1095 kfree(pvBounce);
1096 }
1097 }
1098#endif
1099
1100 return vbsf_reg_read_locking(file, buf, size, off, sf_g, sf_r);
1101}
1102
1103
1104/**
1105 * Helper that synchronizes the page cache content with something we just wrote
1106 * to the host.
1107 */
1108void vbsf_reg_write_sync_page_cache(struct address_space *mapping, loff_t offFile, uint32_t cbRange,
1109 uint8_t const *pbSrcBuf, struct page **papSrcPages, uint32_t offSrcPage, size_t cSrcPages)
1110{
1111 Assert(offSrcPage < PAGE_SIZE);
1112 if (mapping && mapping->nrpages > 0) {
1113 /*
1114 * Work the pages in the write range.
1115 */
1116 while (cbRange > 0) {
1117 /*
1118 * Lookup the page at offFile. We're fine if there aren't
1119 * any there. We skip it if it's dirty or is being written
1120 * back, at least for now.
1121 */
1122 size_t const offDstPage = offFile & PAGE_OFFSET_MASK;
1123 size_t const cbToCopy = RT_MIN(PAGE_SIZE - offDstPage, cbRange);
1124 pgoff_t const idxPage = offFile >> PAGE_SHIFT;
1125 struct page *pDstPage = find_lock_page(mapping, idxPage);
1126 if (pDstPage) {
1127 if ( pDstPage->mapping == mapping /* ignore if re-purposed (paranoia) */
1128 && pDstPage->index == idxPage
1129 && !PageDirty(pDstPage) /* ignore if dirty */
1130 && !PageWriteback(pDstPage) /* ignore if being written back */ ) {
1131 /*
1132 * Map the page and do the copying.
1133 */
1134 uint8_t *pbDst = (uint8_t *)kmap(pDstPage);
1135 if (pbSrcBuf)
1136 memcpy(&pbDst[offDstPage], pbSrcBuf, cbToCopy);
1137 else {
1138 uint32_t const cbSrc0 = PAGE_SIZE - offSrcPage;
1139 uint8_t const *pbSrc = (uint8_t const *)kmap(papSrcPages[0]);
1140 AssertMsg(cSrcPages >= 1, ("offFile=%#llx cbRange=%#zx cbToCopy=%#zx\n", offFile, cbRange, cbToCopy));
1141 memcpy(&pbDst[offDstPage], &pbSrc[offSrcPage], RT_MIN(cbToCopy, cbSrc0));
1142 kunmap(papSrcPages[0]);
1143 if (cbToCopy > cbSrc0) {
1144 AssertMsg(cSrcPages >= 2, ("offFile=%#llx cbRange=%#zx cbToCopy=%#zx\n", offFile, cbRange, cbToCopy));
1145 pbSrc = (uint8_t const *)kmap(papSrcPages[1]);
1146 memcpy(&pbDst[offDstPage + cbSrc0], pbSrc, cbToCopy - cbSrc0);
1147 kunmap(papSrcPages[1]);
1148 }
1149 }
1150 kunmap(pDstPage);
1151 flush_dcache_page(pDstPage);
1152 if (cbToCopy == PAGE_SIZE)
1153 SetPageUptodate(pDstPage);
1154# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 10)
1155 mark_page_accessed(pDstPage);
1156# endif
1157 } else
1158 SFLOGFLOW(("vbsf_reg_write_sync_page_cache: Skipping page %p: mapping=%p (vs %p) writeback=%d offset=%#lx (vs%#lx)\n",
1159 pDstPage, pDstPage->mapping, mapping, PageWriteback(pDstPage), pDstPage->index, idxPage));
1160 unlock_page(pDstPage);
1161 vbsf_put_page(pDstPage);
1162 }
1163
1164 /*
1165 * Advance.
1166 */
1167 if (pbSrcBuf)
1168 pbSrcBuf += cbToCopy;
1169 else
1170 {
1171 offSrcPage += cbToCopy;
1172 Assert(offSrcPage < PAGE_SIZE * 2);
1173 if (offSrcPage >= PAGE_SIZE) {
1174 offSrcPage &= PAGE_OFFSET_MASK;
1175 papSrcPages++;
1176# ifdef VBOX_STRICT
1177 Assert(cSrcPages > 0);
1178 cSrcPages--;
1179# endif
1180 }
1181 }
1182 offFile += cbToCopy;
1183 cbRange -= cbToCopy;
1184 }
1185 }
1186 RT_NOREF(cSrcPages);
1187}
1188
1189
1190/**
1191 * Fallback case of vbsf_reg_write() that locks the user buffers and lets the host
1192 * read directly from them.
1193 */
1194static ssize_t vbsf_reg_write_locking(struct file *file, const char /*__user*/ *buf, size_t size, loff_t *off, loff_t offFile,
1195 struct inode *inode, struct vbsf_inode_info *sf_i,
1196 struct vbsf_super_info *sf_g, struct vbsf_reg_info *sf_r)
1197{
1198 /*
1199 * Lock pages and execute the write, taking care not to pass the host
1200 * more than it can handle in one go or more than we care to allocate
1201 * page arrays for. The latter limit is set at just short of 32KB due
1202 * to how the physical heap works.
1203 */
1204 struct page *apPagesStack[16];
1205 struct page **papPages = &apPagesStack[0];
1206 struct page **papPagesFree = NULL;
1207 VBOXSFWRITEPGLSTREQ *pReq;
1208 ssize_t cbRet = -ENOMEM;
1209 size_t cPages = (((uintptr_t)buf & PAGE_OFFSET_MASK) + size + PAGE_OFFSET_MASK) >> PAGE_SHIFT;
1210 size_t cMaxPages = RT_MIN(RT_MAX(sf_g->cMaxIoPages, 1), cPages);
1211 bool fLockPgHack;
1212
1213 pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages]));
1214 while (!pReq && cMaxPages > 4) {
1215 cMaxPages /= 2;
1216 pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages]));
1217 }
1218 if (pReq && cMaxPages > RT_ELEMENTS(apPagesStack))
1219 papPagesFree = papPages = kmalloc(cMaxPages * sizeof(papPages[0]), GFP_KERNEL);
1220 if (pReq && papPages) {
1221 cbRet = 0;
1222 for (;;) {
1223 /*
1224 * Figure out how much to process now and lock the user pages.
1225 */
1226 int rc;
1227 size_t cbChunk = (uintptr_t)buf & PAGE_OFFSET_MASK;
1228 pReq->PgLst.offFirstPage = (uint16_t)cbChunk;
1229 cPages = RT_ALIGN_Z(cbChunk + size, PAGE_SIZE) >> PAGE_SHIFT;
1230 if (cPages <= cMaxPages)
1231 cbChunk = size;
1232 else {
1233 cPages = cMaxPages;
1234 cbChunk = (cMaxPages << PAGE_SHIFT) - cbChunk;
1235 }
1236
1237 rc = vbsf_lock_user_pages((uintptr_t)buf, cPages, false /*fWrite*/, papPages, &fLockPgHack);
1238 if (rc == 0) {
1239 size_t iPage = cPages;
1240 while (iPage-- > 0)
1241 pReq->PgLst.aPages[iPage] = page_to_phys(papPages[iPage]);
1242 } else {
1243 cbRet = rc;
1244 break;
1245 }
1246
1247 /*
1248 * Issue the request and unlock the pages.
1249 */
1250 rc = VbglR0SfHostReqWritePgLst(sf_g->map.root, pReq, sf_r->Handle.hHost, offFile, cbChunk, cPages);
1251 if (RT_SUCCESS(rc)) {
1252 /*
1253 * Success, advance position and buffer.
1254 */
1255 uint32_t cbActual = pReq->Parms.cb32Write.u.value32;
1256 AssertStmt(cbActual <= cbChunk, cbActual = cbChunk);
1257
1258 vbsf_reg_write_sync_page_cache(inode->i_mapping, offFile, cbActual, NULL /*pbKrnlBuf*/,
1259 papPages, (uintptr_t)buf & PAGE_OFFSET_MASK, cPages);
1260 vbsf_unlock_user_pages(papPages, cPages, false /*fSetDirty*/, fLockPgHack);
1261
1262 cbRet += cbActual;
1263 offFile += cbActual;
1264 buf = (uint8_t *)buf + cbActual;
1265 size -= cbActual;
1266 if (offFile > i_size_read(inode))
1267 i_size_write(inode, offFile);
1268 sf_i->force_restat = 1; /* mtime (and size) may have changed */
1269
1270 /*
1271 * Are we done already? If so commit the new file offset.
1272 */
1273 if (!size || cbActual < cbChunk) {
1274 *off = offFile;
1275 break;
1276 }
1277 } else {
1278 vbsf_unlock_user_pages(papPages, cPages, false /*fSetDirty*/, fLockPgHack);
1279 if (rc == VERR_NO_MEMORY && cMaxPages > 4) {
1280 /*
1281 * The host probably doesn't have enough heap to handle the
1282 * request, reduce the page count and retry.
1283 */
1284 cMaxPages /= 4;
1285 Assert(cMaxPages > 0);
1286 } else {
1287 /*
1288 * If we've successfully written stuff, return it rather than
1289 * the error. (Not sure if this is such a great idea...)
1290 */
1291 if (cbRet > 0)
1292 *off = offFile;
1293 else
1294 cbRet = -EPROTO;
1295 break;
1296 }
1297 }
1298 }
1299 }
1300 if (papPagesFree)
1301 kfree(papPages);
1302 if (pReq)
1303 VbglR0PhysHeapFree(pReq);
1304 return cbRet;
1305}
1306
1307
1308/**
1309 * Write to a regular file.
1310 *
1311 * @param file the file
1312 * @param buf the buffer
1313 * @param size length of the buffer
1314 * @param off offset within the file
1315 * @returns the number of written bytes on success, Linux error code otherwise
1316 */
1317static ssize_t vbsf_reg_write(struct file *file, const char *buf, size_t size, loff_t * off)
1318{
1319 struct inode *inode = VBSF_GET_F_DENTRY(file)->d_inode;
1320 struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
1321 struct vbsf_super_info *sf_g = VBSF_GET_SUPER_INFO(inode->i_sb);
1322 struct vbsf_reg_info *sf_r = file->private_data;
1323 struct address_space *mapping = inode->i_mapping;
1324 loff_t pos;
1325
1326 SFLOGFLOW(("vbsf_reg_write: inode=%p file=%p buf=%p size=%#zx off=%#llx\n", inode, file, buf, size, *off));
1327 BUG_ON(!sf_i);
1328 BUG_ON(!sf_g);
1329 BUG_ON(!sf_r);
1330 AssertReturn(S_ISREG(inode->i_mode), -EINVAL);
1331
1332 pos = *off;
1333 /** @todo This should be handled by the host, it returning the new file
1334 * offset when appending. We may have an outdated i_size value here! */
1335 if (file->f_flags & O_APPEND)
1336 pos = i_size_read(inode);
1337
1338 /** @todo XXX Check write permission according to inode->i_mode! */
1339
1340 if (!size) {
1341 if (file->f_flags & O_APPEND) /** @todo check if this is the consensus behavior... */
1342 *off = pos;
1343 return 0;
1344 }
1345
1346 /*
1347 * If there are active writable mappings, coordinate with any
1348 * pending writes via those.
1349 */
1350 if ( mapping
1351 && mapping->nrpages > 0
1352 && mapping_writably_mapped(mapping)) {
1353#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
1354 int err = filemap_fdatawait_range(mapping, pos, pos + size - 1);
1355 if (err)
1356 return err;
1357#else
1358 /** @todo ... */
1359#endif
1360 }
1361
1362 /*
1363 * For small requests, try to use an embedded buffer provided we get a heap block
1364 * that does not cross page boundaries (see host code).
1365 */
1366 if (size <= PAGE_SIZE / 4 * 3 - RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) /* see allocator */) {
1367 uint32_t const cbReq = RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) + size;
1368 VBOXSFWRITEEMBEDDEDREQ *pReq = (VBOXSFWRITEEMBEDDEDREQ *)VbglR0PhysHeapAlloc(cbReq);
1369 if ( pReq
1370 && (PAGE_SIZE - ((uintptr_t)pReq & PAGE_OFFSET_MASK)) >= cbReq) {
1371 ssize_t cbRet;
1372 if (copy_from_user(pReq->abData, buf, size) == 0) {
1373 int vrc = VbglR0SfHostReqWriteEmbedded(sf_g->map.root, pReq, sf_r->Handle.hHost,
1374 pos, (uint32_t)size);
1375 if (RT_SUCCESS(vrc)) {
1376 cbRet = pReq->Parms.cb32Write.u.value32;
1377 AssertStmt(cbRet <= (ssize_t)size, cbRet = size);
1378 vbsf_reg_write_sync_page_cache(mapping, pos, (uint32_t)cbRet, pReq->abData,
1379 NULL /*papSrcPages*/, 0 /*offSrcPage0*/, 0 /*cSrcPages*/);
1380 pos += cbRet;
1381 *off = pos;
1382 if (pos > i_size_read(inode))
1383 i_size_write(inode, pos);
1384 } else
1385 cbRet = -EPROTO;
1386 sf_i->force_restat = 1; /* mtime (and size) may have changed */
1387 } else
1388 cbRet = -EFAULT;
1389
1390 VbglR0PhysHeapFree(pReq);
1391 return cbRet;
1392 }
1393 if (pReq)
1394 VbglR0PhysHeapFree(pReq);
1395 }
1396
1397#if 0 /* Turns out this is slightly slower than locking the pages even for 4KB reads (4.19/amd64). */
1398 /*
1399 * For medium sized requests try to use a bounce buffer.
1400 */
1401 if (size <= _64K /** @todo make this configurable? */) {
1402 void *pvBounce = kmalloc(size, GFP_KERNEL);
1403 if (pvBounce) {
1404 if (copy_from_user(pvBounce, buf, size) == 0) {
1405 VBOXSFWRITEPGLSTREQ *pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
1406 if (pReq) {
1407 ssize_t cbRet;
1408 int vrc = VbglR0SfHostReqWriteContig(sf_g->map.root, pReq, sf_r->handle, pos,
1409 (uint32_t)size, pvBounce, virt_to_phys(pvBounce));
1410 if (RT_SUCCESS(vrc)) {
1411 cbRet = pReq->Parms.cb32Write.u.value32;
1412 AssertStmt(cbRet <= (ssize_t)size, cbRet = size);
1413 vbsf_reg_write_sync_page_cache(mapping, pos, (uint32_t)cbRet, (uint8_t const *)pvBounce,
1414 NULL /*papSrcPages*/, 0 /*offSrcPage0*/, 0 /*cSrcPages*/);
1415 pos += cbRet;
1416 *off = pos;
1417 if (pos > i_size_read(inode))
1418 i_size_write(inode, pos);
1419 } else
1420 cbRet = -EPROTO;
1421 sf_i->force_restat = 1; /* mtime (and size) may have changed */
1422 VbglR0PhysHeapFree(pReq);
1423 kfree(pvBounce);
1424 return cbRet;
1425 }
1426 kfree(pvBounce);
1427 } else {
1428 kfree(pvBounce);
1429 return -EFAULT;
1430 }
1431 }
1432 }
1433#endif
1434
1435 return vbsf_reg_write_locking(file, buf, size, off, pos, inode, sf_i, sf_g, sf_r);
1436}
1437
1438#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
1439
1440/**
1441 * Companion to vbsf_iter_lock_pages().
1442 */
1443DECLINLINE(void) vbsf_iter_unlock_pages(struct iov_iter *iter, struct page **papPages, size_t cPages, bool fSetDirty)
1444{
1445 /* We don't mark kernel pages dirty: */
1446 if (iter->type & ITER_KVEC)
1447 fSetDirty = false;
1448
1449 while (cPages-- > 0)
1450 {
1451 struct page *pPage = papPages[cPages];
1452 if (fSetDirty && !PageReserved(pPage))
1453 SetPageDirty(pPage);
1454 vbsf_put_page(pPage);
1455 }
1456}
1457
1458
1459/**
1460 * Locks up to @a cMaxPages from the I/O vector iterator, advancing the
1461 * iterator.
1462 *
1463 * @returns 0 on success, negative errno value on failure.
1464 * @param iter The iterator to lock pages from.
1465 * @param fWrite Whether to write (true) or read (false) lock the pages.
1466 * @param pStash Where we stash peek results.
1467 * @param cMaxPages The maximum number of pages to get.
1468 * @param papPages Where to return the locked pages.
1469 * @param pcPages Where to return the number of pages.
1470 * @param poffPage0 Where to return the offset into the first page.
1471 * @param pcbChunk Where to return the number of bytes covered.
1472 */
1473static int vbsf_iter_lock_pages(struct iov_iter *iter, bool fWrite, struct vbsf_iter_stash *pStash, size_t cMaxPages,
1474 struct page **papPages, size_t *pcPages, size_t *poffPage0, size_t *pcbChunk)
1475{
1476 size_t cbChunk = 0;
1477 size_t cPages = 0;
1478 size_t offPage0 = 0;
1479 int rc = 0;
1480
1481 Assert(iov_iter_count(iter) + pStash->cb > 0);
1482 if (!(iter->type & ITER_KVEC)) {
1483 /*
1484 * Do we have a stashed page?
1485 */
1486 if (pStash->pPage) {
1487 papPages[0] = pStash->pPage;
1488 offPage0 = pStash->off;
1489 cbChunk = pStash->cb;
1490 cPages = 1;
1491 pStash->pPage = NULL;
1492 pStash->off = 0;
1493 pStash->cb = 0;
1494 if ( offPage0 + cbChunk < PAGE_SIZE
1495 || iov_iter_count(iter) == 0) {
1496 *poffPage0 = offPage0;
1497 *pcbChunk = cbChunk;
1498 *pcPages = cPages;
1499 SFLOGFLOW(("vbsf_iter_lock_pages: returns %d - cPages=%#zx offPage0=%#zx cbChunk=%zx (stashed)\n",
1500 rc, cPages, offPage0, cbChunk));
1501 return 0;
1502 }
1503 cMaxPages -= 1;
1504 SFLOG3(("vbsf_iter_lock_pages: Picked up stashed page: %#zx LB %#zx\n", offPage0, cbChunk));
1505 } else {
1506# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
1507 /*
1508 * Copy out our starting point to assist rewinding.
1509 */
1510 pStash->offFromEnd = iov_iter_count(iter);
1511 pStash->Copy = *iter;
1512# endif
1513 }
1514
1515 /*
1516 * Get pages segment by segment.
1517 */
1518 do {
1519 /*
1520 * Make a special case of the first time thru here, since that's
1521 * the most typical scenario.
1522 */
1523 ssize_t cbSegRet;
1524 if (cPages == 0) {
1525# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)
1526 while (!iov_iter_single_seg_count(iter)) /* Old code didn't skip empty segments which caused EFAULTs. */
1527 iov_iter_advance(iter, 0);
1528# endif
1529 cbSegRet = iov_iter_get_pages(iter, papPages, iov_iter_count(iter), cMaxPages, &offPage0);
1530 if (cbSegRet > 0) {
1531 iov_iter_advance(iter, cbSegRet);
1532 cbChunk = (size_t)cbSegRet;
1533 cPages = RT_ALIGN_Z(offPage0 + cbSegRet, PAGE_SIZE) >> PAGE_SHIFT;
1534 cMaxPages -= cPages;
1535 SFLOG3(("vbsf_iter_lock_pages: iov_iter_get_pages -> %#zx @ %#zx; %#zx pages [first]\n", cbSegRet, offPage0, cPages));
1536 if ( cMaxPages == 0
1537 || ((offPage0 + (size_t)cbSegRet) & PAGE_OFFSET_MASK))
1538 break;
1539 } else {
1540 AssertStmt(cbSegRet < 0, cbSegRet = -EFAULT);
1541 rc = (int)cbSegRet;
1542 break;
1543 }
1544 } else {
1545 /*
1546 * Probe first page of new segment to check that we've got a zero offset and
1547 * can continue on the current chunk. Stash the page if the offset isn't zero.
1548 */
1549 size_t offPgProbe;
1550 size_t cbSeg = iov_iter_single_seg_count(iter);
1551 while (!cbSeg) {
1552 iov_iter_advance(iter, 0);
1553 cbSeg = iov_iter_single_seg_count(iter);
1554 }
1555 cbSegRet = iov_iter_get_pages(iter, &papPages[cPages], iov_iter_count(iter), 1, &offPgProbe);
1556 if (cbSegRet > 0) {
1557 iov_iter_advance(iter, cbSegRet); /** @todo maybe not do this if we stash the page? */
1558 Assert(offPgProbe + cbSegRet <= PAGE_SIZE);
1559 if (offPgProbe == 0) {
1560 cbChunk += cbSegRet;
1561 cPages += 1;
1562 cMaxPages -= 1;
1563 SFLOG3(("vbsf_iter_lock_pages: iov_iter_get_pages(1) -> %#zx @ %#zx\n", cbSegRet, offPgProbe));
1564 if ( cMaxPages == 0
1565 || cbSegRet != PAGE_SIZE)
1566 break;
1567
1568 /*
1569 * Get the rest of the segment (if anything remaining).
1570 */
1571 cbSeg -= cbSegRet;
1572 if (cbSeg > 0) {
1573 cbSegRet = iov_iter_get_pages(iter, &papPages[cPages], iov_iter_count(iter), cMaxPages, &offPgProbe);
1574 if (cbSegRet > 0) {
1575 size_t const cPgRet = RT_ALIGN_Z((size_t)cbSegRet, PAGE_SIZE) >> PAGE_SHIFT;
1576 Assert(offPgProbe == 0);
1577 iov_iter_advance(iter, cbSegRet);
1578 SFLOG3(("vbsf_iter_lock_pages: iov_iter_get_pages() -> %#zx; %#zx pages\n", cbSegRet, cPgRet));
1579 cPages += cPgRet;
1580 cMaxPages -= cPgRet;
1581 cbChunk += cbSegRet;
1582 if ( cMaxPages == 0
1583 || ((size_t)cbSegRet & PAGE_OFFSET_MASK))
1584 break;
1585 } else {
1586 AssertStmt(cbSegRet < 0, cbSegRet = -EFAULT);
1587 rc = (int)cbSegRet;
1588 break;
1589 }
1590 }
1591 } else {
1592 /* The segment didn't start at a page boundary, so stash it for
1593 the next round: */
1594 SFLOGFLOW(("vbsf_iter_lock_pages: iov_iter_get_pages(1) -> %#zx @ %#zx; stashed\n", cbSegRet, offPgProbe));
1595 Assert(papPages[cPages]);
1596 pStash->pPage = papPages[cPages];
1597 pStash->off = offPgProbe;
1598 pStash->cb = cbSegRet;
1599 break;
1600 }
1601 } else {
1602 AssertStmt(cbSegRet < 0, cbSegRet = -EFAULT);
1603 rc = (int)cbSegRet;
1604 break;
1605 }
1606 }
1607 Assert(cMaxPages > 0);
1608 } while (iov_iter_count(iter) > 0);
1609
1610 } else {
1611 /*
1612 * The silly iov_iter_get_pages_alloc() function doesn't handle KVECs,
1613 * so everyone needs to do that by themselves.
1614 *
1615 * Note! Fixes here may apply to rtR0MemObjNativeLockKernel()
1616 * and vbsf_lock_user_pages_failed_check_kernel() as well.
1617 */
1618# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
1619 pStash->offFromEnd = iov_iter_count(iter);
1620 pStash->Copy = *iter;
1621# endif
1622 do {
1623 uint8_t *pbBuf;
1624 size_t offStart;
1625 size_t cPgSeg;
1626
1627 size_t cbSeg = iov_iter_single_seg_count(iter);
1628 while (!cbSeg) {
1629 iov_iter_advance(iter, 0);
1630 cbSeg = iov_iter_single_seg_count(iter);
1631 }
1632
1633# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
1634 pbBuf = iter->kvec->iov_base + iter->iov_offset;
1635# else
1636 pbBuf = iter->iov->iov_base + iter->iov_offset;
1637# endif
1638 offStart = (uintptr_t)pbBuf & PAGE_OFFSET_MASK;
1639 if (!cPages)
1640 offPage0 = offStart;
1641 else if (offStart)
1642 break;
1643
1644 cPgSeg = RT_ALIGN_Z(cbSeg, PAGE_SIZE) >> PAGE_SHIFT;
1645 if (cPgSeg > cMaxPages) {
1646 cPgSeg = cMaxPages;
1647 cbSeg = (cPgSeg << PAGE_SHIFT) - offStart;
1648 }
1649
1650 rc = vbsf_lock_kernel_pages(pbBuf, fWrite, cPgSeg, &papPages[cPages]);
1651 if (rc == 0) {
1652 iov_iter_advance(iter, cbSeg);
1653 cbChunk += cbSeg;
1654 cPages += cPgSeg;
1655 cMaxPages -= cPgSeg;
1656 if ( cMaxPages == 0
1657 || ((offStart + cbSeg) & PAGE_OFFSET_MASK) != 0)
1658 break;
1659 } else
1660 break;
1661 } while (iov_iter_count(iter) > 0);
1662 }
1663
1664 /*
1665 * Clean up if we failed; set return values.
1666 */
1667 if (rc == 0) {
1668 /* likely */
1669 } else {
1670 if (cPages > 0)
1671 vbsf_iter_unlock_pages(iter, papPages, cPages, false /*fSetDirty*/);
1672 offPage0 = cbChunk = cPages = 0;
1673 }
1674 *poffPage0 = offPage0;
1675 *pcbChunk = cbChunk;
1676 *pcPages = cPages;
1677 SFLOGFLOW(("vbsf_iter_lock_pages: returns %d - cPages=%#zx offPage0=%#zx cbChunk=%zx\n", rc, cPages, offPage0, cbChunk));
1678 return rc;
1679}
1680
1681
1682/**
1683 * Rewinds the I/O vector.
1684 */
1685static bool vbsf_iter_rewind(struct iov_iter *iter, struct vbsf_iter_stash *pStash, size_t cbToRewind, size_t cbChunk)
1686{
1687 size_t cbExtra;
1688 if (!pStash->pPage) {
1689 cbExtra = 0;
1690 } else {
1691 cbExtra = pStash->cb;
1692 vbsf_put_page(pStash->pPage);
1693 pStash->pPage = NULL;
1694 pStash->cb = 0;
1695 pStash->off = 0;
1696 }
1697
1698# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) || LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
1699 iov_iter_revert(iter, cbToRewind + cbExtra);
1700 return true;
1701# else
1702 /** @todo impl this */
1703 return false;
1704# endif
1705}
1706
1707
1708/**
1709 * Cleans up the page locking stash.
1710 */
1711DECLINLINE(void) vbsf_iter_cleanup_stash(struct iov_iter *iter, struct vbsf_iter_stash *pStash)
1712{
1713 if (pStash->pPage)
1714 vbsf_iter_rewind(iter, pStash, 0, 0);
1715}
1716
1717
1718/**
1719 * Calculates the longest span of pages we could transfer to the host in a
1720 * single request.
1721 *
1722 * @returns Page count, non-zero.
1723 * @param iter The I/O vector iterator to inspect.
1724 */
1725static size_t vbsf_iter_max_span_of_pages(struct iov_iter *iter)
1726{
1727 size_t cPages;
1728# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
1729 if (iter_is_iovec(iter) || (iter->type & ITER_KVEC)) {
1730# endif
1731 const struct iovec *pCurIov = iter->iov;
1732 size_t cLeft = iter->nr_segs;
1733 size_t cPagesSpan = 0;
1734
1735 /* iovec and kvec are identical, except for the __user tagging of iov_base. */
1736 AssertCompileMembersSameSizeAndOffset(struct iovec, iov_base, struct kvec, iov_base);
1737 AssertCompileMembersSameSizeAndOffset(struct iovec, iov_len, struct kvec, iov_len);
1738 AssertCompile(sizeof(struct iovec) == sizeof(struct kvec));
1739
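/* Note: the scan below only keeps extending the current span while segments start and
   end exactly on page boundaries; a partial first or last page terminates the span,
   and the largest span seen is what gets returned (e.g. two fully page-aligned 8 KiB
   segments count as a single 4 page span, while a segment ending mid-page caps it). */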
1740 cPages = 1;
1741 AssertReturn(cLeft > 0, cPages);
1742
1743 /* Special case: segment offset. */
1744 if (iter->iov_offset > 0) {
1745 if (iter->iov_offset < pCurIov->iov_len) {
1746 size_t const cbSegLeft = pCurIov->iov_len - iter->iov_offset;
1747 size_t const offPage0 = ((uintptr_t)pCurIov->iov_base + iter->iov_offset) & PAGE_OFFSET_MASK;
1748 cPages = cPagesSpan = RT_ALIGN_Z(offPage0 + cbSegLeft, PAGE_SIZE) >> PAGE_SHIFT;
1749 if ((offPage0 + cbSegLeft) & PAGE_OFFSET_MASK)
1750 cPagesSpan = 0;
1751 }
1752 SFLOGFLOW(("vbsf_iter: seg[0]= %p LB %#zx\n", pCurIov->iov_base, pCurIov->iov_len));
1753 pCurIov++;
1754 cLeft--;
1755 }
1756
1757 /* Full segments. */
1758 while (cLeft-- > 0) {
1759 if (pCurIov->iov_len > 0) {
1760 size_t const offPage0 = (uintptr_t)pCurIov->iov_base & PAGE_OFFSET_MASK;
1761 if (offPage0 == 0) {
1762 if (!(pCurIov->iov_len & PAGE_OFFSET_MASK)) {
1763 cPagesSpan += pCurIov->iov_len >> PAGE_SHIFT;
1764 } else {
1765 cPagesSpan += RT_ALIGN_Z(pCurIov->iov_len, PAGE_SIZE) >> PAGE_SHIFT;
1766 if (cPagesSpan > cPages)
1767 cPages = cPagesSpan;
1768 cPagesSpan = 0;
1769 }
1770 } else {
1771 if (cPagesSpan > cPages)
1772 cPages = cPagesSpan;
1773 if (!((offPage0 + pCurIov->iov_len) & PAGE_OFFSET_MASK)) {
1774 cPagesSpan = pCurIov->iov_len >> PAGE_SHIFT;
1775 } else {
1776 cPagesSpan += RT_ALIGN_Z(offPage0 + pCurIov->iov_len, PAGE_SIZE) >> PAGE_SHIFT;
1777 if (cPagesSpan > cPages)
1778 cPages = cPagesSpan;
1779 cPagesSpan = 0;
1780 }
1781 }
1782 }
1783 SFLOGFLOW(("vbsf_iter: seg[%u]= %p LB %#zx\n", iter->nr_segs - cLeft, pCurIov->iov_base, pCurIov->iov_len));
1784 pCurIov++;
1785 }
1786 if (cPagesSpan > cPages)
1787 cPages = cPagesSpan;
1788# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
1789 } else {
1790 /* Won't bother with accurate counts for the next two types, just make
1791 some rough estimates (do pipes have segments?): */
1792 size_t cSegs = iter->type & ITER_BVEC ? RT_MAX(1, iter->nr_segs) : 1;
1793 cPages = (iov_iter_count(iter) + (PAGE_SIZE * 2 - 2) * cSegs) >> PAGE_SHIFT;
1794 }
1795# endif
1796 SFLOGFLOW(("vbsf_iter_max_span_of_pages: returns %#zx\n", cPages));
1797 return cPages;
1798}
1799
1800
1801/**
1802 * Worker for vbsf_reg_read_iter() that deals with larger reads using page
1803 * locking.
1804 */
1805static ssize_t vbsf_reg_read_iter_locking(struct kiocb *kio, struct iov_iter *iter, size_t cbToRead,
1806 struct vbsf_super_info *sf_g, struct vbsf_reg_info *sf_r)
1807{
1808 /*
1809 * Estimate how many pages we may possibly submit in a single request so
1810 * that we can allocate matching request buffer and page array.
1811 */
1812 struct page *apPagesStack[16];
1813 struct page **papPages = &apPagesStack[0];
1814 struct page **papPagesFree = NULL;
1815 VBOXSFREADPGLSTREQ *pReq;
1816 ssize_t cbRet = 0;
1817 size_t cMaxPages = vbsf_iter_max_span_of_pages(iter);
1818 cMaxPages = RT_MIN(RT_MAX(sf_g->cMaxIoPages, 2), cMaxPages);
1819
1820 pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages]));
1821 while (!pReq && cMaxPages > 4) {
1822 cMaxPages /= 2;
1823 pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages]));
1824 }
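/* If the physical heap cannot provide a request covering the whole span, the loop
   above keeps halving the page count until an allocation succeeds or the count gets
   down to a handful of pages; the transfer is then simply split across more requests. */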
1825 if (pReq && cMaxPages > RT_ELEMENTS(apPagesStack))
1826 papPagesFree = papPages = kmalloc(cMaxPages * sizeof(papPages[0]), GFP_KERNEL);
1827 if (pReq && papPages) {
1828
1829 /*
1830 * The read loop.
1831 */
1832 struct vbsf_iter_stash Stash = VBSF_ITER_STASH_INITIALIZER;
1833 do {
1834 /*
1835 * Grab as many pages as we can. This means that if adjacent
1836 * segments both start and end at a page boundary, we can
1837 * do them both in the same transfer from the host.
1838 */
1839 size_t cPages = 0;
1840 size_t cbChunk = 0;
1841 size_t offPage0 = 0;
1842 int rc = vbsf_iter_lock_pages(iter, true /*fWrite*/, &Stash, cMaxPages, papPages, &cPages, &offPage0, &cbChunk);
1843 if (rc == 0) {
1844 size_t iPage = cPages;
1845 while (iPage-- > 0)
1846 pReq->PgLst.aPages[iPage] = page_to_phys(papPages[iPage]);
1847 pReq->PgLst.offFirstPage = (uint16_t)offPage0;
1848 AssertStmt(cbChunk <= cbToRead, cbChunk = cbToRead);
1849 } else {
1850 cbRet = rc;
1851 break;
1852 }
1853
1854 /*
1855 * Issue the request and unlock the pages.
1856 */
1857 rc = VbglR0SfHostReqReadPgLst(sf_g->map.root, pReq, sf_r->Handle.hHost, kio->ki_pos, cbChunk, cPages);
1858 SFLOGFLOW(("vbsf_reg_read_iter_locking: VbglR0SfHostReqReadPgLst -> %d (cbActual=%#x cbChunk=%#zx of %#zx cPages=%#zx offPage0=%#x\n",
1859 rc, pReq->Parms.cb32Read.u.value32, cbChunk, cbToRead, cPages, offPage0));
1860
1861 vbsf_iter_unlock_pages(iter, papPages, cPages, true /*fSetDirty*/);
1862
1863 if (RT_SUCCESS(rc)) {
1864 /*
1865 * Success, advance position and buffer.
1866 */
1867 uint32_t cbActual = pReq->Parms.cb32Read.u.value32;
1868 AssertStmt(cbActual <= cbChunk, cbActual = cbChunk);
1869 cbRet += cbActual;
1870 kio->ki_pos += cbActual;
1871 cbToRead -= cbActual;
1872
1873 /*
1874 * Are we done already?
1875 */
1876 if (!cbToRead)
1877 break;
1878 if (cbActual < cbChunk) { /* We ASSUME end-of-file here. */
1879 if (vbsf_iter_rewind(iter, &Stash, cbChunk - cbActual, cbActual))
1880 iov_iter_truncate(iter, 0);
1881 break;
1882 }
1883 } else {
1884 /*
1885 * Try to rewind the iter structure.
1886 */
1887 bool const fRewindOkay = vbsf_iter_rewind(iter, &Stash, cbChunk, cbChunk);
1888 if (rc == VERR_NO_MEMORY && cMaxPages > 4 && fRewindOkay) {
1889 /*
1890 * The host probably doesn't have enough heap to handle the
1891 * request, reduce the page count and retry.
1892 */
1893 cMaxPages /= 4;
1894 Assert(cMaxPages > 0);
1895 } else {
1896 /*
1897 * If we've successfully read stuff, return it rather than
1898 * the error. (Not sure if this is such a great idea...)
1899 */
1900 if (cbRet <= 0)
1901 cbRet = -EPROTO;
1902 break;
1903 }
1904 }
1905 } while (cbToRead > 0);
1906
1907 vbsf_iter_cleanup_stash(iter, &Stash);
1908 }
1909 else
1910 cbRet = -ENOMEM;
1911 if (papPagesFree)
1912 kfree(papPages);
1913 if (pReq)
1914 VbglR0PhysHeapFree(pReq);
1915 SFLOGFLOW(("vbsf_reg_read_iter_locking: returns %#zx (%zd)\n", cbRet, cbRet));
1916 return cbRet;
1917}
1918
1919
1920/**
1921 * Read into I/O vector iterator.
1922 *
1923 * @returns Number of bytes read on success, negative errno on error.
1924 * @param kio The kernel I/O control block (or something like that).
1925 * @param iter The I/O vector iterator describing the buffer.
1926 */
1927# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
1928static ssize_t vbsf_reg_read_iter(struct kiocb *kio, struct iov_iter *iter)
1929# else
1930static ssize_t vbsf_reg_aio_read(struct kiocb *kio, const struct iovec *iov, unsigned long cSegs, loff_t offFile)
1931# endif
1932{
1933# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
1934 struct vbsf_iov_iter fake_iter = VBSF_IOV_ITER_INITIALIZER(cSegs, iov, 0 /*write*/);
1935 struct vbsf_iov_iter *iter = &fake_iter;
1936# endif
1937 size_t cbToRead = iov_iter_count(iter);
1938 struct inode *inode = VBSF_GET_F_DENTRY(kio->ki_filp)->d_inode;
1939 struct address_space *mapping = inode->i_mapping;
1940
1941 struct vbsf_reg_info *sf_r = kio->ki_filp->private_data;
1942 struct vbsf_super_info *sf_g = VBSF_GET_SUPER_INFO(inode->i_sb);
1943
1944 SFLOGFLOW(("vbsf_reg_read_iter: inode=%p file=%p size=%#zx off=%#llx type=%#x\n",
1945 inode, kio->ki_filp, cbToRead, kio->ki_pos, iter->type));
1946 AssertReturn(S_ISREG(inode->i_mode), -EINVAL);
1947
1948 /*
1949 * Do we have anything at all to do here?
1950 */
1951 if (!cbToRead)
1952 return 0;
1953
1954 /*
1955 * If there is a mapping and O_DIRECT isn't in effect, we must heed
1956 * dirty pages in the mapping and read from them. For simplicity
1957 * though, we just do page cache reading when there are writable
1958 * mappings around with any kind of pages loaded.
1959 */
1960 if (vbsf_should_use_cached_read(kio->ki_filp, mapping, sf_g)) {
1961# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
1962 return generic_file_read_iter(kio, iter);
1963# else
1964 return generic_file_aio_read(kio, iov, cSegs, offFile);
1965# endif
1966 }
1967
1968 /*
1969 * For now we reject async I/O requests.
1970 */
1971 if (!is_sync_kiocb(kio)) {
1972 SFLOGFLOW(("vbsf_reg_read_iter: async I/O not yet supported\n")); /** @todo extend FsPerf with AIO tests. */
1973 return -EOPNOTSUPP;
1974 }
1975
1976 /*
1977 * For small requests, try to use an embedded buffer provided we get a heap block
1978 * that does not cross page boundaries (see host code).
1979 */
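/* With 4 KiB pages this embedded path covers reads of up to roughly 3 KiB minus the
   request header; anything larger falls through to the page-locking path below. */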
1980 if (cbToRead <= PAGE_SIZE / 4 * 3 - RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0]) /* see allocator */) {
1981 uint32_t const cbReq = RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0]) + cbToRead;
1982 VBOXSFREADEMBEDDEDREQ *pReq = (VBOXSFREADEMBEDDEDREQ *)VbglR0PhysHeapAlloc(cbReq);
1983 if (pReq) {
1984 if ((PAGE_SIZE - ((uintptr_t)pReq & PAGE_OFFSET_MASK)) >= cbReq) {
1985 ssize_t cbRet;
1986 int vrc = VbglR0SfHostReqReadEmbedded(sf_g->map.root, pReq, sf_r->Handle.hHost, kio->ki_pos, (uint32_t)cbToRead);
1987 if (RT_SUCCESS(vrc)) {
1988 cbRet = pReq->Parms.cb32Read.u.value32;
1989 AssertStmt(cbRet <= (ssize_t)cbToRead, cbRet = cbToRead);
1990 if (copy_to_iter(pReq->abData, cbRet, iter) == cbRet) {
1991 kio->ki_pos += cbRet;
1992 if (cbRet < cbToRead)
1993 iov_iter_truncate(iter, 0);
1994 } else
1995 cbRet = -EFAULT;
1996 } else
1997 cbRet = -EPROTO;
1998 VbglR0PhysHeapFree(pReq);
1999 SFLOGFLOW(("vbsf_reg_read_iter: returns %#zx (%zd)\n", cbRet, cbRet));
2000 return cbRet;
2001 }
2002 VbglR0PhysHeapFree(pReq);
2003 }
2004 }
2005
2006 /*
2007 * Otherwise do the page locking thing.
2008 */
2009 return vbsf_reg_read_iter_locking(kio, iter, cbToRead, sf_g, sf_r);
2010}
2011
2012
2013/**
2014 * Worker for vbsf_reg_write_iter() that deals with larger writes using page
2015 * locking.
2016 */
2017static ssize_t vbsf_reg_write_iter_locking(struct kiocb *kio, struct iov_iter *iter, size_t cbToWrite, loff_t offFile,
2018 struct vbsf_super_info *sf_g, struct vbsf_reg_info *sf_r,
2019 struct inode *inode, struct vbsf_inode_info *sf_i, struct address_space *mapping)
2020{
2021 /*
2022 * Estimate how many pages we may possibly submit in a single request so
2023 * that we can allocate matching request buffer and page array.
2024 */
2025 struct page *apPagesStack[16];
2026 struct page **papPages = &apPagesStack[0];
2027 struct page **papPagesFree = NULL;
2028 VBOXSFWRITEPGLSTREQ *pReq;
2029 ssize_t cbRet = 0;
2030 size_t cMaxPages = vbsf_iter_max_span_of_pages(iter);
2031 cMaxPages = RT_MIN(RT_MAX(sf_g->cMaxIoPages, 2), cMaxPages);
2032
2033 pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages]));
2034 while (!pReq && cMaxPages > 4) {
2035 cMaxPages /= 2;
2036 pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages]));
2037 }
2038 if (pReq && cMaxPages > RT_ELEMENTS(apPagesStack))
2039 papPagesFree = papPages = kmalloc(cMaxPages * sizeof(papPages[0]), GFP_KERNEL);
2040 if (pReq && papPages) {
2041
2042 /*
2043 * The write loop.
2044 */
2045 struct vbsf_iter_stash Stash = VBSF_ITER_STASH_INITIALIZER;
2046 do {
2047 /*
2048 * Grab as many pages as we can. This means that if adjacent
2049 * segments both start and end at a page boundary, we can
2050 * do them both in the same transfer to the host.
2051 */
2052 size_t cPages = 0;
2053 size_t cbChunk = 0;
2054 size_t offPage0 = 0;
2055 int rc = vbsf_iter_lock_pages(iter, false /*fWrite*/, &Stash, cMaxPages, papPages, &cPages, &offPage0, &cbChunk);
2056 if (rc == 0) {
2057 size_t iPage = cPages;
2058 while (iPage-- > 0)
2059 pReq->PgLst.aPages[iPage] = page_to_phys(papPages[iPage]);
2060 pReq->PgLst.offFirstPage = (uint16_t)offPage0;
2061 AssertStmt(cbChunk <= cbToWrite, cbChunk = cbToWrite);
2062 } else {
2063 cbRet = rc;
2064 break;
2065 }
2066
2067 /*
2068 * Issue the request and unlock the pages.
2069 */
2070 rc = VbglR0SfHostReqWritePgLst(sf_g->map.root, pReq, sf_r->Handle.hHost, offFile, cbChunk, cPages);
2071 SFLOGFLOW(("vbsf_reg_write_iter_locking: VbglR0SfHostReqWritePgLst -> %d (cbActual=%#x cbChunk=%#zx of %#zx cPages=%#zx offPage0=%#x\n",
2072 rc, pReq->Parms.cb32Write.u.value32, cbChunk, cbToWrite, cPages, offPage0));
2073 if (RT_SUCCESS(rc)) {
2074 /*
2075 * Success, advance position and buffer.
2076 */
2077 uint32_t cbActual = pReq->Parms.cb32Write.u.value32;
2078 AssertStmt(cbActual <= cbChunk, cbActual = cbChunk);
2079
2080 vbsf_reg_write_sync_page_cache(mapping, offFile, cbActual, NULL /*pbSrcBuf*/, papPages, offPage0, cPages);
2081 vbsf_iter_unlock_pages(iter, papPages, cPages, false /*fSetDirty*/);
2082
2083 cbRet += cbActual;
2084 offFile += cbActual;
2085 kio->ki_pos = offFile;
2086 cbToWrite -= cbActual;
2087 if (offFile > i_size_read(inode))
2088 i_size_write(inode, offFile);
2089 sf_i->force_restat = 1; /* mtime (and size) may have changed */
2090
2091 /*
2092 * Are we done already?
2093 */
2094 if (!cbToWrite)
2095 break;
2096 if (cbActual < cbChunk) { /* We ASSUME end-of-file here. */
2097 if (vbsf_iter_rewind(iter, &Stash, cbChunk - cbActual, cbActual))
2098 iov_iter_truncate(iter, 0);
2099 break;
2100 }
2101 } else {
2102 /*
2103 * Try to rewind the iter structure.
2104 */
2105 bool fRewindOkay;
2106 vbsf_iter_unlock_pages(iter, papPages, cPages, false /*fSetDirty*/);
2107 fRewindOkay = vbsf_iter_rewind(iter, &Stash, cbChunk, cbChunk);
2108 if (rc == VERR_NO_MEMORY && cMaxPages > 4 && fRewindOkay) {
2109 /*
2110 * The host probably doesn't have enough heap to handle the
2111 * request, reduce the page count and retry.
2112 */
2113 cMaxPages /= 4;
2114 Assert(cMaxPages > 0);
2115 } else {
2116 /*
2117 * If we've successfully written stuff, return it rather than
2118 * the error. (Not sure if this is such a great idea...)
2119 */
2120 if (cbRet <= 0)
2121 cbRet = -EPROTO;
2122 break;
2123 }
2124 }
2125 } while (cbToWrite > 0);
2126
2127 vbsf_iter_cleanup_stash(iter, &Stash);
2128 }
2129 else
2130 cbRet = -ENOMEM;
2131 if (papPagesFree)
2132 kfree(papPages);
2133 if (pReq)
2134 VbglR0PhysHeapFree(pReq);
2135 SFLOGFLOW(("vbsf_reg_write_iter_locking: returns %#zx (%zd)\n", cbRet, cbRet));
2136 return cbRet;
2137}
2138
2139
2140/**
2141 * Write from I/O vector iterator.
2142 *
2143 * @returns Number of bytes written on success, negative errno on error.
2144 * @param kio The kernel I/O control block (or something like that).
2145 * @param iter The I/O vector iterator describing the buffer.
2146 */
2147# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
2148static ssize_t vbsf_reg_write_iter(struct kiocb *kio, struct iov_iter *iter)
2149# else
2150static ssize_t vbsf_reg_aio_write(struct kiocb *kio, const struct iovec *iov, unsigned long cSegs, loff_t offFile)
2151# endif
2152{
2153# if LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0)
2154 struct vbsf_iov_iter fake_iter = VBSF_IOV_ITER_INITIALIZER(cSegs, iov, 1 /*write*/);
2155 struct vbsf_iov_iter *iter = &fake_iter;
2156# endif
2157 size_t cbToWrite = iov_iter_count(iter);
2158 struct inode *inode = VBSF_GET_F_DENTRY(kio->ki_filp)->d_inode;
2159 struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
2160 struct address_space *mapping = inode->i_mapping;
2161
2162 struct vbsf_reg_info *sf_r = kio->ki_filp->private_data;
2163 struct vbsf_super_info *sf_g = VBSF_GET_SUPER_INFO(inode->i_sb);
2164# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
2165 loff_t offFile = kio->ki_pos;
2166# endif
2167
2168 SFLOGFLOW(("vbsf_reg_write_iter: inode=%p file=%p size=%#zx off=%#llx type=%#x\n",
2169 inode, kio->ki_filp, cbToWrite, offFile, iter->type));
2170 AssertReturn(S_ISREG(inode->i_mode), -EINVAL);
2171
2172 /*
2173 * Enforce APPEND flag.
2174 */
2175 /** @todo This should be handled by the host, it returning the new file
2176 * offset when appending. We may have an outdated i_size value here! */
2177# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
2178 if (kio->ki_flags & IOCB_APPEND)
2179# else
2180 if (kio->ki_filp->f_flags & O_APPEND)
2181# endif
2182 kio->ki_pos = offFile = i_size_read(inode);
2183
2184 /*
2185 * Do we have anything at all to do here?
2186 */
2187 if (!cbToWrite)
2188 return 0;
2189
2190 /*
2191 * For now we reject async I/O requests.
2192 */
2193 if (!is_sync_kiocb(kio)) {
2194 SFLOGFLOW(("vbsf_reg_write_iter: async I/O not yet supported\n")); /** @todo extend FsPerf with AIO tests. */
2195 return -EOPNOTSUPP;
2196 }
2197
2198 /*
2199 * If there are active writable mappings, coordinate with any
2200 * pending writes via those.
2201 */
2202 if ( mapping
2203 && mapping->nrpages > 0
2204 && mapping_writably_mapped(mapping)) {
2205# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
2206 int err = filemap_fdatawait_range(mapping, offFile, offFile + cbToWrite - 1);
2207 if (err)
2208 return err;
2209# else
2210 /** @todo ... */
2211# endif
2212 }
2213
2214 /*
2215 * For small requests, try to use an embedded buffer provided we get a heap block
2216 * that does not cross page boundaries (see host code).
2217 */
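/* Unlike the read side, the data is copied out of the iterator before the host call,
   so on a short write the iterator is wound back (iov_iter_revert on newer kernels)
   to match the number of bytes the host actually accepted. */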
2218 if (cbToWrite <= PAGE_SIZE / 4 * 3 - RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) /* see allocator */) {
2219 uint32_t const cbReq = RT_UOFFSETOF(VBOXSFWRITEEMBEDDEDREQ, abData[0]) + cbToWrite;
2220 VBOXSFWRITEEMBEDDEDREQ *pReq = (VBOXSFWRITEEMBEDDEDREQ *)VbglR0PhysHeapAlloc(cbReq);
2221 if (pReq) {
2222 if ((PAGE_SIZE - ((uintptr_t)pReq & PAGE_OFFSET_MASK)) >= cbReq) {
2223 ssize_t cbRet;
2224 if (copy_from_iter(pReq->abData, cbToWrite, iter) == cbToWrite) {
2225 int vrc = VbglR0SfHostReqWriteEmbedded(sf_g->map.root, pReq, sf_r->Handle.hHost,
2226 offFile, (uint32_t)cbToWrite);
2227 if (RT_SUCCESS(vrc)) {
2228 cbRet = pReq->Parms.cb32Write.u.value32;
2229 AssertStmt(cbRet <= (ssize_t)cbToWrite, cbRet = cbToWrite);
2230 vbsf_reg_write_sync_page_cache(mapping, offFile, (uint32_t)cbRet, pReq->abData,
2231 NULL /*papSrcPages*/, 0 /*offSrcPage0*/, 0 /*cSrcPages*/);
2232 kio->ki_pos = offFile += cbRet;
2233 if (offFile > i_size_read(inode))
2234 i_size_write(inode, offFile);
2235# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
2236 if ((size_t)cbRet < cbToWrite)
2237 iov_iter_revert(iter, cbToWrite - cbRet);
2238# endif
2239 } else
2240 cbRet = -EPROTO;
2241 sf_i->force_restat = 1; /* mtime (and size) may have changed */
2242 } else
2243 cbRet = -EFAULT;
2244 VbglR0PhysHeapFree(pReq);
2245 SFLOGFLOW(("vbsf_reg_write_iter: returns %#zx (%zd)\n", cbRet, cbRet));
2246 return cbRet;
2247 }
2248 VbglR0PhysHeapFree(pReq);
2249 }
2250 }
2251
2252 /*
2253 * Otherwise do the page locking thing.
2254 */
2255 return vbsf_reg_write_iter_locking(kio, iter, cbToWrite, offFile, sf_g, sf_r, inode, sf_i, mapping);
2256}
2257
2258#endif /* >= 2.6.19 */
2259
2260/**
2261 * Used by vbsf_reg_open() and vbsf_inode_atomic_open() to convert Linux open flags (O_XXX) to shared folder create flags.
2262 *
2263 * @returns shared folders create flags.
2264 * @param fLnxOpen The linux O_XXX flags to convert.
2265 * @param pfHandle Pointer to vbsf_handle::fFlags.
2266 * @param pszCaller Caller, for logging purposes.
2267 */
2268uint32_t vbsf_linux_oflags_to_vbox(unsigned fLnxOpen, uint32_t *pfHandle, const char *pszCaller)
2269{
2270 uint32_t fVBoxFlags = SHFL_CF_ACCESS_DENYNONE;
2271
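 /* Example: open("foo", O_CREAT | O_TRUNC | O_WRONLY) maps to SHFL_CF_ACCESS_DENYNONE
    | SHFL_CF_ACT_CREATE_IF_NEW | SHFL_CF_ACT_OVERWRITE_IF_EXISTS | SHFL_CF_ACCESS_WRITE,
    with *pfHandle picking up VBSF_HANDLE_F_WRITE. */
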
2272 /*
2273 * Disposition.
2274 */
2275 if (fLnxOpen & O_CREAT) {
2276 Log(("%s: O_CREAT set\n", pszCaller));
2277 fVBoxFlags |= SHFL_CF_ACT_CREATE_IF_NEW;
2278 if (fLnxOpen & O_EXCL) {
2279 Log(("%s: O_EXCL set\n", pszCaller));
2280 fVBoxFlags |= SHFL_CF_ACT_FAIL_IF_EXISTS;
2281 } else if (fLnxOpen & O_TRUNC) {
2282 Log(("%s: O_TRUNC set\n", pszCaller));
2283 fVBoxFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
2284 } else
2285 fVBoxFlags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
2286 } else {
2287 fVBoxFlags |= SHFL_CF_ACT_FAIL_IF_NEW;
2288 if (fLnxOpen & O_TRUNC) {
2289 Log(("%s: O_TRUNC set\n", pszCaller));
2290 fVBoxFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
2291 }
2292 }
2293
2294 /*
2295 * Access.
2296 */
2297 switch (fLnxOpen & O_ACCMODE) {
2298 case O_RDONLY:
2299 fVBoxFlags |= SHFL_CF_ACCESS_READ;
2300 *pfHandle |= VBSF_HANDLE_F_READ;
2301 break;
2302
2303 case O_WRONLY:
2304 fVBoxFlags |= SHFL_CF_ACCESS_WRITE;
2305 *pfHandle |= VBSF_HANDLE_F_WRITE;
2306 break;
2307
2308 case O_RDWR:
2309 fVBoxFlags |= SHFL_CF_ACCESS_READWRITE;
2310 *pfHandle |= VBSF_HANDLE_F_READ | VBSF_HANDLE_F_WRITE;
2311 break;
2312
2313 default:
2314 BUG();
2315 }
2316
2317 if (fLnxOpen & O_APPEND) {
2318 Log(("%s: O_APPEND set\n", pszCaller));
2319 fVBoxFlags |= SHFL_CF_ACCESS_APPEND;
2320 *pfHandle |= VBSF_HANDLE_F_APPEND;
2321 }
2322
2323 /*
2324 * Only directories?
2325 */
2326 if (fLnxOpen & O_DIRECTORY) {
2327 Log(("%s: O_DIRECTORY set\n", pszCaller));
2328 fVBoxFlags |= SHFL_CF_DIRECTORY;
2329 }
2330
2331 return fVBoxFlags;
2332}
2333
2334
2335/**
2336 * Open a regular file.
2337 *
2338 * @param inode the inode
2339 * @param file the file
2340 * @returns 0 on success, Linux error code otherwise
2341 */
2342static int vbsf_reg_open(struct inode *inode, struct file *file)
2343{
2344 int rc, rc_linux = 0;
2345 struct vbsf_super_info *sf_g = VBSF_GET_SUPER_INFO(inode->i_sb);
2346 struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
2347 struct dentry *dentry = VBSF_GET_F_DENTRY(file);
2348 struct vbsf_reg_info *sf_r;
2349 VBOXSFCREATEREQ *pReq;
2350
2351 SFLOGFLOW(("vbsf_reg_open: inode=%p file=%p flags=%#x %s\n", inode, file, file->f_flags, sf_i ? sf_i->path->String.ach : NULL));
2352 BUG_ON(!sf_g);
2353 BUG_ON(!sf_i);
2354
2355 sf_r = kmalloc(sizeof(*sf_r), GFP_KERNEL);
2356 if (!sf_r) {
2357 LogRelFunc(("could not allocate reg info\n"));
2358 return -ENOMEM;
2359 }
2360
2361 RTListInit(&sf_r->Handle.Entry);
2362 sf_r->Handle.cRefs = 1;
2363 sf_r->Handle.fFlags = VBSF_HANDLE_F_FILE | VBSF_HANDLE_F_MAGIC;
2364 sf_r->Handle.hHost = SHFL_HANDLE_NIL;
2365
2366 /* Already open? */
2367 if (sf_i->handle != SHFL_HANDLE_NIL) {
2368 /*
2369 * This inode was created with vbsf_create_worker(). Check the CreateFlags:
2370 * O_CREAT, O_TRUNC: inherently true (file was just created). Not sure
2371 * about the access flags (SHFL_CF_ACCESS_*).
2372 */
2373 sf_i->force_restat = 1;
2374 sf_r->Handle.hHost = sf_i->handle;
2375 sf_i->handle = SHFL_HANDLE_NIL;
2376 file->private_data = sf_r;
2377
2378 sf_r->Handle.fFlags |= VBSF_HANDLE_F_READ | VBSF_HANDLE_F_WRITE; /** @todo fix */
2379 vbsf_handle_append(sf_i, &sf_r->Handle);
2380 SFLOGFLOW(("vbsf_reg_open: returns 0 (#1) - sf_i=%p hHost=%#llx\n", sf_i, sf_r->Handle.hHost));
2381 return 0;
2382 }
2383
2384 pReq = (VBOXSFCREATEREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq) + sf_i->path->u16Size);
2385 if (!pReq) {
2386 kfree(sf_r);
2387 LogRelFunc(("Failed to allocate a VBOXSFCREATEREQ buffer!\n"));
2388 return -ENOMEM;
2389 }
2390 memcpy(&pReq->StrPath, sf_i->path, SHFLSTRING_HEADER_SIZE + sf_i->path->u16Size);
2391 RT_ZERO(pReq->CreateParms);
2392 pReq->CreateParms.Handle = SHFL_HANDLE_NIL;
2393
2394 /* We check the value of pReq->CreateParms.Handle afterwards to
2395 * find out if the call succeeded or failed, as the API does not seem
2396 * to cleanly distinguish error and informational messages.
2397 *
2398 * Furthermore, we must set pReq->CreateParms.Handle to SHFL_HANDLE_NIL
2399 * to make the shared folders host service use our fMode parameter */
2400
2401 /* We ignore O_EXCL, as the Linux kernel seems to call create
2402 beforehand itself, so O_EXCL should always fail. */
2403 pReq->CreateParms.CreateFlags = vbsf_linux_oflags_to_vbox(file->f_flags & ~O_EXCL, &sf_r->Handle.fFlags, __FUNCTION__);
2404 pReq->CreateParms.Info.Attr.fMode = inode->i_mode;
2405 LogFunc(("vbsf_reg_open: calling VbglR0SfHostReqCreate, file %s, flags=%#x, %#x\n",
2406 sf_i->path->String.utf8, file->f_flags, pReq->CreateParms.CreateFlags));
2407 rc = VbglR0SfHostReqCreate(sf_g->map.root, pReq);
2408 if (RT_FAILURE(rc)) {
2409 LogFunc(("VbglR0SfHostReqCreate failed flags=%d,%#x rc=%Rrc\n", file->f_flags, pReq->CreateParms.CreateFlags, rc));
2410 kfree(sf_r);
2411 VbglR0PhysHeapFree(pReq);
2412 return -RTErrConvertToErrno(rc);
2413 }
2414
2415 if (pReq->CreateParms.Handle != SHFL_HANDLE_NIL) {
2416 vbsf_dentry_chain_increase_ttl(dentry);
2417 rc_linux = 0;
2418 } else {
2419 switch (pReq->CreateParms.Result) {
2420 case SHFL_PATH_NOT_FOUND:
2421 rc_linux = -ENOENT;
2422 break;
2423 case SHFL_FILE_NOT_FOUND:
2424 /** @todo sf_dentry_increase_parent_ttl(file->f_dentry); if we can trust it. */
2425 rc_linux = -ENOENT;
2426 break;
2427 case SHFL_FILE_EXISTS:
2428 vbsf_dentry_chain_increase_ttl(dentry);
2429 rc_linux = -EEXIST;
2430 break;
2431 default:
2432 vbsf_dentry_chain_increase_parent_ttl(dentry);
2433 rc_linux = 0;
2434 break;
2435 }
2436 }
2437
2438 sf_i->force_restat = 1; /** @todo Why?!? */
2439 sf_r->Handle.hHost = pReq->CreateParms.Handle;
2440 file->private_data = sf_r;
2441 vbsf_handle_append(sf_i, &sf_r->Handle);
2442 VbglR0PhysHeapFree(pReq);
2443 SFLOGFLOW(("vbsf_reg_open: returns 0 (#2) - sf_i=%p hHost=%#llx\n", sf_i, sf_r->Handle.hHost));
2444 return rc_linux;
2445}
2446
2447
2448/**
2449 * Close a regular file.
2450 *
2451 * @param inode the inode
2452 * @param file the file
2453 * @returns 0 on success, Linux error code otherwise
2454 */
2455static int vbsf_reg_release(struct inode *inode, struct file *file)
2456{
2457 struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
2458 struct vbsf_reg_info *sf_r = file->private_data;
2459
2460 SFLOGFLOW(("vbsf_reg_release: inode=%p file=%p\n", inode, file));
2461 if (sf_r) {
2462 struct vbsf_super_info *sf_g = VBSF_GET_SUPER_INFO(inode->i_sb);
2463 Assert(sf_g);
2464
2465#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
2466 /* See the smbfs source (file.c). mmap in particular can cause data to be
2467 * written to the file after it is closed, which we can't cope with. We
2468 * copy and paste the body of filemap_write_and_wait() here as it was not
2469 * defined before 2.6.6 and not exported until quite a bit later. */
2470 /* filemap_write_and_wait(inode->i_mapping); */
2471 if (inode->i_mapping->nrpages
2472 && filemap_fdatawrite(inode->i_mapping) != -EIO)
2473 filemap_fdatawait(inode->i_mapping);
2474#endif
2475
2476 /* Release sf_r, closing the handle if we're the last user. */
2477 file->private_data = NULL;
2478 vbsf_handle_release(&sf_r->Handle, sf_g, "vbsf_reg_release");
2479
2480 sf_i->handle = SHFL_HANDLE_NIL;
2481 }
2482 return 0;
2483}
2484
2485
2486/**
2487 * Wrapper around generic/default seek function that ensures that we've got
2488 * the up-to-date file size when doing anything relative to EOF.
2489 *
2490 * The issue is that the host may extend the file while we weren't looking and
2491 * if the caller wishes to append data, it may end up overwriting existing data
2492 * if we operate with a stale size. So, we always retrieve the file size on EOF
2493 * relative seeks.
2494 */
2495static loff_t vbsf_reg_llseek(struct file *file, loff_t off, int whence)
2496{
2497 SFLOGFLOW(("vbsf_reg_llseek: file=%p off=%lld whence=%d\n", file, off, whence));
2498
2499 switch (whence) {
2500#ifdef SEEK_HOLE
2501 case SEEK_HOLE:
2502 case SEEK_DATA:
2503#endif
2504 case SEEK_END: {
2505 struct vbsf_reg_info *sf_r = file->private_data;
2506 int rc = vbsf_inode_revalidate_with_handle(VBSF_GET_F_DENTRY(file), sf_r->Handle.hHost,
2507 true /*fForce*/, false /*fInodeLocked*/);
2508 if (rc == 0)
2509 break;
2510 return rc;
2511 }
2512 }
2513
2514#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 8)
2515 return generic_file_llseek(file, off, whence);
2516#else
2517 return default_llseek(file, off, whence);
2518#endif
2519}
2520
2521
2522/**
2523 * Flush region of file - chiefly mmap/msync.
2524 *
2525 * We cannot use the noop_fsync / simple_sync_file here as that means
2526 * msync(,,MS_SYNC) will return before the data hits the host, thereby
2527 * causing coherency issues with O_DIRECT access to the same file as
2528 * well as any host interaction with the file.
2529 */
2530#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
2531static int vbsf_reg_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2532{
2533# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
2534 return __generic_file_fsync(file, start, end, datasync);
2535# else
2536 return generic_file_fsync(file, start, end, datasync);
2537# endif
2538}
2539#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35)
2540static int vbsf_reg_fsync(struct file *file, int datasync)
2541{
2542 return generic_file_fsync(file, datasync);
2543}
2544#else /* < 2.6.35 */
2545static int vbsf_reg_fsync(struct file *file, struct dentry *dentry, int datasync)
2546{
2547# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
2548 return simple_fsync(file, dentry, datasync);
2549# else
2550 int rc;
2551 struct inode *inode = dentry->d_inode;
2552 AssertReturn(inode, -EINVAL);
2553
2554 /** @todo What about file_fsync()? (<= 2.5.11) */
2555
2556# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12)
2557 rc = sync_mapping_buffers(inode->i_mapping);
2558 if ( rc == 0
2559 && (inode->i_state & I_DIRTY)
2560 && ((inode->i_state & I_DIRTY_DATASYNC) || !datasync)
2561 ) {
2562 struct writeback_control wbc = {
2563 .sync_mode = WB_SYNC_ALL,
2564 .nr_to_write = 0
2565 };
2566 rc = sync_inode(inode, &wbc);
2567 }
2568# else /* < 2.5.12 */
2569 rc = fsync_inode_buffers(inode);
2570# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 10)
2571 rc |= fsync_inode_data_buffers(inode);
2572# endif
2573 /** @todo probably need to do more here... */
2574# endif /* < 2.5.12 */
2575 return rc;
2576# endif
2577}
2578#endif /* < 2.6.35 */
2579
2580
2581#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
2582/**
2583 * Copy a datablock from one file to another on the host side.
2584 */
2585static ssize_t vbsf_reg_copy_file_range(struct file *pFileSrc, loff_t offSrc, struct file *pFileDst, loff_t offDst,
2586 size_t cbRange, unsigned int fFlags)
2587{
2588 ssize_t cbRet;
2589 if (g_uSfLastFunction >= SHFL_FN_COPY_FILE_PART) {
2590 struct inode *pInodeSrc = pFileSrc->f_inode;
2591 struct vbsf_inode_info *pInodeInfoSrc = VBSF_GET_INODE_INFO(pInodeSrc);
2592 struct vbsf_super_info *pSuperInfoSrc = VBSF_GET_SUPER_INFO(pInodeSrc->i_sb);
2593 struct vbsf_reg_info *pFileInfoSrc = (struct vbsf_reg_info *)pFileSrc->private_data;
2594 struct inode *pInodeDst = pFileDst->f_inode;
2595 struct vbsf_inode_info *pInodeInfoDst = VBSF_GET_INODE_INFO(pInodeDst);
2596 struct vbsf_super_info *pSuperInfoDst = VBSF_GET_SUPER_INFO(pInodeDst->i_sb);
2597 struct vbsf_reg_info *pFileInfoDst = (struct vbsf_reg_info *)pFileDst->private_data;
2598 VBOXSFCOPYFILEPARTREQ *pReq;
2599
2600 /*
2601 * Some extra validation.
2602 */
2603 AssertPtrReturn(pInodeInfoSrc, -EOPNOTSUPP);
2604 Assert(pInodeInfoSrc->u32Magic == SF_INODE_INFO_MAGIC);
2605 AssertPtrReturn(pInodeInfoDst, -EOPNOTSUPP);
2606 Assert(pInodeInfoDst->u32Magic == SF_INODE_INFO_MAGIC);
2607
2608# if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
2609 if (!S_ISREG(pInodeSrc->i_mode) || !S_ISREG(pInodeDst->i_mode))
2610 return S_ISDIR(pInodeSrc->i_mode) || S_ISDIR(pInodeDst->i_mode) ? -EISDIR : -EINVAL;
2611# endif
2612
2613 /*
2614 * Allocate the request and issue it.
2615 */
2616 pReq = (VBOXSFCOPYFILEPARTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
2617 if (pReq) {
2618 int vrc = VbglR0SfHostReqCopyFilePart(pSuperInfoSrc->map.root, pFileInfoSrc->Handle.hHost, offSrc,
2619 pSuperInfoDst->map.root, pFileInfoDst->Handle.hHost, offDst,
2620 cbRange, 0 /*fFlags*/, pReq);
2621 if (RT_SUCCESS(vrc))
2622 cbRet = pReq->Parms.cb64ToCopy.u.value64;
2623 else if (vrc == VERR_NOT_IMPLEMENTED)
2624 cbRet = -EOPNOTSUPP;
2625 else
2626 cbRet = -RTErrConvertToErrno(vrc);
2627
2628 VbglR0PhysHeapFree(pReq);
2629 } else
2630 cbRet = -ENOMEM;
2631 } else {
2632 cbRet = -EOPNOTSUPP;
2633 }
2634 SFLOGFLOW(("vbsf_reg_copy_file_range: returns %zd\n", cbRet));
2635 return cbRet;
2636}
2637#endif /* >= 4.5 */
2638
2639
2640#ifdef SFLOG_ENABLED
2641/*
2642 * This is just for logging page faults and such.
2643 */
2644
2645/** Pointer to the ops generic_file_mmap returns the first time it's called. */
2646static struct vm_operations_struct const *g_pGenericFileVmOps = NULL;
2647/** Merge of g_LoggingVmOpsTemplate and g_pGenericFileVmOps. */
2648static struct vm_operations_struct g_LoggingVmOps;
2649
2650
2651/* Generic page fault callback: */
2652# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
2653static vm_fault_t vbsf_vmlog_fault(struct vm_fault *vmf)
2654{
2655 vm_fault_t rc;
2656 SFLOGFLOW(("vbsf_vmlog_fault: vmf=%p flags=%#x addr=%p\n", vmf, vmf->flags, vmf->address));
2657 rc = g_pGenericFileVmOps->fault(vmf);
2658 SFLOGFLOW(("vbsf_vmlog_fault: returns %d\n", rc));
2659 return rc;
2660}
2661# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
2662static int vbsf_vmlog_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2663{
2664 int rc;
2665# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
2666 SFLOGFLOW(("vbsf_vmlog_fault: vma=%p vmf=%p flags=%#x addr=%p\n", vma, vmf, vmf->flags, vmf->address));
2667# else
2668 SFLOGFLOW(("vbsf_vmlog_fault: vma=%p vmf=%p flags=%#x addr=%p\n", vma, vmf, vmf->flags, vmf->virtual_address));
2669# endif
2670 rc = g_pGenericFileVmOps->fault(vma, vmf);
2671 SFLOGFLOW(("vbsf_vmlog_fault: returns %d\n", rc));
2672 return rc;
2673}
2674# endif
2675
2676
2677/* Special/generic page fault handler: */
2678# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
2679# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)
2680static struct page *vbsf_vmlog_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
2681{
2682 struct page *page;
2683 SFLOGFLOW(("vbsf_vmlog_nopage: vma=%p address=%p type=%p:{%#x}\n", vma, address, type, type ? *type : 0));
2684 page = g_pGenericFileVmOps->nopage(vma, address, type);
2685 SFLOGFLOW(("vbsf_vmlog_nopage: returns %p\n", page));
2686 return page;
2687}
2688# else
2689static struct page *vbsf_vmlog_nopage(struct vm_area_struct *vma, unsigned long address, int write_access_or_unused)
2690{
2691 struct page *page;
2692 SFLOGFLOW(("vbsf_vmlog_nopage: vma=%p address=%p wau=%d\n", vma, address, write_access_or_unused));
2693 page = g_pGenericFileVmOps->nopage(vma, address, write_access_or_unused);
2694 SFLOGFLOW(("vbsf_vmlog_nopage: returns %p\n", page));
2695 return page;
2696}
2697# endif /* < 2.6.26 */
2698
2699
2700/* Special page fault callback for making something writable: */
2701# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
2702static vm_fault_t vbsf_vmlog_page_mkwrite(struct vm_fault *vmf)
2703{
2704 vm_fault_t rc;
2705# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
2706 SFLOGFLOW(("vbsf_vmlog_page_mkwrite: vmf=%p flags=%#x addr=%p\n", vmf, vmf->flags, vmf->address));
2707# else
2708 SFLOGFLOW(("vbsf_vmlog_page_mkwrite: vmf=%p flags=%#x addr=%p\n", vmf, vmf->flags, vmf->virtual_address));
2709# endif
2710 rc = g_pGenericFileVmOps->page_mkwrite(vmf);
2711 SFLOGFLOW(("vbsf_vmlog_page_mkwrite: returns %d\n", rc));
2712 return rc;
2713}
2714# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
2715static int vbsf_vmlog_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
2716{
2717 int rc;
2718 SFLOGFLOW(("vbsf_vmlog_page_mkwrite: vma=%p vmf=%p flags=%#x addr=%p\n", vma, vmf, vmf->flags, vmf->virtual_address));
2719 rc = g_pGenericFileVmOps->page_mkwrite(vma, vmf);
2720 SFLOGFLOW(("vbsf_vmlog_page_mkwrite: returns %d\n", rc));
2721 return rc;
2722}
2723# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
2724static int vbsf_vmlog_page_mkwrite(struct vm_area_struct *vma, struct page *page)
2725{
2726 int rc;
2727 SFLOGFLOW(("vbsf_vmlog_page_mkwrite: vma=%p page=%p\n", vma, page));
2728 rc = g_pGenericFileVmOps->page_mkwrite(vma, page);
2729 SFLOGFLOW(("vbsf_vmlog_page_mkwrite: returns %d\n", rc));
2730 return rc;
2731}
2732# endif
2733
2734
2735/* Special page fault callback for mapping pages: */
2736# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
2737static void vbsf_vmlog_map_pages(struct vm_fault *vmf, pgoff_t start, pgoff_t end)
2738{
2739 SFLOGFLOW(("vbsf_vmlog_map_pages: vmf=%p (flags=%#x addr=%p) start=%p end=%p\n", vmf, vmf->flags, vmf->address, start, end));
2740 g_pGenericFileVmOps->map_pages(vmf, start, end);
2741 SFLOGFLOW(("vbsf_vmlog_map_pages: returns\n"));
2742}
2743# elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
2744static void vbsf_vmlog_map_pages(struct fault_env *fenv, pgoff_t start, pgoff_t end)
2745{
2746 SFLOGFLOW(("vbsf_vmlog_map_pages: fenv=%p (flags=%#x addr=%p) start=%p end=%p\n", fenv, fenv->flags, fenv->address, start, end));
2747 g_pGenericFileVmOps->map_pages(fenv, start, end);
2748 SFLOGFLOW(("vbsf_vmlog_map_pages: returns\n"));
2749}
2750# elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
2751static void vbsf_vmlog_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
2752{
2753 SFLOGFLOW(("vbsf_vmlog_map_pages: vma=%p vmf=%p (flags=%#x addr=%p)\n", vma, vmf, vmf->flags, vmf->virtual_address));
2754 g_pGenericFileVmOps->map_pages(vma, vmf);
2755 SFLOGFLOW(("vbsf_vmlog_map_pages: returns\n"));
2756}
2757# endif
2758
2759
2760/** Overload template. */
2761static struct vm_operations_struct const g_LoggingVmOpsTemplate = {
2762# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
2763 .fault = vbsf_vmlog_fault,
2764# endif
2765# if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 25)
2766 .nopage = vbsf_vmlog_nopage,
2767# endif
2768# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
2769 .page_mkwrite = vbsf_vmlog_page_mkwrite,
2770# endif
2771# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
2772 .map_pages = vbsf_vmlog_map_pages,
2773# endif
2774};
2775
2776/** file_operations::mmap wrapper for logging purposes. */
2777extern int vbsf_reg_mmap(struct file *file, struct vm_area_struct *vma)
2778{
2779 int rc;
2780 SFLOGFLOW(("vbsf_reg_mmap: file=%p vma=%p\n", file, vma));
2781 rc = generic_file_mmap(file, vma);
2782 if (rc == 0) {
2783 /* Merge the ops and template the first time thru (there's a race here). */
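/* The merge goes over the two structures pointer by pointer: where both the logging
   template and the generic ops provide an entry the logging wrapper wins, otherwise
   whatever the generic ops have (possibly NULL) is kept. */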
2784 if (g_pGenericFileVmOps == NULL) {
2785 uintptr_t const *puSrc1 = (uintptr_t *)vma->vm_ops;
2786 uintptr_t const *puSrc2 = (uintptr_t *)&g_LoggingVmOpsTemplate;
2787 uintptr_t volatile *puDst = (uintptr_t *)&g_LoggingVmOps;
2788 size_t cbLeft = sizeof(g_LoggingVmOps) / sizeof(*puDst);
2789 while (cbLeft-- > 0) {
2790 *puDst = *puSrc2 && *puSrc1 ? *puSrc2 : *puSrc1;
2791 puSrc1++;
2792 puSrc2++;
2793 puDst++;
2794 }
2795 g_pGenericFileVmOps = vma->vm_ops;
2796 vma->vm_ops = &g_LoggingVmOps;
2797 } else if (g_pGenericFileVmOps == vma->vm_ops)
2798 vma->vm_ops = &g_LoggingVmOps;
2799 else
2800 SFLOGFLOW(("vbsf_reg_mmap: Warning: vm_ops=%p, expected %p!\n", vma->vm_ops, g_pGenericFileVmOps));
2801 }
2802 SFLOGFLOW(("vbsf_reg_mmap: returns %d\n", rc));
2803 return rc;
2804}
2805
2806#endif /* SFLOG_ENABLED */
2807
2808
2809/**
2810 * File operations for regular files.
2811 */
2812struct file_operations vbsf_reg_fops = {
2813 .open = vbsf_reg_open,
2814 .read = vbsf_reg_read,
2815 .write = vbsf_reg_write,
2816#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
2817 .read_iter = vbsf_reg_read_iter,
2818 .write_iter = vbsf_reg_write_iter,
2819#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
2820 .aio_read = vbsf_reg_aio_read,
2821 .aio_write = vbsf_reg_aio_write,
2822#endif
2823 .release = vbsf_reg_release,
2824#ifdef SFLOG_ENABLED
2825 .mmap = vbsf_reg_mmap,
2826#else
2827 .mmap = generic_file_mmap,
2828#endif
2829#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
2830/** @todo This code is known to cause caching of data which should not be
2831 * cached. Investigate --
2832 * bird: Part of this was using generic page cache functions for
2833 * implementing .aio_read/write. Fixed that (see above). */
2834# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23)
2835 .splice_read = vbsf_splice_read,
2836# else
2837 .sendfile = generic_file_sendfile,
2838# endif
2839#endif
2840 .llseek = vbsf_reg_llseek,
2841 .fsync = vbsf_reg_fsync,
2842#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
2843 .copy_file_range = vbsf_reg_copy_file_range,
2844#endif
2845};
2846
2847
2848/**
2849 * Inodes operations for regular files.
2850 */
2851struct inode_operations vbsf_reg_iops = {
2852#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 18)
2853 .getattr = vbsf_inode_getattr,
2854#else
2855 .revalidate = vbsf_inode_revalidate,
2856#endif
2857 .setattr = vbsf_inode_setattr,
2858};
2859
2860
2861
2862/*********************************************************************************************************************************
2863* Address Space Operations on Regular Files (for mmap) *
2864*********************************************************************************************************************************/
2865
2866
2867#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
2868
2869/**
2870 * Used to read the content of a page into the page cache.
2871 *
2872 * Needed for mmap and reads+writes when the file is mmapped in a
2873 * shared+writeable fashion.
2874 */
2875static int vbsf_readpage(struct file *file, struct page *page)
2876{
2877 struct inode *inode = VBSF_GET_F_DENTRY(file)->d_inode;
2878 int err;
2879
2880 SFLOGFLOW(("vbsf_readpage: inode=%p file=%p page=%p off=%#llx\n", inode, file, page, (uint64_t)page->index << PAGE_SHIFT));
2881 Assert(PageLocked(page));
2882
2883 if (PageUptodate(page)) {
2884 unlock_page(page);
2885 return 0;
2886 }
2887
2888 if (!is_bad_inode(inode)) {
2889 VBOXSFREADPGLSTREQ *pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
2890 if (pReq) {
2891 struct vbsf_super_info *sf_g = VBSF_GET_SUPER_INFO(inode->i_sb);
2892 struct vbsf_reg_info *sf_r = file->private_data;
2893 uint32_t cbRead;
2894 int vrc;
2895
2896 pReq->PgLst.offFirstPage = 0;
2897 pReq->PgLst.aPages[0] = page_to_phys(page);
2898 vrc = VbglR0SfHostReqReadPgLst(sf_g->map.root,
2899 pReq,
2900 sf_r->Handle.hHost,
2901 (uint64_t)page->index << PAGE_SHIFT,
2902 PAGE_SIZE,
2903 1 /*cPages*/);
2904
2905 cbRead = pReq->Parms.cb32Read.u.value32;
2906 AssertStmt(cbRead <= PAGE_SIZE, cbRead = PAGE_SIZE);
2907 VbglR0PhysHeapFree(pReq);
2908
2909 if (RT_SUCCESS(vrc)) {
2910 if (cbRead == PAGE_SIZE) {
2911 /* likely */
2912 } else {
2913 uint8_t *pbMapped = (uint8_t *)kmap(page);
2914 RT_BZERO(&pbMapped[cbRead], PAGE_SIZE - cbRead);
2915 kunmap(page);
2916 /** @todo truncate the inode file size? */
2917 }
2918
2919 flush_dcache_page(page);
2920 SetPageUptodate(page);
2921 unlock_page(page);
2922 return 0;
2923 }
2924 err = -RTErrConvertToErrno(vrc);
2925 } else
2926 err = -ENOMEM;
2927 } else
2928 err = -EIO;
2929 SetPageError(page);
2930 unlock_page(page);
2931 return err;
2932}
2933
2934
2935/**
2936 * Used to write out the content of a dirty page cache page to the host file.
2937 *
2938 * Needed for mmap and writes when the file is mmapped in a shared+writeable
2939 * fashion.
2940 */
2941static int vbsf_writepage(struct page *page, struct writeback_control *wbc)
2942{
2943 struct address_space *mapping = page->mapping;
2944 struct inode *inode = mapping->host;
2945 struct vbsf_inode_info *sf_i = VBSF_GET_INODE_INFO(inode);
2946 struct vbsf_handle *pHandle = vbsf_handle_find(sf_i, VBSF_HANDLE_F_WRITE, VBSF_HANDLE_F_APPEND);
2947 int err;
2948
2949 SFLOGFLOW(("vbsf_writepage: inode=%p page=%p off=%#llx pHandle=%p (%#llx)\n",
2950 inode, page, (uint64_t)page->index << PAGE_SHIFT, pHandle, pHandle ? pHandle->hHost : SHFL_HANDLE_NIL));
2951
2952 if (pHandle) {
2953 struct vbsf_super_info *sf_g = VBSF_GET_SUPER_INFO(inode->i_sb);
2954 VBOXSFWRITEPGLSTREQ *pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
2955 if (pReq) {
2956 uint64_t const cbFile = i_size_read(inode);
2957 uint64_t const offInFile = (uint64_t)page->index << PAGE_SHIFT;
2958 uint32_t const cbToWrite = page->index != (cbFile >> PAGE_SHIFT) ? PAGE_SIZE
2959 : (uint32_t)cbFile & (uint32_t)PAGE_OFFSET_MASK;
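/* I.e. a full page is sent unless this is the page containing the current end of
   file, in which case only the bytes up to EOF go to the host. */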
2960 int vrc;
2961
2962 pReq->PgLst.offFirstPage = 0;
2963 pReq->PgLst.aPages[0] = page_to_phys(page);
2964 vrc = VbglR0SfHostReqWritePgLst(sf_g->map.root,
2965 pReq,
2966 pHandle->hHost,
2967 offInFile,
2968 cbToWrite,
2969 1 /*cPages*/);
2970 AssertMsgStmt(pReq->Parms.cb32Write.u.value32 == cbToWrite || RT_FAILURE(vrc), /* lazy bird */
2971 ("%#x vs %#x\n", pReq->Parms.cb32Write, cbToWrite),
2972 vrc = VERR_WRITE_ERROR);
2973 VbglR0PhysHeapFree(pReq);
2974
2975 if (RT_SUCCESS(vrc)) {
2976 /* Update the inode if we've extended the file. */
2977 /** @todo is this necessary given the cbToWrite calc above? */
2978 uint64_t const offEndOfWrite = offInFile + cbToWrite;
2979 if ( offEndOfWrite > cbFile
2980 && offEndOfWrite > i_size_read(inode))
2981 i_size_write(inode, offEndOfWrite);
2982
2983 if (PageError(page))
2984 ClearPageError(page);
2985
2986 err = 0;
2987 } else {
2988 ClearPageUptodate(page);
2989 err = -EPROTO;
2990 }
2991 } else
2992 err = -ENOMEM;
2993 vbsf_handle_release(pHandle, sf_g, "vbsf_writepage");
2994 } else {
2995 static uint64_t volatile s_cCalls = 0;
2996 if (s_cCalls++ < 16)
2997 printk("vbsf_writepage: no writable handle for %s..\n", sf_i->path->String.ach);
2998 err = -EPROTO;
2999 }
3000 unlock_page(page);
3001 return err;
3002}
3003
3004
3005# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
3006/**
3007 * Called when writing thru the page cache (which we shouldn't be doing).
3008 */
3009int vbsf_write_begin(struct file *file, struct address_space *mapping, loff_t pos,
3010 unsigned len, unsigned flags, struct page **pagep, void **fsdata)
3011{
3012 /** @todo r=bird: We shouldn't ever get here, should we? Because we don't use
3013 * the page cache for any writes AFAIK. We could just as well use
3014 * simple_write_begin & simple_write_end here if we think we really
3015 * need to have non-NULL function pointers in the table... */
3016 static uint64_t volatile s_cCalls = 0;
3017 if (s_cCalls++ < 16) {
3018 printk("vboxsf: Unexpected call to vbsf_write_begin(pos=%#llx len=%#x flags=%#x)! Please report.\n",
3019 (unsigned long long)pos, len, flags);
3020 RTLogBackdoorPrintf("vboxsf: Unexpected call to vbsf_write_begin(pos=%#llx len=%#x flags=%#x)! Please report.\n",
3021 (unsigned long long)pos, len, flags);
3022# ifdef WARN_ON
3023 WARN_ON(1);
3024# endif
3025 }
3026 return simple_write_begin(file, mapping, pos, len, flags, pagep, fsdata);
3027}
3028# endif /* KERNEL_VERSION >= 2.6.24 */
3029
3030
3031# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 10)
3032/**
3033 * This is needed to make open accept O_DIRECT as well as to deal with direct
3034 * I/O requests if we don't intercept them earlier.
3035 */
3036# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)
3037static ssize_t vbsf_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3038# elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
3039static ssize_t vbsf_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
3040# elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
3041static ssize_t vbsf_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
3042# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 6)
3043static ssize_t vbsf_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs)
3044# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 55)
3045static int vbsf_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs)
3046# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 41)
3047static int vbsf_direct_IO(int rw, struct file *file, const struct iovec *iov, loff_t offset, unsigned long nr_segs)
3048# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 35)
3049static int vbsf_direct_IO(int rw, struct inode *inode, const struct iovec *iov, loff_t offset, unsigned long nr_segs)
3050# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 26)
3051static int vbsf_direct_IO(int rw, struct inode *inode, char *buf, loff_t offset, size_t count)
3052# else
3053 static int vbsf_direct_IO(int rw, struct inode *inode, struct kiobuf *iobuf, unsigned long blocknr, int blocksize)
3054# endif
3055{
3056 TRACE();
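/* Having a ->direct_IO entry is what lets open() accept O_DIRECT; actual direct I/O
   requests are expected to be served by the read/write paths above rather than
   ending up here, so plainly rejecting them should be harmless. */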
3057 return -EINVAL;
3058}
3059# endif
3060
3061/**
3062 * Address space (for the page cache) operations for regular files.
3063 *
3064 * @todo the FsPerf touch/flush (mmap) test fails on 4.4.0 (ubuntu 16.04 lts).
3065 */
3066struct address_space_operations vbsf_reg_aops = {
3067 .readpage = vbsf_readpage,
3068 .writepage = vbsf_writepage,
3069 /** @todo Need .writepages if we want msync performance... */
3070# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12)
3071 .set_page_dirty = __set_page_dirty_buffers,
3072# endif
3073# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
3074 .write_begin = vbsf_write_begin,
3075 .write_end = simple_write_end,
3076# else
3077 .prepare_write = simple_prepare_write,
3078 .commit_write = simple_commit_write,
3079# endif
3080# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 10)
3081 .direct_IO = vbsf_direct_IO,
3082# endif
3083};
3084
3085#endif /* LINUX_VERSION_CODE >= 2.6.0 */
3086