VirtualBox

source: vbox/trunk/src/VBox/Additions/WINNT/SharedFolders/driver/file.cpp@ 78693

Last change on this file since 78693 was 78609, checked in by vboxsync, 6 years ago

winnt/vboxsf: Don't flush and purge the cache twice on newer systems; instead do a library trick to redirect relevant imports from write.obj and read.obj to our wrappers that use CcCoherencyFlushAndPurgeCache when possible, to get better coherency between mmap regions and file content when writing and reading normally. This comes at a cost when the file has been mmapped at some point previously (or currently) and we may need to purge stuff. bugref:9172

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 28.2 KB
Line 
1/* $Id: file.cpp 78609 2019-05-20 23:15:40Z vboxsync $ */
2/** @file
3 * VirtualBox Windows Guest Shared Folders - File System Driver file routines.
4 */
5
6/*
7 * Copyright (C) 2012-2019 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 */
17
18
19/*********************************************************************************************************************************
20* Header Files *
21*********************************************************************************************************************************/
22#include "vbsf.h"
23#include <iprt/fs.h>
24#include <iprt/mem.h>
25
26
27/*********************************************************************************************************************************
28* Defined Constants And Macros *
29*********************************************************************************************************************************/
30/** How many pages we should try transfer in one I/O request (read/write). */
31#define VBSF_MAX_IO_PAGES RT_MIN(_16K / sizeof(RTGCPHYS64) /* => 8MB buffer */, VMMDEV_MAX_HGCM_DATA_SIZE >> PAGE_SHIFT)
32
33
34
35
36/** @name HACK ALERT! Using the better CcCoherencyFlushAndPurgeCache when
37 * available (>= Windows 7) and avoid flushing+purging cache twice.
38 *
39 * We change the cache flushing and purging related imports from the write.obj
40 * and read.obj files in the rdbsslib.lib to import so these get redirected
41 * here instead of going directly to ntoskrnl. We will use
42 * CcCoherencyFlushAndPurgeCache when present, and on older systems there will
43 * be no change. This does however save us from doing double flushing and
44 * purging on newer systems.
45 *
46 * If we don't use CcCoherencyFlushAndPurgeCache we end up not seeing newly
47 * written data in memory mappings, and similarly not seeing data from freshly
48 * dirtied (but as yet unflushed) memory mapping pages when reading. (Both
49 * these scenarios are tested by FsPerf --mmap.)
50 *
51 * See VBoxEditCoffLib and the Makefile.kmk for the rest of the puzzle.
52 *
53 * @todo investigate whether we could do it the same way as we do on linux,
54 * where we interrogate the cache and use cached data when memory mappings
55 * are active. Only troubles are:
56 *
57 * 1. Don't know how to find out whether we've got memory mappings.
58 *
59 * 2. Don't know how to detect dirty pages (we should only read
60 * from dirty ones).
61 *
62 * To really explore this, it would be best to introduce a caching mode
63 * mount parameter (or something) analogous to what we have on linux. In
64 * the relaxed mode, we could get away with more as users could always
65 * disable caching...
66 * @{
67 */
68
69/** For reads. */
70static VOID NTAPI vbsfNtReadCcFlushCache(PSECTION_OBJECT_POINTERS pSectObjPtrs, PLARGE_INTEGER poffFlush, ULONG cbFlush,
71 PIO_STATUS_BLOCK pIos)
72{
73 if (g_pfnCcCoherencyFlushAndPurgeCache)
74 g_pfnCcCoherencyFlushAndPurgeCache(pSectObjPtrs, poffFlush, cbFlush, pIos, CC_FLUSH_AND_PURGE_NO_PURGE);
75 else
76 CcFlushCache(pSectObjPtrs, poffFlush, cbFlush, pIos);
77}
78
79
80/**
81 * For writes with mmapping/caching section, called before the purging.
82 *
83 * This does both flushing and puring when CcCoherencyFlushAndPurgeCache is
84 * available.
85 */
86static VOID NTAPI vbsfNtWriteCcFlushCache(PSECTION_OBJECT_POINTERS pSectObjPtrs, PLARGE_INTEGER poffFlush, ULONG cbFlush,
87 PIO_STATUS_BLOCK pIos)
88{
89 if (g_pfnCcCoherencyFlushAndPurgeCache)
90 g_pfnCcCoherencyFlushAndPurgeCache(pSectObjPtrs, poffFlush, cbFlush, pIos, 0 /*fFlags*/);
91 else
92 CcFlushCache(pSectObjPtrs, poffFlush, cbFlush, pIos);
93}
94
95
96/**
97 * For writes with mmapping/caching section, called to purge after flushing.
98 *
99 * We translate this to a no-op when CcCoherencyFlushAndPurgeCache is available.
100 */
101static BOOLEAN NTAPI vbsfNtWriteCcPurgeCacheSection(PSECTION_OBJECT_POINTERS pSectObjPtrs, PLARGE_INTEGER poffPurge,ULONG cbPurge,
102#if (NTDDI_VERSION >= NTDDI_VISTA)
103 ULONG fUninitializeCacheMaps)
104#else
105 BOOLEAN fUninitializeCacheMaps)
106#endif
107{
108#if (NTDDI_VERSION >= NTDDI_VISTA)
109 fUninitializeCacheMaps &= 0xff; /* Used to be BOOLEAN before Vista. */
110#endif
111 Assert(fUninitializeCacheMaps == 0);
112 BOOLEAN fRet;
113 if (g_pfnCcCoherencyFlushAndPurgeCache)
114 fRet = TRUE;
115 else
116 fRet = CcPurgeCacheSection(pSectObjPtrs, poffPurge, cbPurge, fUninitializeCacheMaps);
117 return fRet;
118}
119
/* These pointer variables are what VBoxEditCoffLib rewires the read.obj and
   write.obj imports to (see the HACK ALERT note above); C linkage so the
   symbol names match what the import thunks reference. */
extern "C" {
/** This is what read.obj gets instead of __imp_CcFlushCache. */
decltype(CcFlushCache)        *g_pfnRdFlushCache        = vbsfNtReadCcFlushCache;
/** This is what write.obj gets instead of __imp_CcFlushCache. */
decltype(CcFlushCache)        *g_pfnWrFlushCache        = vbsfNtWriteCcFlushCache;
/** This is what write.obj gets instead of __imp_CcPurgeCacheSection. */
decltype(CcPurgeCacheSection) *g_pfnWrPurgeCacheSection = vbsfNtWriteCcPurgeCacheSection;
}
128
129/** @} */
130
131
132
/**
 * Performs a read.
 *
 * Shovels data from the host file into the caller's MDL-described buffer in
 * page-list sized chunks, updating timestamp bookkeeping and (when safe) the
 * cached file size as it goes.
 *
 * @returns STATUS_SUCCESS, STATUS_END_OF_FILE (zero bytes read at/past EOF),
 *          STATUS_INSUFFICIENT_RESOURCES (request buffer allocation failed),
 *          or the NT translation of the host status.
 * @param   RxContext   The RDBSS context for this I/O request.
 *
 * @note Almost identical to vbsfNtWriteWorker.
 */
static NTSTATUS vbsfNtReadWorker(PRX_CONTEXT RxContext)
{
    RxCaptureFcb;
    RxCaptureFobx;
    PMRX_VBOX_NETROOT_EXTENSION pNetRootX  = VBoxMRxGetNetRootExtension(capFcb->pNetRoot);
    PVBSFNTFCBEXT               pVBoxFcbX  = VBoxMRxGetFcbExtension(capFcb);
    PMRX_VBOX_FOBX              pVBoxFobX  = VBoxMRxGetFileObjectExtension(capFobx);
    PMDL                        pBufferMdl = RxContext->LowIoContext.ParamsFor.ReadWrite.Buffer;

    LogFlow(("vbsfNtReadWorker: hFile=%#RX64 offFile=%#RX64 cbToRead=%#x %s\n", pVBoxFobX->hFile,
             RxContext->LowIoContext.ParamsFor.ReadWrite.ByteOffset, RxContext->LowIoContext.ParamsFor.ReadWrite.ByteCount,
             RxContext->Flags & RX_CONTEXT_FLAG_ASYNC_OPERATION ? " async" : "sync"));

    AssertReturn(pBufferMdl, STATUS_INTERNAL_ERROR);

    /*
     * We should never get a zero byte request (RDBSS checks), but in case we
     * do, it should succeed.
     */
    uint32_t cbRet  = 0;
    uint32_t cbLeft = RxContext->LowIoContext.ParamsFor.ReadWrite.ByteCount;
    AssertReturnStmt(cbLeft > 0, RxContext->InformationToReturn = 0, STATUS_SUCCESS);

    Assert(cbLeft <= MmGetMdlByteCount(pBufferMdl));

    /*
     * Allocate a request buffer.
     *
     * Sized for up to VBSF_MAX_IO_PAGES page entries; if the physical heap is
     * short, halve the page count (floor of 4) until the allocation succeeds.
     */
    uint32_t cPagesLeft = ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(pBufferMdl), cbLeft);
    uint32_t cMaxPages  = RT_MIN(cPagesLeft, VBSF_MAX_IO_PAGES);
    VBOXSFREADPGLSTREQ *pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ,
                                                                                          PgLst.aPages[cMaxPages]));
    while (!pReq && cMaxPages > 4)
    {
        cMaxPages /= 2;
        pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages]));
    }
    NTSTATUS rcNt = STATUS_SUCCESS;
    if (pReq)
    {
        /*
         * The read loop.
         */
        RTFOFF      offFile = RxContext->LowIoContext.ParamsFor.ReadWrite.ByteOffset;
        PPFN_NUMBER paPfns  = MmGetMdlPfnArray(pBufferMdl);
        uint32_t    offPage = MmGetMdlByteOffset(pBufferMdl);
        if (offPage < PAGE_SIZE)
        { /* likely */ }
        else
        {
            /* Normalize an MDL byte offset of a page or more into whole
               skipped PFN entries plus an in-page remainder. */
            paPfns  += offPage >> PAGE_SHIFT;
            offPage &= PAGE_OFFSET_MASK;
        }

        for (;;)
        {
            /*
             * Figure out how much to process now and set up the page list for it.
             * (A full chunk is cMaxPages pages less the offset into the first page.)
             */
            uint32_t cPagesInChunk;
            uint32_t cbChunk;
            if (cPagesLeft <= cMaxPages)
            {
                cPagesInChunk = cPagesLeft;
                cbChunk       = cbLeft;
            }
            else
            {
                cPagesInChunk = cMaxPages;
                cbChunk       = (cMaxPages << PAGE_SHIFT) - offPage;
            }

            size_t iPage = cPagesInChunk;
            while (iPage-- > 0)
                pReq->PgLst.aPages[iPage] = (RTGCPHYS)paPfns[iPage] << PAGE_SHIFT;
            pReq->PgLst.offFirstPage = offPage;

#if 0 /* Instead we hook into read.obj's import function pointers to do this more efficiently. */
            /*
             * Flush dirty cache content before we try read it from the host.  RDBSS calls
             * CcFlushCache before it calls us, I think, but CcCoherencyFlushAndPurgeCache
             * does the right thing whereas CcFlushCache clearly doesn't (FsPerf mmap+read
             * coherency test fails consistently on W10, XP, ++).
             */
            if (   g_pfnCcCoherencyFlushAndPurgeCache
                && !(RxContext->CurrentIrp && (RxContext->CurrentIrp->Flags & IRP_PAGING_IO))
                && RxContext->NonPagedFcb != NULL
                && RxContext->NonPagedFcb->SectionObjectPointers.DataSectionObject != NULL)
            {
                LARGE_INTEGER offFlush;
                offFlush.QuadPart = offFile;
                Assert(!RxContext->FcbPagingIoResourceAcquired);
                BOOLEAN AcquiredFile = RxAcquirePagingIoResourceShared(NULL, capFcb, 1 /*fWait*/);
                g_pfnCcCoherencyFlushAndPurgeCache(&RxContext->NonPagedFcb->SectionObjectPointers, &offFlush, cbChunk,
                                                   &RxContext->CurrentIrp->IoStatus, CC_FLUSH_AND_PURGE_NO_PURGE);
                if (AcquiredFile)
                { RxReleasePagingIoResource(NULL, capFcb); /* requires {} */ }
            }
#endif

            /*
             * Issue the request and unlock the pages.
             */
            int vrc = VbglR0SfHostReqReadPgLst(pNetRootX->map.root, pReq, pVBoxFobX->hFile, offFile, cbChunk, cPagesInChunk);
            if (RT_SUCCESS(vrc))
            {
                /*
                 * Success, advance position and buffer.
                 */
                uint32_t cbActual = pReq->Parms.cb32Read.u.value32;
                AssertStmt(cbActual <= cbChunk, cbActual = cbChunk);
                cbRet   += cbActual;
                offFile += cbActual;
                cbLeft  -= cbActual;

                /*
                 * Update timestamp state (FCB is shared).
                 */
                pVBoxFobX->fTimestampsImplicitlyUpdated |= VBOX_FOBX_F_INFO_LASTACCESS_TIME;
                if (pVBoxFcbX->pFobxLastAccessTime != pVBoxFobX)
                    pVBoxFcbX->pFobxLastAccessTime = NULL;

                /*
                 * Are we done already?  (All bytes read, or a short read signalling EOF.)
                 */
                if (!cbLeft || cbActual < cbChunk)
                {
                    /*
                     * Flag EOF, but only if we got no data at all this request.
                     */
                    if (cbActual != 0 || cbRet != 0)
                    { /* typical */ }
                    else
                        rcNt = STATUS_END_OF_FILE;

                    /*
                     * See if we've reached the EOF early or read beyond what we thought were the EOF.
                     *
                     * Note! We don't dare do this (yet) if we're in paging I/O as we then hold the
                     *       PagingIoResource in shared mode and would probably deadlock in the
                     *       updating code when taking the lock in exclusive mode.
                     */
                    if (RxContext->LowIoContext.Resource != capFcb->Header.PagingIoResource)
                    {
                        LONGLONG cbFileRdbss;
                        RxGetFileSizeWithLock((PFCB)capFcb, &cbFileRdbss);
                        if (   offFile < cbFileRdbss
                            && cbActual < cbChunk /* hit EOF */)
                            vbsfNtUpdateFcbSize(RxContext->pFobx->AssociatedFileObject, capFcb, pVBoxFobX, offFile, cbFileRdbss, -1);
                        else if (offFile > cbFileRdbss)
                            vbsfNtQueryAndUpdateFcbSize(pNetRootX, RxContext->pFobx->AssociatedFileObject,
                                                        pVBoxFobX, capFcb, pVBoxFcbX);
                    }
                    break;
                }

                /*
                 * More to read, advance page related variables and loop.
                 * (After the first chunk the buffer is page aligned, so offPage is zero.)
                 */
                paPfns     += cPagesInChunk;
                cPagesLeft -= cPagesInChunk;
                offPage     = 0;
            }
            else if (vrc == VERR_NO_MEMORY && cMaxPages > 4)
            {
                /*
                 * The host probably doesn't have enough heap to handle the
                 * request, reduce the page count and retry.
                 */
                cMaxPages /= 4;
                Assert(cMaxPages > 0);
            }
            else
            {
                /*
                 * If we've successfully read stuff, return it rather than
                 * the error. (Not sure if this is such a great idea...)
                 */
                if (cbRet > 0)
                    Log(("vbsfNtReadWorker: read at %#RX64 -> %Rrc; got cbRet=%#zx already\n", offFile, vrc, cbRet));
                else
                {
                    rcNt = vbsfNtVBoxStatusToNt(vrc);
                    Log(("vbsfNtReadWorker: read at %#RX64 -> %Rrc (rcNt=%#x)\n", offFile, vrc, rcNt));
                }
                break;
            }

        }

        VbglR0PhysHeapFree(pReq);
    }
    else
        rcNt = STATUS_INSUFFICIENT_RESOURCES;
    RxContext->InformationToReturn = cbRet;
    LogFlow(("vbsfNtReadWorker: returns %#x cbRet=%#x @ %#RX64\n",
             rcNt, cbRet, RxContext->LowIoContext.ParamsFor.ReadWrite.ByteOffset));
    return rcNt;
}
338
339/**
340 * Wrapper for RxDispatchToWorkerThread().
341 */
342static VOID vbsfNtReadThreadWorker(VOID *pv)
343{
344 PRX_CONTEXT RxContext = (PRX_CONTEXT)pv;
345
346 Log(("VBOXSF: vbsfNtReadThreadWorker: calling the worker\n"));
347
348 RxContext->IoStatusBlock.Status = vbsfNtReadWorker(RxContext);
349
350 Log(("VBOXSF: vbsfNtReadThreadWorker: Status 0x%08X\n",
351 RxContext->IoStatusBlock.Status));
352
353 RxLowIoCompletion(RxContext);
354}
355
356/**
357 * Read stuff from a file.
358 *
359 * Prior to calling us, RDBSS will have:
360 * - Called CcFlushCache() for uncached accesses.
361 * - For non-paging access the Fcb.Header.Resource lock in shared mode in one
362 * way or another (ExAcquireResourceSharedLite,
363 * ExAcquireSharedWaitForExclusive).
364 * - For paging the FCB isn't, but the Fcb.Header.PagingResource is taken
365 * in shared mode (ExAcquireResourceSharedLite).
366 *
367 * Upon completion, it will update the file pointer if applicable. There are no
368 * EOF checks and corresponding file size updating like in the write case, so
369 * that's something we have to do ourselves it seems since the library relies on
370 * the size information to be accurate in a few places (set EOF, cached reads).
371 */
372NTSTATUS VBoxMRxRead(IN PRX_CONTEXT RxContext)
373{
374 NTSTATUS Status;
375
376 /* If synchronous operation, keep it on this thread (RDBSS already checked
377 if we've got enough stack before calling us). */
378 if (!(RxContext->Flags & RX_CONTEXT_FLAG_ASYNC_OPERATION))
379 {
380 RxContext->IoStatusBlock.Status = Status = vbsfNtReadWorker(RxContext);
381 Assert(Status != STATUS_PENDING);
382
383 Log(("VBOXSF: VBoxMRxRead: vbsfNtReadWorker: Status %#08X\n", Status));
384 }
385 else
386 {
387 Status = RxDispatchToWorkerThread(VBoxMRxDeviceObject, DelayedWorkQueue, vbsfNtReadThreadWorker, RxContext);
388
389 Log(("VBOXSF: VBoxMRxRead: RxDispatchToWorkerThread: Status 0x%08X\n", Status));
390
391 if (Status == STATUS_SUCCESS)
392 Status = STATUS_PENDING;
393 }
394
395 return Status;
396}
397
/**
 * Performs a write.
 *
 * Pushes data from the caller's MDL-described buffer out to the host file in
 * page-list sized chunks, updating timestamp state and the cached file size
 * as it goes.
 *
 * @returns STATUS_SUCCESS, STATUS_INSUFFICIENT_RESOURCES (request buffer
 *          allocation failed), or the NT translation of the host status.
 * @param   RxContext   The RDBSS context for this I/O request.
 *
 * @note Almost identical to vbsfNtReadWorker.
 */
static NTSTATUS vbsfNtWriteWorker(PRX_CONTEXT RxContext)
{
    RxCaptureFcb;
    RxCaptureFobx;
    PMRX_VBOX_NETROOT_EXTENSION pNetRootX  = VBoxMRxGetNetRootExtension(capFcb->pNetRoot);
    PVBSFNTFCBEXT               pVBoxFcbX  = VBoxMRxGetFcbExtension(capFcb);
    PMRX_VBOX_FOBX              pVBoxFobX  = VBoxMRxGetFileObjectExtension(capFobx);
    PMDL                        pBufferMdl = RxContext->LowIoContext.ParamsFor.ReadWrite.Buffer;

    LogFlow(("vbsfNtWriteWorker: hFile=%#RX64 offFile=%#RX64 cbToWrite=%#x %s\n", pVBoxFobX->hFile,
             RxContext->LowIoContext.ParamsFor.ReadWrite.ByteOffset, RxContext->LowIoContext.ParamsFor.ReadWrite.ByteCount,
             RxContext->Flags & RX_CONTEXT_FLAG_ASYNC_OPERATION ? " async" : "sync"));

    AssertReturn(pBufferMdl, STATUS_INTERNAL_ERROR);

    /*
     * We should never get a zero byte request (RDBSS checks), but in case we
     * do, it should succeed.
     */
    uint32_t cbRet  = 0;
    uint32_t cbLeft = RxContext->LowIoContext.ParamsFor.ReadWrite.ByteCount;
    AssertReturnStmt(cbLeft > 0, RxContext->InformationToReturn = 0, STATUS_SUCCESS);

    Assert(cbLeft <= MmGetMdlByteCount(pBufferMdl));

    /*
     * Allocate a request buffer.
     *
     * Sized for up to VBSF_MAX_IO_PAGES page entries; if the physical heap is
     * short, halve the page count (floor of 4) until the allocation succeeds.
     */
    uint32_t cPagesLeft = ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(pBufferMdl), cbLeft);
    uint32_t cMaxPages  = RT_MIN(cPagesLeft, VBSF_MAX_IO_PAGES);
    VBOXSFWRITEPGLSTREQ *pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ,
                                                                                            PgLst.aPages[cMaxPages]));
    while (!pReq && cMaxPages > 4)
    {
        cMaxPages /= 2;
        pReq = (VBOXSFWRITEPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFWRITEPGLSTREQ, PgLst.aPages[cMaxPages]));
    }
    NTSTATUS rcNt = STATUS_SUCCESS;
    if (pReq)
    {
        /*
         * The write loop.
         */
        RTFOFF      offFile = RxContext->LowIoContext.ParamsFor.ReadWrite.ByteOffset;
        PPFN_NUMBER paPfns  = MmGetMdlPfnArray(pBufferMdl);
        uint32_t    offPage = MmGetMdlByteOffset(pBufferMdl);
        if (offPage < PAGE_SIZE)
        { /* likely */ }
        else
        {
            /* Normalize an MDL byte offset of a page or more into whole
               skipped PFN entries plus an in-page remainder. */
            paPfns  += offPage >> PAGE_SHIFT;
            offPage &= PAGE_OFFSET_MASK;
        }

        for (;;)
        {
            /*
             * Figure out how much to process now and set up the page list for it.
             * (A full chunk is cMaxPages pages less the offset into the first page.)
             */
            uint32_t cPagesInChunk;
            uint32_t cbChunk;
            if (cPagesLeft <= cMaxPages)
            {
                cPagesInChunk = cPagesLeft;
                cbChunk       = cbLeft;
            }
            else
            {
                cPagesInChunk = cMaxPages;
                cbChunk       = (cMaxPages << PAGE_SHIFT) - offPage;
            }

            size_t iPage = cPagesInChunk;
            while (iPage-- > 0)
                pReq->PgLst.aPages[iPage] = (RTGCPHYS)paPfns[iPage] << PAGE_SHIFT;
            pReq->PgLst.offFirstPage = offPage;

#if 0 /* Instead we hook into write.obj's import function pointers to do this more efficiently. */
            /*
             * Flush and purge the cache range we're touching upon now, provided we can and
             * really need to.  The CcCoherencyFlushAndPurgeCache API seems to work better
             * than the CcFlushCache + CcPurgeCacheSection that RDBSS does before calling us.
             */
            if (   g_pfnCcCoherencyFlushAndPurgeCache
                && !(RxContext->CurrentIrp && (RxContext->CurrentIrp->Flags & IRP_PAGING_IO))
                && RxContext->NonPagedFcb != NULL
                && RxContext->NonPagedFcb->SectionObjectPointers.DataSectionObject != NULL)
            {
                LARGE_INTEGER offFlush;
                offFlush.QuadPart = offFile;
                BOOLEAN fAcquiredLock = RxAcquirePagingIoResource(NULL, capFcb);
                g_pfnCcCoherencyFlushAndPurgeCache(&RxContext->NonPagedFcb->SectionObjectPointers, &offFlush, cbChunk,
                                                   &RxContext->CurrentIrp->IoStatus, 0 /*fFlags*/);
                if (fAcquiredLock)
                { RxReleasePagingIoResource(NULL, capFcb); /* requires {} */ }
            }
#endif

            /*
             * Issue the request and unlock the pages.
             */
            int vrc = VbglR0SfHostReqWritePgLst(pNetRootX->map.root, pReq, pVBoxFobX->hFile, offFile, cbChunk, cPagesInChunk);
            if (RT_SUCCESS(vrc))
            {
                /*
                 * Success, advance position and buffer.
                 */
                uint32_t cbActual = pReq->Parms.cb32Write.u.value32;
                AssertStmt(cbActual <= cbChunk, cbActual = cbChunk);
                cbRet   += cbActual;
                offFile += cbActual;
                cbLeft  -= cbActual;

                /*
                 * Update timestamp state (FCB is shared).
                 */
                pVBoxFobX->fTimestampsImplicitlyUpdated |= VBOX_FOBX_F_INFO_LASTWRITE_TIME;
                if (pVBoxFcbX->pFobxLastWriteTime != pVBoxFobX)
                    pVBoxFcbX->pFobxLastWriteTime = NULL;

                /*
                 * Are we done already?  (All bytes written, or a short write.)
                 */
                if (!cbLeft || cbActual < cbChunk)
                {
                    /*
                     * Make sure our cached file size value is up to date (RDBSS takes care
                     * of the ones in the FCB as well as the cache manager).
                     */
                    if (cbRet > 0)
                    {
                        if (pVBoxFobX->Info.cbObject < offFile)
                            pVBoxFobX->Info.cbObject = offFile;

                        if (pVBoxFobX->Info.cbAllocated < offFile)
                        {
                            pVBoxFobX->Info.cbAllocated = offFile;
                            /* Force a re-query; the host may have allocated more. */
                            pVBoxFobX->nsUpToDate = 0;
                        }
                    }
                    break;
                }

                /*
                 * More to write, advance page related variables and loop.
                 * (After the first chunk the buffer is page aligned, so offPage is zero.)
                 */
                paPfns     += cPagesInChunk;
                cPagesLeft -= cPagesInChunk;
                offPage     = 0;
            }
            else if (vrc == VERR_NO_MEMORY && cMaxPages > 4)
            {
                /*
                 * The host probably doesn't have enough heap to handle the
                 * request, reduce the page count and retry.
                 */
                cMaxPages /= 4;
                Assert(cMaxPages > 0);
            }
            else
            {
                /*
                 * If we've successfully written stuff, return it rather than
                 * the error. (Not sure if this is such a great idea...)
                 */
                if (cbRet > 0)
                    Log(("vbsfNtWriteWorker: write at %#RX64 -> %Rrc; got cbRet=%#zx already\n", offFile, vrc, cbRet));
                else
                {
                    rcNt = vbsfNtVBoxStatusToNt(vrc);
                    Log(("vbsfNtWriteWorker: write at %#RX64 -> %Rrc (rcNt=%#x)\n", offFile, vrc, rcNt));
                }
                break;
            }

        }

        VbglR0PhysHeapFree(pReq);
    }
    else
        rcNt = STATUS_INSUFFICIENT_RESOURCES;
    RxContext->InformationToReturn = cbRet;
    LogFlow(("vbsfNtWriteWorker: returns %#x cbRet=%#x @ %#RX64\n",
             rcNt, cbRet, RxContext->LowIoContext.ParamsFor.ReadWrite.ByteOffset));
    return rcNt;
}
589
590/**
591 * Wrapper for RxDispatchToWorkerThread().
592 */
593static VOID vbsfNtWriteThreadWorker(VOID *pv)
594{
595 PRX_CONTEXT RxContext = (PRX_CONTEXT)pv;
596
597 Log(("VBOXSF: vbsfNtWriteThreadWorker: calling the worker\n"));
598
599 RxContext->IoStatusBlock.Status = vbsfNtWriteWorker(RxContext);
600
601 Log(("VBOXSF: vbsfNtWriteThreadWorker: Status 0x%08X\n",
602 RxContext->IoStatusBlock.Status));
603
604 RxLowIoCompletion(RxContext);
605}
606
607NTSTATUS VBoxMRxWrite(IN PRX_CONTEXT RxContext)
608{
609 NTSTATUS Status;
610
611 /* If synchronous operation, keep it on this thread (RDBSS already checked
612 if we've got enough stack before calling us). */
613 if (!(RxContext->Flags & RX_CONTEXT_FLAG_ASYNC_OPERATION))
614 {
615 RxContext->IoStatusBlock.Status = Status = vbsfNtWriteWorker(RxContext);
616 Assert(Status != STATUS_PENDING);
617
618 Log(("VBOXSF: VBoxMRxWrite: vbsfNtWriteWorker: Status %#08X\n", Status));
619 }
620 else
621 {
622 Status = RxDispatchToWorkerThread(VBoxMRxDeviceObject, DelayedWorkQueue, vbsfNtWriteThreadWorker, RxContext);
623
624 Log(("VBOXSF: VBoxMRxWrite: RxDispatchToWorkerThread: Status 0x%08X\n", Status));
625
626 if (Status == STATUS_SUCCESS)
627 Status = STATUS_PENDING;
628 }
629
630 return Status;
631}
632
633
634NTSTATUS VBoxMRxLocks(IN PRX_CONTEXT RxContext)
635{
636 NTSTATUS Status = STATUS_SUCCESS;
637
638 RxCaptureFcb;
639 RxCaptureFobx;
640
641 PMRX_VBOX_NETROOT_EXTENSION pNetRootExtension = VBoxMRxGetNetRootExtension(capFcb->pNetRoot);
642 PMRX_VBOX_FOBX pVBoxFobx = VBoxMRxGetFileObjectExtension(capFobx);
643
644 PLOWIO_CONTEXT LowIoContext = &RxContext->LowIoContext;
645 uint32_t fu32Lock = 0;
646 int vrc;
647
648 Log(("VBOXSF: MRxLocks: Operation %d\n",
649 LowIoContext->Operation));
650
651 switch (LowIoContext->Operation)
652 {
653 default:
654 AssertMsgFailed(("VBOXSF: MRxLocks: Unsupported lock/unlock type %d detected!\n",
655 LowIoContext->Operation));
656 return STATUS_NOT_IMPLEMENTED;
657
658 case LOWIO_OP_UNLOCK_MULTIPLE:
659 /** @todo Remove multiple locks listed in LowIoContext.ParamsFor.Locks.LockList. */
660 Log(("VBOXSF: MRxLocks: Unsupported LOWIO_OP_UNLOCK_MULTIPLE!\n",
661 LowIoContext->Operation));
662 return STATUS_NOT_IMPLEMENTED;
663
664 case LOWIO_OP_SHAREDLOCK:
665 fu32Lock = SHFL_LOCK_SHARED | SHFL_LOCK_PARTIAL;
666 break;
667
668 case LOWIO_OP_EXCLUSIVELOCK:
669 fu32Lock = SHFL_LOCK_EXCLUSIVE | SHFL_LOCK_PARTIAL;
670 break;
671
672 case LOWIO_OP_UNLOCK:
673 fu32Lock = SHFL_LOCK_CANCEL | SHFL_LOCK_PARTIAL;
674 break;
675 }
676
677 if (LowIoContext->ParamsFor.Locks.Flags & LOWIO_LOCKSFLAG_FAIL_IMMEDIATELY)
678 fu32Lock |= SHFL_LOCK_NOWAIT;
679 else
680 fu32Lock |= SHFL_LOCK_WAIT;
681
682 vrc = VbglR0SfLock(&g_SfClient, &pNetRootExtension->map, pVBoxFobx->hFile,
683 LowIoContext->ParamsFor.Locks.ByteOffset, LowIoContext->ParamsFor.Locks.Length, fu32Lock);
684
685 Status = vbsfNtVBoxStatusToNt(vrc);
686
687 Log(("VBOXSF: MRxLocks: Returned 0x%08X\n", Status));
688 return Status;
689}
690
691NTSTATUS VBoxMRxCompleteBufferingStateChangeRequest(IN OUT PRX_CONTEXT RxContext, IN OUT PMRX_SRV_OPEN SrvOpen,
692 IN PVOID pvContext)
693{
694 RT_NOREF(RxContext, SrvOpen, pvContext);
695 Log(("VBOXSF: MRxCompleteBufferingStateChangeRequest: not implemented\n"));
696 return STATUS_NOT_IMPLEMENTED;
697}
698
699NTSTATUS VBoxMRxFlush (IN PRX_CONTEXT RxContext)
700{
701 NTSTATUS Status = STATUS_SUCCESS;
702
703 RxCaptureFcb;
704 RxCaptureFobx;
705
706 PMRX_VBOX_NETROOT_EXTENSION pNetRootExtension = VBoxMRxGetNetRootExtension(capFcb->pNetRoot);
707 PMRX_VBOX_FOBX pVBoxFobx = VBoxMRxGetFileObjectExtension(capFobx);
708
709 int vrc;
710
711 Log(("VBOXSF: MRxFlush\n"));
712
713 /* Do the actual flushing of file buffers */
714 vrc = VbglR0SfFlush(&g_SfClient, &pNetRootExtension->map, pVBoxFobx->hFile);
715
716 Status = vbsfNtVBoxStatusToNt(vrc);
717
718 Log(("VBOXSF: MRxFlush: Returned 0x%08X\n", Status));
719 return Status;
720}
721
722/** See PMRX_EXTENDFILE_CALLDOWN in ddk/mrx.h
723 *
724 * Documentation says it returns STATUS_SUCCESS on success and an error
725 * status on failure, so the ULONG return type is probably just a typo that
726 * stuck.
727 */
728ULONG NTAPI VBoxMRxExtendStub(IN OUT struct _RX_CONTEXT * RxContext, IN OUT PLARGE_INTEGER pNewFileSize,
729 OUT PLARGE_INTEGER pNewAllocationSize)
730{
731 RT_NOREF(RxContext);
732
733 /* Note: On Windows hosts vbsfNtSetEndOfFile returns ACCESS_DENIED if the file has been
734 * opened in APPEND mode. Writes to a file will extend it anyway, therefore it is
735 * better to not call the host at all and tell the caller that the file was extended.
736 */
737 Log(("VBOXSF: MRxExtendStub: new size = %RX64\n",
738 pNewFileSize->QuadPart));
739
740 pNewAllocationSize->QuadPart = pNewFileSize->QuadPart;
741
742 return STATUS_SUCCESS;
743}
744
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette