VirtualBox

source: vbox/trunk/src/VBox/Additions/darwin/VBoxSF/VBoxSF-VNodeOps.cpp@ 85416

Last change on this file since 85416 was 82968, checked in by vboxsync, 5 years ago

Copyright year updates by scm.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 27.7 KB
/* $Id: VBoxSF-VNodeOps.cpp 82968 2020-02-04 10:35:17Z vboxsync $ */
/** @file
 * VBoxSF - Darwin Shared Folders, VNode Operations.
 */


/*
 * Copyright (C) 2013-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#define LOG_GROUP LOG_GROUP_SHARED_FOLDERS
#include "VBoxSFInternal.h"

#include <iprt/mem.h>
#include <iprt/assert.h>
#include <VBox/log.h>


/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
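/*
 * Note (added): this hack structure appears to mirror the leading members of the
 * generic vnop argument block (a_desc pointing at a vnodeop_desc, whose first
 * fields are vdesc_offset and vdesc_name in the XNU KPI), so the default error
 * handler below can log the name of whichever operation was invoked without
 * knowing its exact argument structure.
 */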
struct default_error_args_hack
{
    struct default_error_vdesc_hack
    {
        int vdesc_offset;
        const char *vdesc_name;
    } const *a_desc;
};



/**
 * Default implementation that returns ENOTSUP.
 */
static int vboxSfDwnVnDefaultError(struct default_error_args_hack *pArgs)
{
    Log(("vboxSfDwnVnDefaultError: %s\n", RT_VALID_PTR(pArgs) && RT_VALID_PTR(pArgs->a_desc) ? pArgs->a_desc->vdesc_name : "??"));
    RT_NOREF(pArgs);
    return ENOTSUP;
}


static int vboxFsDwnVnGetAttr(struct vnop_getattr_args *pArgs)
{
#if 1
    RT_NOREF(pArgs);
    return ENOTSUP;
#else

    vboxvfs_mount_t *pMount;
    struct vnode_attr *vnode_args;
    vboxvfs_vnode_t *pVnodeData;

    struct timespec timespec;

    SHFLFSOBJINFO Info;
    mount_t mp;
    vnode_t vnode;
    int rc;

    PDEBUG("Getting vnode attribute...");

    AssertReturn(pArgs, EINVAL);

    vnode = pArgs->a_vp; AssertReturn(vnode, EINVAL);
    vnode_args = pArgs->a_vap; AssertReturn(vnode_args, EINVAL);
    mp = vnode_mount(vnode); AssertReturn(mp, EINVAL);
    pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp); AssertReturn(pMount, EINVAL);
    pVnodeData = (vboxvfs_vnode_t *)vnode_fsnode(vnode); AssertReturn(pVnodeData, EINVAL);

    lck_rw_lock_shared(pVnodeData->pLock);

    rc = vboxvfs_get_info_internal(mp, pVnodeData->pPath, &Info);
    if (rc == 0)
    {
        /* Set timestamps */
        RTTimeSpecGetTimespec(&Info.BirthTime, &timespec); VATTR_RETURN(vnode_args, va_create_time, timespec);
        RTTimeSpecGetTimespec(&Info.AccessTime, &timespec); VATTR_RETURN(vnode_args, va_access_time, timespec);
        RTTimeSpecGetTimespec(&Info.ModificationTime, &timespec); VATTR_RETURN(vnode_args, va_modify_time, timespec);
        RTTimeSpecGetTimespec(&Info.ChangeTime, &timespec); VATTR_RETURN(vnode_args, va_change_time, timespec);
        VATTR_CLEAR_ACTIVE(vnode_args, va_backup_time);

        /* Set owner info. */
        VATTR_RETURN(vnode_args, va_uid, pMount->owner);
        VATTR_CLEAR_ACTIVE(vnode_args, va_gid);

        /* Access mode and flags */
        VATTR_RETURN(vnode_args, va_mode, vboxvfs_h2g_mode_inernal(Info.Attr.fMode));
        VATTR_RETURN(vnode_args, va_flags, Info.Attr.u.Unix.fFlags);

        /* The current generation number (0 if this information is not available) */
        VATTR_RETURN(vnode_args, va_gen, Info.Attr.u.Unix.GenerationId);

        VATTR_RETURN(vnode_args, va_rdev, 0);
        VATTR_RETURN(vnode_args, va_nlink, 2);

        VATTR_RETURN(vnode_args, va_data_size, sizeof(struct dirent)); /* Size of data returned per each readdir() request */

        /* Let's hope nothing catastrophic happens when this overflows! If we do not assign
         * a unique va_fileid to each vnode, `ls`, `find` (and similar tools that use fts_read())
         * will think that each sub-directory is self-cycled. */
        VATTR_RETURN(vnode_args, va_fileid, (pMount->cFileIdCounter++));

        /* Not supported */
        VATTR_CLEAR_ACTIVE(vnode_args, va_linkid);
        VATTR_CLEAR_ACTIVE(vnode_args, va_parentid);
        VATTR_CLEAR_ACTIVE(vnode_args, va_fsid);
        VATTR_CLEAR_ACTIVE(vnode_args, va_filerev);

        /* Not present on 10.6 */
        //VATTR_CLEAR_ACTIVE(vnode_args, va_addedtime);

        /** @todo take care of va_encoding (file name encoding) */
        VATTR_CLEAR_ACTIVE(vnode_args, va_encoding);
        /** @todo take care of va_acl */
        VATTR_CLEAR_ACTIVE(vnode_args, va_acl);

        VATTR_CLEAR_ACTIVE(vnode_args, va_name);
        VATTR_CLEAR_ACTIVE(vnode_args, va_uuuid);
        VATTR_CLEAR_ACTIVE(vnode_args, va_guuid);

        VATTR_CLEAR_ACTIVE(vnode_args, va_total_size);
        VATTR_CLEAR_ACTIVE(vnode_args, va_total_alloc);
        VATTR_CLEAR_ACTIVE(vnode_args, va_data_alloc);
        VATTR_CLEAR_ACTIVE(vnode_args, va_iosize);

        VATTR_CLEAR_ACTIVE(vnode_args, va_nchildren);
        VATTR_CLEAR_ACTIVE(vnode_args, va_dirlinkcount);
    }
    else
    {
        PDEBUG("getattr: unable to get VBoxVFS object info");
    }

    lck_rw_unlock_shared(pVnodeData->pLock);

    return rc;
#endif
}

#if 0
/**
 * Helper function for vboxvfs_vnode_lookup(): create a new vnode.
 */
static int
vboxvfs_vnode_lookup_instantinate_vnode(vnode_t parent_vnode, char *entry_name, vnode_t *result_vnode)
{
    /* We need to construct the full path to the vnode in order for
     * vboxvfs_get_info_internal() to understand us! */

    char *pszCurDirPath;
    int cbCurDirPath = MAXPATHLEN;

    mount_t mp = vnode_mount(parent_vnode); AssertReturn(mp, EINVAL);
    vnode_t vnode;

    int rc;

    pszCurDirPath = (char *)RTMemAllocZ(cbCurDirPath);
    if (pszCurDirPath)
    {
        rc = vn_getpath(parent_vnode, pszCurDirPath, &cbCurDirPath);
        if (rc == 0 && cbCurDirPath < MAXPATHLEN)
        {
            SHFLFSOBJINFO Info;
            PSHFLSTRING pSHFLPath;

            /* Add '/' between path parts and truncate name if it is too long */
            strncat(pszCurDirPath, "/", 1); strncat(pszCurDirPath, entry_name, MAXPATHLEN - cbCurDirPath - 1);

            rc = vboxvfs_guest_path_to_shflstring_path_internal(mp, pszCurDirPath, strlen(pszCurDirPath) + 1, &pSHFLPath);
            if (rc == 0)
            {
                rc = vboxvfs_get_info_internal(mp, pSHFLPath, (PSHFLFSOBJINFO)&Info);
                if (rc == 0)
                {
                    enum vtype type;

                    if (RTFS_IS_DIRECTORY(Info.Attr.fMode)) type = VDIR;
                    else if (RTFS_IS_FILE (Info.Attr.fMode)) type = VREG;
                    else
                    {
                        PDEBUG("Not supported VFS object (%s) type: mode 0x%X",
                               entry_name,
                               Info.Attr.fMode);

                        RTMemFree(pszCurDirPath);
                        vboxvfs_put_path_internal((void **)&pSHFLPath);
                        return ENOENT;
                    }
                    /* Create new vnode */
                    rc = vboxvfs_create_vnode_internal(mp, type, parent_vnode, FALSE, pSHFLPath, &vnode);
                    if (rc == 0)
                    {
                        PDEBUG("new vnode object '%s' has been created", entry_name);

                        *result_vnode = vnode;
                        RTMemFree(pszCurDirPath);

                        return 0;
                    }
                    else
                        PDEBUG("Unable to create vnode: %d", rc);
                }
                else
                    PDEBUG("Unable to get host object info: %d", rc);

                vboxvfs_put_path_internal((void **)&pSHFLPath);
            }
            else
                PDEBUG("Unable to convert guest<->host path");
        }
        else
            PDEBUG("Unable to construct vnode path: %d", rc);

        RTMemFree(pszCurDirPath);
    }
    else
    {
        PDEBUG("Unable to allocate memory for path buffer");
        rc = ENOMEM;
    }

    return rc;
}

/**
 * Helper function for vboxvfs_vnode_lookup(): takes care
 * of the '.' and '..' directory entries.
 */
static int
vboxvfs_vnode_lookup_dot_handler(struct vnop_lookup_args *pArgs, vnode_t *result_vnode)
{
    vnode_t vnode = NULL;

    if (pArgs->a_cnp->cn_flags & ISDOTDOT)
    {
        vnode = vnode_getparent(pArgs->a_dvp);
        if (vnode)
        {
            PDEBUG("return parent directory");
            *result_vnode = vnode;
            return 0;
        }
        else
        {
            PDEBUG("return parent directory not found, return current directory");
            *result_vnode = pArgs->a_dvp;
            return 0;
        }
    }
    else if ((strncmp(pArgs->a_cnp->cn_nameptr, ".", 1) == 0) &&
             pArgs->a_cnp->cn_namelen == 1)
    {
        PDEBUG("return current directory");
        *result_vnode = pArgs->a_dvp;
        return 0;
    }

    return ENOENT;
}
#endif

static int vboxSfDwnVnLookup(struct vnop_lookup_args *pArgs)
{
#if 1
    RT_NOREF(pArgs);
    return ENOTSUP;
#else
    int rc;

    vnode_t vnode;
    vboxvfs_vnode_t *pVnodeData;

    PDEBUG("Looking up for vnode...");

    AssertReturn(pArgs, EINVAL);
    AssertReturn(pArgs->a_dvp, EINVAL);
    AssertReturn(vnode_isdir(pArgs->a_dvp), EINVAL);
    AssertReturn(pArgs->a_cnp, EINVAL);
    AssertReturn(pArgs->a_cnp->cn_nameptr, EINVAL);
    AssertReturn(pArgs->a_vpp, EINVAL);

    pVnodeData = (vboxvfs_vnode_t *)vnode_fsnode(pArgs->a_dvp);
    AssertReturn(pVnodeData, EINVAL);
    AssertReturn(pVnodeData->pLock, EINVAL);

    /*
     * todo: take care of pArgs->a_cnp->cn_nameiop
     */

    if (pArgs->a_cnp->cn_nameiop == LOOKUP) PDEBUG("LOOKUP");
    else if (pArgs->a_cnp->cn_nameiop == CREATE) PDEBUG("CREATE");
    else if (pArgs->a_cnp->cn_nameiop == RENAME) PDEBUG("RENAME");
    else if (pArgs->a_cnp->cn_nameiop == DELETE) PDEBUG("DELETE");
    else PDEBUG("Unknown cn_nameiop: 0x%X", (int)pArgs->a_cnp->cn_nameiop);

    lck_rw_lock_exclusive(pVnodeData->pLock);

    /* Take care of the '.' and '..' entries first. */
    if (vboxvfs_vnode_lookup_dot_handler(pArgs, &vnode) == 0)
    {
        vnode_get(vnode);
        *pArgs->a_vpp = vnode;

        lck_rw_unlock_exclusive(pVnodeData->pLock);

        return 0;
    }

    /* Look into the VFS cache and attempt to find a previously allocated vnode there. */
    rc = cache_lookup(pArgs->a_dvp, &vnode, pArgs->a_cnp);
    if (rc == -1) /* Record found */
    {
        PDEBUG("Found record in VFS cache");

        /* Check whether the VFS object still exists on the host side. */
        if (vboxvfs_exist_internal(vnode))
        {
            /* Prepare & return cached vnode */
            vnode_get(vnode);
            *pArgs->a_vpp = vnode;

            rc = 0;
        }
        else
        {
            /* If the vnode exists in the guest VFS cache but no longer exists on the host, just forget it. */
            cache_purge(vnode);
            /** @todo free vnode data here */
            rc = ENOENT;
        }
    }
    else
    {
        PDEBUG("cache_lookup() returned %d, create new VFS vnode", rc);

        rc = vboxvfs_vnode_lookup_instantinate_vnode(pArgs->a_dvp, pArgs->a_cnp->cn_nameptr, &vnode);
        if (rc == 0)
        {
            cache_enter(pArgs->a_dvp, vnode, pArgs->a_cnp);
            *pArgs->a_vpp = vnode;
        }
        else
        {
            rc = ENOENT;
        }
    }

    lck_rw_unlock_exclusive(pVnodeData->pLock);

    return rc;
#endif
}

static int vboxSfDwnVnOpen(struct vnop_open_args *pArgs)
{
#if 1
    RT_NOREF(pArgs);
    return ENOTSUP;
#else
    vnode_t vnode;
    vboxvfs_vnode_t *pVnodeData;
    uint32_t fHostFlags;
    mount_t mp;
    vboxvfs_mount_t *pMount;

    int rc;

    PDEBUG("Opening vnode...");

    AssertReturn(pArgs, EINVAL);

    vnode = pArgs->a_vp; AssertReturn(vnode, EINVAL);
    pVnodeData = (vboxvfs_vnode_t *)vnode_fsnode(vnode); AssertReturn(pVnodeData, EINVAL);
    mp = vnode_mount(vnode); AssertReturn(mp, EINVAL);
    pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp); AssertReturn(pMount, EINVAL);

    lck_rw_lock_exclusive(pVnodeData->pLock);

    if (vnode_isinuse(vnode, 0))
    {
        PDEBUG("vnode '%s' (handle 0x%X) already has VBoxVFS object handle assigned, just return ok",
               (char *)pVnodeData->pPath->String.utf8,
               (int)pVnodeData->pHandle);

        lck_rw_unlock_exclusive(pVnodeData->pLock);
        return 0;
    }

    /* At this point we must make sure that nobody is using the VBoxVFS object handle. */
    //if (pVnodeData->Handle != SHFL_HANDLE_NIL)
    //{
    //    PDEBUG("vnode has active VBoxVFS object handle set, aborting");
    //    lck_rw_unlock_exclusive(pVnodeData->pLock);
    //    return EINVAL;
    //}

    fHostFlags = vboxvfs_g2h_mode_inernal(pArgs->a_mode);
    fHostFlags |= (vnode_isdir(vnode) ? SHFL_CF_DIRECTORY : 0);

    SHFLHANDLE Handle;
    rc = vboxvfs_open_internal(pMount, pVnodeData->pPath, fHostFlags, &Handle);
    if (rc == 0)
    {
        PDEBUG("Open success: '%s' (handle 0x%X)",
               (char *)pVnodeData->pPath->String.utf8,
               (int)Handle);

        pVnodeData->pHandle = Handle;
    }
    else
    {
        PDEBUG("Unable to open: '%s': %d",
               (char *)pVnodeData->pPath->String.utf8,
               rc);
    }

    lck_rw_unlock_exclusive(pVnodeData->pLock);

    return rc;
#endif
}

static int vboxSfDwnVnClose(struct vnop_close_args *pArgs)
{
#if 1
    RT_NOREF(pArgs);
    return ENOTSUP;
#else

    vnode_t vnode;
    mount_t mp;
    vboxvfs_vnode_t *pVnodeData;
    vboxvfs_mount_t *pMount;

    int rc;

    PDEBUG("Closing vnode...");

    AssertReturn(pArgs, EINVAL);

    vnode = pArgs->a_vp; AssertReturn(vnode, EINVAL);
    pVnodeData = (vboxvfs_vnode_t *)vnode_fsnode(vnode); AssertReturn(pVnodeData, EINVAL);
    mp = vnode_mount(vnode); AssertReturn(mp, EINVAL);
    pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp); AssertReturn(pMount, EINVAL);

    lck_rw_lock_exclusive(pVnodeData->pLock);

    if (vnode_isinuse(vnode, 0))
    {
        PDEBUG("vnode '%s' (handle 0x%X) is still in use, just return ok",
               (char *)pVnodeData->pPath->String.utf8,
               (int)pVnodeData->pHandle);

        lck_rw_unlock_exclusive(pVnodeData->pLock);
        return 0;
    }

    /* At this point we must make sure that the vnode has a VBoxVFS object handle assigned. */
    if (pVnodeData->pHandle == SHFL_HANDLE_NIL)
    {
        PDEBUG("vnode has no active VBoxVFS object handle set, aborting");
        lck_rw_unlock_exclusive(pVnodeData->pLock);
        return EINVAL;
    }

    rc = vboxvfs_close_internal(pMount, pVnodeData->pHandle);
    if (rc == 0)
    {
        PDEBUG("Close success: '%s' (handle 0x%X)",
               (char *)pVnodeData->pPath->String.utf8,
               (int)pVnodeData->pHandle);

        /* Forget about previously assigned VBoxVFS object handle */
        pVnodeData->pHandle = SHFL_HANDLE_NIL;
    }
    else
    {
        PDEBUG("Unable to close: '%s' (handle 0x%X): %d",
               (char *)pVnodeData->pPath->String.utf8,
               (int)pVnodeData->pHandle, rc);
    }

    lck_rw_unlock_exclusive(pVnodeData->pLock);

    return rc;
#endif
}

#if 0
/**
 * Convert SHFLDIRINFO to struct dirent and copy it back to user.
 */
static int
vboxvfs_vnode_readdir_copy_data(ino_t index, SHFLDIRINFO *Info, struct uio *uio, int *numdirent)
{
    struct dirent entry;

    int rc;

    entry.d_ino = index;
    entry.d_reclen = (__uint16_t)sizeof(entry);

    /* Detect dir entry type */
    if (RTFS_IS_DIRECTORY(Info->Info.Attr.fMode))
        entry.d_type = DT_DIR;
    else if (RTFS_IS_FILE(Info->Info.Attr.fMode))
        entry.d_type = DT_REG;
    else
    {
        PDEBUG("Unknown type of host file: mode 0x%X", (int)Info->Info.Attr.fMode);
        return ENOTSUP;
    }

    entry.d_namlen = (__uint8_t)min(sizeof(entry.d_name), Info->name.u16Size);
    memcpy(entry.d_name, Info->name.String.utf8, entry.d_namlen);

    rc = uiomove((char *)&entry, sizeof(entry), uio);
    if (rc == 0)
    {
        uio_setoffset(uio, index * sizeof(struct dirent));
        *numdirent = (int)index;

        PDEBUG("discovered entry: '%s' (%d bytes), item #%d", entry.d_name, (int)entry.d_namlen, (int)index);
    }
    else
    {
        PDEBUG("Failed to return dirent data item #%d (%d)", (int)index, rc);
    }

    return rc;
}
#endif

static int vboxSfDwnVnReadDir(struct vnop_readdir_args *pArgs)
{
#if 1
    RT_NOREF(pArgs);
    return ENOTSUP;
#else
    vboxvfs_mount_t *pMount;
    vboxvfs_vnode_t *pVnodeData;
    SHFLDIRINFO *Info;
    uint32_t cbInfo;
    mount_t mp;
    vnode_t vnode;
    struct uio *uio;

    int rc = 0, rc2;

    PDEBUG("Reading directory...");

    AssertReturn(pArgs, EINVAL);
    AssertReturn(pArgs->a_eofflag, EINVAL);
    AssertReturn(pArgs->a_numdirent, EINVAL);

    uio = pArgs->a_uio; AssertReturn(uio, EINVAL);
    vnode = pArgs->a_vp; AssertReturn(vnode, EINVAL); AssertReturn(vnode_isdir(vnode), EINVAL);
    pVnodeData = (vboxvfs_vnode_t *)vnode_fsnode(vnode); AssertReturn(pVnodeData, EINVAL);
    mp = vnode_mount(vnode); AssertReturn(mp, EINVAL);
    pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp); AssertReturn(pMount, EINVAL);

    lck_rw_lock_shared(pVnodeData->pLock);

    cbInfo = sizeof(*Info) + MAXPATHLEN;
    Info = (SHFLDIRINFO *)RTMemAllocZ(cbInfo);
    if (!Info)
    {
        PDEBUG("No memory to allocate internal data");
        lck_rw_unlock_shared(pVnodeData->pLock);
        return ENOMEM;
    }

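    /* Note (added): the uio offset is interpreted as a directory position expressed in whole
     * struct dirent units; vboxvfs_vnode_readdir_copy_data() above advances the offset by the
     * same stride, so the division below recovers the entry index. */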
    uint32_t index = (uint32_t)uio_offset(uio) / (uint32_t)sizeof(struct dirent);
    uint32_t cFiles = 0;

    PDEBUG("Exploring VBoxVFS directory (%s), handle (0x%.8X), offset (0x%X), count (%d)", (char *)pVnodeData->pPath->String.utf8, (int)pVnodeData->pHandle, index, uio_iovcnt(uio));

    /* Currently, there is a problem where VbglR0SfDirInfo() is not able to
     * continue retrieving directory content if the same VBoxVFS handle is used.
     * The code below therefore opens a fresh handle for each readdir() callback;
     * the original handle (obtained in the open() callback) is ignored. */

    SHFLHANDLE Handle;
    rc = vboxvfs_open_internal(pMount,
                               pVnodeData->pPath,
                               SHFL_CF_DIRECTORY | SHFL_CF_ACCESS_READ | SHFL_CF_ACT_OPEN_IF_EXISTS | SHFL_CF_ACT_FAIL_IF_NEW,
                               &Handle);
    if (rc != 0)
    {
        PDEBUG("Unable to open dir: %d", rc);
        RTMemFree(Info);
        lck_rw_unlock_shared(pVnodeData->pLock);
        return rc;
    }

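    /* Note (added): a freshly opened handle starts at the beginning of the directory, so the
     * loop below calls VbglR0SfDirInfo() with SHFL_LIST_RETURN_ONE index + 1 times, one entry
     * per iteration, leaving the entry for the current readdir() position in Info. */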
#if 0
    rc = VbglR0SfDirInfo(&g_vboxSFClient, &pMount->pMap, Handle, 0, 0, index, &cbInfo, (PSHFLDIRINFO)Info, &cFiles);
#else
    SHFLSTRING *pMask = vboxvfs_construct_shflstring("*", strlen("*"));
    if (pMask)
    {
        for (uint32_t cSkip = 0; (cSkip < index + 1) && (rc == VINF_SUCCESS); cSkip++)
        {
            //rc = VbglR0SfDirInfo(&g_vboxSFClient, &pMount->pMap, Handle, 0 /* pMask */, 0 /* SHFL_LIST_RETURN_ONE */, 0, &cbInfo, (PSHFLDIRINFO)Info, &cFiles);

            uint32_t cbReturned = cbInfo;
            //rc = VbglR0SfDirInfo(&g_vboxSFClient, &pMount->pMap, Handle, pMask, SHFL_LIST_RETURN_ONE, 0, &cbReturned, (PSHFLDIRINFO)Info, &cFiles);
            rc = VbglR0SfDirInfo(&g_SfClientDarwin, &pMount->pMap, Handle, 0, SHFL_LIST_RETURN_ONE, 0,
                                 &cbReturned, (PSHFLDIRINFO)Info, &cFiles);

        }

        PDEBUG("read %d files", cFiles);
        RTMemFree(pMask);
    }
    else
    {
        PDEBUG("Can't alloc mask");
        rc = ENOMEM;
    }
#endif
    rc2 = vboxvfs_close_internal(pMount, Handle);
    if (rc2 != 0)
    {
        PDEBUG("Unable to close directory: %s: %d",
               pVnodeData->pPath->String.utf8,
               rc2);
    }

    switch (rc)
    {
        case VINF_SUCCESS:
        {
            rc = vboxvfs_vnode_readdir_copy_data((ino_t)(index + 1), Info, uio, pArgs->a_numdirent);
            break;
        }

        case VERR_NO_MORE_FILES:
        {
            PDEBUG("No more entries in directory");
            *(pArgs->a_eofflag) = 1;
            break;
        }

        default:
        {
            PDEBUG("VbglR0SfDirInfo() for item #%d has failed: %d", (int)index, (int)rc);
            rc = EINVAL;
            break;
        }
    }

    RTMemFree(Info);
    lck_rw_unlock_shared(pVnodeData->pLock);

    return rc;
#endif
}


static int vboxSfDwnVnPathConf(struct vnop_pathconf_args *pArgs)
{
    Log(("vboxSfDwnVnPathConf:\n"));
    RT_NOREF(pArgs);
    return 0;
}


/**
 * vnop_reclaim implementation.
 *
 * VBoxVFS reclaim callback.
 * Called when a vnode is going to be deallocated. Should release
 * all VBoxVFS resources that correspond to the current vnode object.
 *
 * @param pArgs     Operation arguments passed from the VFS layer.
 *
 * @return 0 on success, BSD error code otherwise.
 */
static int vboxSfDwnVnReclaim(struct vnop_reclaim_args *pArgs)
{
    AssertReturn(pArgs && pArgs->a_vp, EINVAL);

    /* Check that it's not a root node that's in use. */
    PVBOXSFMNTDATA pMntData = (PVBOXSFMNTDATA)vfs_fsprivate(vnode_mount(pArgs->a_vp));
    AssertReturn(!pMntData || pMntData->pVnRoot != pArgs->a_vp, EBUSY);

    /* Get the private data and free it. */
    PVBOXSFDWNVNDATA pVnData = (PVBOXSFDWNVNDATA)vnode_fsnode(pArgs->a_vp);
    AssertPtrReturn(pVnData, 0);

    if (pVnData->hHandle != SHFL_HANDLE_NIL)
    {
        /** @todo can this happen? */
        pVnData->hHandle = SHFL_HANDLE_NIL;
    }

    RTMemFree(pVnData);
    return 0;
}


/**
 * Allocates a vnode.
 *
 * @returns Pointer to the new VNode, NULL if out of memory.
 * @param pMount    The file system mount structure.
 * @param enmType   The vnode type.
 * @param pParent   The parent vnode, NULL if root.
 * @param cbFile    The file size
 */
vnode_t vboxSfDwnVnAlloc(mount_t pMount, enum vtype enmType, vnode_t pParent, uint64_t cbFile)
{
    /*
     * Create our private data.
     */
    PVBOXSFDWNVNDATA pVnData = (PVBOXSFDWNVNDATA)RTMemAllocZ(sizeof(*pVnData));
    if (pVnData)
    {
        pVnData->hHandle = SHFL_HANDLE_NIL;

        struct vnode_fsparam VnParms;
        RT_ZERO(VnParms);
        VnParms.vnfs_mp = pMount;
        VnParms.vnfs_vtype = enmType;
        VnParms.vnfs_str = "vboxsf";
        VnParms.vnfs_dvp = pParent;
        VnParms.vnfs_fsnode = pVnData;
        VnParms.vnfs_vops = g_papfnVBoxSfDwnVnDirOpsVector;
        VnParms.vnfs_markroot = pParent == NULL;
        VnParms.vnfs_marksystem = 0;
        VnParms.vnfs_rdev = 0;
        VnParms.vnfs_filesize = cbFile;
        VnParms.vnfs_cnp = 0;
        VnParms.vnfs_flags = VNFS_NOCACHE;

        vnode_t pVnRet;
        int rc = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &VnParms, &pVnRet);
        if (rc == 0)
            return pVnRet;
        RTMemFree(pVnData);
    }
    printf("vboxSfDwnVnAlloc: out of memory!\n");
    return NULL;
}
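
/*
 * Usage sketch (added; not part of the original file): a mount implementation
 * would presumably create the root vnode along these lines.  The helper name is
 * made up for illustration; PVBOXSFMNTDATA::pVnRoot is assumed based on the
 * check in vboxSfDwnVnReclaim() above.
 */
#if 0 /* illustrative only */
static int vboxSfDwnExampleCreateRootVnode(mount_t pMount, PVBOXSFMNTDATA pMntData)
{
    vnode_t pVnRoot = vboxSfDwnVnAlloc(pMount, VDIR, NULL /*pParent*/, 0 /*cbFile*/);
    if (!pVnRoot)
        return ENOMEM;
    pMntData->pVnRoot = pVnRoot; /* matches what vboxSfDwnVnReclaim() checks for */
    return 0;
}
#endif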


/**
 * Vnode operations.
 */
static struct vnodeopv_entry_desc g_VBoxSfDirOpsDescList[] =
{
#define VNODEOPFUNC int(*)(void *)
    { &vnop_default_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    //{ &vnop_access_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError }, - probably not needed.
    //{ &vnop_advlock_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError }, - later.
    //{ &vnop_allocate_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError }, - maybe, need shfl function
    { &vnop_blktooff_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    //{ &vnop_blockmap_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    //{ &vnop_bwrite_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { &vnop_close_desc, (VNODEOPFUNC)vboxSfDwnVnClose },
    //{ &vnop_copyfile_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { &vnop_create_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    //{ &vnop_exchange_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { &vnop_fsync_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { &vnop_getattr_desc, (VNODEOPFUNC)vboxFsDwnVnGetAttr },
    //{ &vnop_getnamedstream_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    //{ &vnop_getxattr_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { &vnop_inactive_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { &vnop_ioctl_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { &vnop_link_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    //{ &vnop_listxattr_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { &vnop_lookup_desc, (VNODEOPFUNC)vboxSfDwnVnLookup },
    { &vnop_mkdir_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { &vnop_mknod_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { &vnop_mmap_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { &vnop_mnomap_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { &vnop_offtoblk_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { &vnop_open_desc, (VNODEOPFUNC)vboxSfDwnVnOpen },
    { &vnop_pagein_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { &vnop_pageout_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { &vnop_pathconf_desc, (VNODEOPFUNC)vboxSfDwnVnPathConf },
    /* { &vnop_print_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError }, undefined in ML */
    { &vnop_read_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { &vnop_readdir_desc, (VNODEOPFUNC)vboxSfDwnVnReadDir },
    //{ &vnop_readdirattr_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError }, - hfs specific.
    { &vnop_readlink_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { &vnop_reclaim_desc, (VNODEOPFUNC)vboxSfDwnVnReclaim },
    { &vnop_remove_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    //{ &vnop_removexattr_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { &vnop_rename_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    //{ &vnop_revoke_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError }, - not needed
    { &vnop_rmdir_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { &vnop_searchfs_desc, (VNODEOPFUNC)err_searchfs },
    //{ &vnop_select_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError }, - not needed
    { &vnop_setattr_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { &vnop_setxattr_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    //{ &vnop_strategy_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError }, - not needed
    { &vnop_symlink_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    /* { &vnop_truncate_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError }, undefined in ML */
    //{ &vnop_whiteout_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError }, - not needed/supported
    { &vnop_write_desc, (VNODEOPFUNC)vboxSfDwnVnDefaultError },
    { NULL, (VNODEOPFUNC)NULL },
#undef VNODEOPFUNC
};

/** Vnode operations vector; filled in by the VFS layer from g_VBoxSfDirOpsDescList via g_VBoxSfVnodeOpvDesc below. */
int (**g_papfnVBoxSfDwnVnDirOpsVector)(void *);

/**
 * VNode operation descriptors.
 */
struct vnodeopv_desc g_VBoxSfVnodeOpvDesc =
{
    &g_papfnVBoxSfDwnVnDirOpsVector,
    g_VBoxSfDirOpsDescList
};

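/*
 * Registration sketch (added; not part of the original file): the descriptor
 * above would typically be handed to the kernel together with the vfsops table
 * when the file system module registers itself.  The exact vfsops symbol and
 * the flag selection below are assumptions for illustration only.
 */
#if 0 /* illustrative only */
static struct vnodeopv_desc *g_apVBoxSfExampleOpvDescs[] = { &g_VBoxSfVnodeOpvDesc };
static vfstable_t            g_hVBoxSfExampleVfsTable;

static int vboxSfDwnExampleRegisterFs(struct vfsops *pVfsOps)
{
    struct vfs_fsentry Entry;
    RT_ZERO(Entry);
    Entry.vfe_vfsops   = pVfsOps;
    Entry.vfe_vopcnt   = RT_ELEMENTS(g_apVBoxSfExampleOpvDescs);
    Entry.vfe_opvdescs = g_apVBoxSfExampleOpvDescs;
    strlcpy(Entry.vfe_fsname, "vboxsf", sizeof(Entry.vfe_fsname));
    Entry.vfe_flags    = VFS_TBLTHREADSAFE | VFS_TBL64BITREADY | VFS_TBLNOTYPENUM;
    return vfs_fsadd(&Entry, &g_hVBoxSfExampleVfsTable); /* 0 on success, errno otherwise */
}
#endif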