Changeset 77138 in vbox for trunk/src/VBox/Additions/linux
- Timestamp:
- Feb 1, 2019 7:00:23 PM (6 years ago)
- Location:
- trunk/src/VBox/Additions/linux/sharedfolders
- Files:
- 6 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/src/VBox/Additions/linux/sharedfolders/mount.vboxsf.c
r76553 r77138 97 97 typedef enum handler_opt 98 98 { 99 HORW, 100 HORO, 101 HOUID, 102 HOGID, 103 HOTTL, 104 HODMODE, 105 HOFMODE, 106 HOUMASK, 107 HODMASK, 108 HOFMASK, 109 HOIOCHARSET, 110 HOCONVERTCP, 111 HONOEXEC, 112 HOEXEC, 113 HONODEV, 114 HODEV, 115 HONOSUID, 116 HOSUID, 117 HOREMOUNT, 118 HONOAUTO, 119 HONIGNORE 99 HO_RW, 100 HO_RO, 101 HO_UID, 102 HO_GID, 103 HO_TTL, 104 HO_DMODE, 105 HO_FMODE, 106 HO_UMASK, 107 HO_DMASK, 108 HO_FMASK, 109 HO_IOCHARSET, 110 HO_CONVERTCP, 111 HO_MAX_IO_PAGES, 112 HO_NOEXEC, 113 HO_EXEC, 114 HO_NODEV, 115 HO_DEV, 116 HO_NOSUID, 117 HO_SUID, 118 HO_REMOUNT, 119 HO_NOAUTO, 120 HO_NIGNORE 120 121 } handler_opt; 121 122 struct … … 127 128 } handlers[] = 128 129 { 129 {"rw", HORW, 0, "mount read write (default)"}, 130 {"ro", HORO, 0, "mount read only"}, 131 {"uid", HOUID, 1, "default file owner user id"}, 132 {"gid", HOGID, 1, "default file owner group id"}, 133 {"ttl", HOTTL, 1, "time to live for dentry"}, 134 {"iocharset", HOIOCHARSET, 1, "i/o charset (default utf8)"}, 135 {"convertcp", HOCONVERTCP, 1, "convert share name from given charset to utf8"}, 136 {"dmode", HODMODE, 1, "mode of all directories"}, 137 {"fmode", HOFMODE, 1, "mode of all regular files"}, 138 {"umask", HOUMASK, 1, "umask of directories and regular files"}, 139 {"dmask", HODMASK, 1, "umask of directories"}, 140 {"fmask", HOFMASK, 1, "umask of regular files"}, 141 {"noexec", HONOEXEC, 0, 0 }, /* don't document these options directly here */ 142 {"exec", HOEXEC, 0, 0 }, /* as they are well known and described in the */ 143 {"nodev", HONODEV, 0, 0 }, /* usual manpages */ 144 {"dev", HODEV, 0, 0 }, 145 {"nosuid", HONOSUID, 0, 0 }, 146 {"suid", HOSUID, 0, 0 }, 147 {"remount", HOREMOUNT, 0, 0 }, 148 {"noauto", HONOAUTO, 0, 0 }, 149 {"_netdev", HONIGNORE, 0, 0 }, 150 {NULL, 0, 0, NULL} 130 {"rw", HO_RW, 0, "mount read write (default)"}, 131 {"ro", HO_RO, 0, "mount read only"}, 132 {"uid", HO_UID, 1, "default file owner user id"}, 133 {"gid", HO_GID, 1, "default 
file owner group id"}, 134 {"ttl", HO_TTL, 1, "time to live for dentry"}, 135 {"iocharset", HO_IOCHARSET, 1, "i/o charset (default utf8)"}, 136 {"convertcp", HO_CONVERTCP, 1, "convert share name from given charset to utf8"}, 137 {"dmode", HO_DMODE, 1, "mode of all directories"}, 138 {"fmode", HO_FMODE, 1, "mode of all regular files"}, 139 {"umask", HO_UMASK, 1, "umask of directories and regular files"}, 140 {"dmask", HO_DMASK, 1, "umask of directories"}, 141 {"fmask", HO_FMASK, 1, "umask of regular files"}, 142 {"maxiopages", HO_MAX_IO_PAGES, 1, "max buffer size for I/O with host"}, 143 {"noexec", HO_NOEXEC, 0, NULL}, /* don't document these options directly here */ 144 {"exec", HO_EXEC, 0, NULL}, /* as they are well known and described in the */ 145 {"nodev", HO_NODEV, 0, NULL}, /* usual manpages */ 146 {"dev", HO_DEV, 0, NULL}, 147 {"nosuid", HO_NOSUID, 0, NULL}, 148 {"suid", HO_SUID, 0, NULL}, 149 {"remount", HO_REMOUNT, 0, NULL}, 150 {"noauto", HO_NOAUTO, 0, NULL}, 151 {"_netdev", HO_NIGNORE, 0, NULL}, 152 {"relatime", HO_NIGNORE, 0, NULL}, 153 {NULL, 0, 0, NULL} 151 154 }, *handler; 152 155 … … 204 207 switch(handler->opt) 205 208 { 206 case HORW: 207 opts->ronly = 0; 208 break; 209 case HORO: 210 opts->ronly = 1; 211 break; 212 case HONOEXEC: 213 opts->noexec = 1; 214 break; 215 case HOEXEC: 216 opts->noexec = 0; 217 break; 218 case HONODEV: 219 opts->nodev = 1; 220 break; 221 case HODEV: 222 opts->nodev = 0; 223 break; 224 case HONOSUID: 225 opts->nosuid = 1; 226 break; 227 case HOSUID: 228 opts->nosuid = 0; 229 break; 230 case HOREMOUNT: 231 opts->remount = 1; 232 break; 233 case HOUID: 234 /** @todo convert string to id. */ 235 opts->uid = safe_atoi(val, val_len, 10); 236 break; 237 case HOGID: 238 /** @todo convert string to id. 
*/ 239 opts->gid = safe_atoi(val, val_len, 10); 240 break; 241 case HOTTL: 242 opts->ttl = safe_atoi(val, val_len, 10); 243 break; 244 case HODMODE: 245 opts->dmode = safe_atoi(val, val_len, 8); 246 break; 247 case HOFMODE: 248 opts->fmode = safe_atoi(val, val_len, 8); 249 break; 250 case HOUMASK: 251 opts->dmask = opts->fmask = safe_atoi(val, val_len, 8); 252 break; 253 case HODMASK: 254 opts->dmask = safe_atoi(val, val_len, 8); 255 break; 256 case HOFMASK: 257 opts->fmask = safe_atoi(val, val_len, 8); 258 break; 259 case HOIOCHARSET: 260 if (val_len + 1 > sizeof(opts->nls_name)) 261 { 262 panic("iocharset name too long\n"); 263 } 264 memcpy(opts->nls_name, val, val_len); 265 opts->nls_name[val_len] = 0; 266 break; 267 case HOCONVERTCP: 268 opts->convertcp = malloc(val_len + 1); 269 if (!opts->convertcp) 270 { 271 panic_err("could not allocate memory"); 272 } 273 memcpy(opts->convertcp, val, val_len); 274 opts->convertcp[val_len] = 0; 275 break; 276 case HONOAUTO: 277 case HONIGNORE: 278 break; 209 case HO_RW: 210 opts->ronly = 0; 211 break; 212 case HO_RO: 213 opts->ronly = 1; 214 break; 215 case HO_NOEXEC: 216 opts->noexec = 1; 217 break; 218 case HO_EXEC: 219 opts->noexec = 0; 220 break; 221 case HO_NODEV: 222 opts->nodev = 1; 223 break; 224 case HO_DEV: 225 opts->nodev = 0; 226 break; 227 case HO_NOSUID: 228 opts->nosuid = 1; 229 break; 230 case HO_SUID: 231 opts->nosuid = 0; 232 break; 233 case HO_REMOUNT: 234 opts->remount = 1; 235 break; 236 case HO_UID: 237 /** @todo convert string to id. */ 238 opts->uid = safe_atoi(val, val_len, 10); 239 break; 240 case HO_GID: 241 /** @todo convert string to id. 
*/ 242 opts->gid = safe_atoi(val, val_len, 10); 243 break; 244 case HO_TTL: 245 opts->ttl = safe_atoi(val, val_len, 10); 246 break; 247 case HO_DMODE: 248 opts->dmode = safe_atoi(val, val_len, 8); 249 break; 250 case HO_FMODE: 251 opts->fmode = safe_atoi(val, val_len, 8); 252 break; 253 case HO_UMASK: 254 opts->dmask = opts->fmask = safe_atoi(val, val_len, 8); 255 break; 256 case HO_DMASK: 257 opts->dmask = safe_atoi(val, val_len, 8); 258 break; 259 case HO_FMASK: 260 opts->fmask = safe_atoi(val, val_len, 8); 261 break; 262 case HO_MAX_IO_PAGES: 263 opts->cMaxIoPages = safe_atoi(val, val_len, 10); 264 break; 265 case HO_IOCHARSET: 266 if (val_len + 1 > sizeof(opts->nls_name)) 267 { 268 panic("iocharset name too long\n"); 269 } 270 memcpy(opts->nls_name, val, val_len); 271 opts->nls_name[val_len] = 0; 272 break; 273 case HO_CONVERTCP: 274 opts->convertcp = malloc(val_len + 1); 275 if (!opts->convertcp) 276 { 277 panic_err("could not allocate memory"); 278 } 279 memcpy(opts->convertcp, val, val_len); 280 opts->convertcp[val_len] = 0; 281 break; 282 case HO_NOAUTO: 283 case HO_NIGNORE: 284 break; 279 285 } 280 286 break; … … 393 399 "\0", /* nls_name */ 394 400 NULL, /* convertcp */ 401 0, /* cMaxIoPages */ 395 402 }; 396 403 AssertCompile(sizeof(uid_t) == sizeof(int)); 397 404 AssertCompile(sizeof(gid_t) == sizeof(int)); 398 405 406 memset(&mntinf, 0, sizeof(mntinf)); 399 407 mntinf.nullchar = '\0'; 400 408 mntinf.signature[0] = VBSF_MOUNT_SIGNATURE_BYTE_0; … … 480 488 mntinf.dmask = opts.dmask; 481 489 mntinf.fmask = opts.fmask; 490 mntinf.cMaxIoPages = opts.cMaxIoPages; 482 491 483 492 /* -
trunk/src/VBox/Additions/linux/sharedfolders/regops.c
r77089 r77138 304 304 305 305 306 #ifndef VBOXSF_USE_DEPRECATED_VBGL_INTERFACE 307 /** 308 * Fallback case of sf_reg_read() that locks the user buffers and let the host 309 * write directly to them. 310 */ 311 static ssize_t sf_reg_read_fallback(struct file *file, char /*__user*/ *buf, size_t size, loff_t *off, 312 struct sf_glob_info *sf_g, struct sf_reg_info *sf_r) 313 { 314 /* 315 * Lock pages and execute the read, taking care not to pass the host 316 * more than it can handle in one go or more than we care to allocate 317 * page arrays for. The latter limit is set at just short of 32KB due 318 * to how the physical heap works. 319 */ 320 struct page *apPagesStack[16]; 321 struct page **papPages = &apPagesStack[0]; 322 struct page **papPagesFree = NULL; 323 VBOXSFREADPGLSTREQ *pReq; 324 loff_t offFile = *off; 325 ssize_t cbRet = -ENOMEM; 326 size_t cPages = (((uintptr_t)buf & PAGE_OFFSET_MASK) + size + PAGE_OFFSET_MASK) >> PAGE_SHIFT; 327 size_t cMaxPages = RT_MIN(RT_MAX(sf_g->cMaxIoPages, 1), cPages); 328 329 pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages])); 330 while (!pReq && cMaxPages > 4) { 331 cMaxPages /= 2; 332 pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages])); 333 } 334 if (pReq && cPages > RT_ELEMENTS(apPagesStack)) 335 papPagesFree = papPages = kmalloc(cMaxPages * sizeof(sizeof(papPages[0])), GFP_KERNEL); 336 if (pReq && papPages) { 337 cbRet = 0; 338 for (;;) { 339 /* 340 * Figure out how much to process now and lock the user pages. 
341 */ 342 int rc; 343 size_t cbChunk = (uintptr_t)buf & PAGE_OFFSET_MASK; 344 pReq->PgLst.offFirstPage = (uint16_t)cbChunk; 345 cPages = RT_ALIGN_Z(cbChunk + size, PAGE_SIZE) >> PAGE_SHIFT; 346 if (cPages <= cMaxPages) 347 cbChunk = size; 348 else { 349 cPages = cMaxPages; 350 cbChunk = (cMaxPages << PAGE_SHIFT) - cbChunk; 351 } 352 353 rc = sf_lock_user_pages(buf, cPages, true /*fWrite*/, papPages); 354 if (rc == 0) { 355 size_t iPage = cPages; 356 while (iPage-- > 0) 357 pReq->PgLst.aPages[iPage] = page_to_phys(papPages[iPage]); 358 } else { 359 cbRet = rc; 360 break; 361 } 362 363 /* 364 * Issue the request and unlock the pages. 365 */ 366 rc = VbglR0SfHostReqReadPgLst(sf_g->map.root, pReq, sf_r->handle, offFile, cbChunk, cPages); 367 368 sf_unlock_user_pages(papPages, cPages, true /*fSetDirty*/); 369 370 if (RT_SUCCESS(rc)) { 371 /* 372 * Success, advance position and buffer. 373 */ 374 uint32_t cbActual = pReq->Parms.cb32Read.u.value32; 375 AssertStmt(cbActual <= cbChunk, cbActual = cbChunk); 376 cbRet += cbActual; 377 offFile += cbActual; 378 buf = (uint8_t *)buf + cbActual; 379 size -= cbActual; 380 381 /* 382 * Are we done already? If so commit the new file offset. 383 */ 384 if (!size || cbActual < cbChunk) { 385 *off = offFile; 386 break; 387 } 388 } else if (rc == VERR_NO_MEMORY && cMaxPages > 4) { 389 /* 390 * The host probably doesn't have enough heap to handle the 391 * request, reduce the page count and retry. 392 */ 393 cMaxPages /= 4; 394 Assert(cMaxPages > 0); 395 } else { 396 /* 397 * If we've successfully read stuff, return it rather than 398 * the error. (Not sure if this is such a great idea...) 399 */ 400 if (cbRet > 0) 401 *off = offFile; 402 else 403 cbRet = -EPROTO; 404 break; 405 } 406 } 407 } 408 if (papPagesFree) 409 kfree(papPages); 410 if (pReq) 411 VbglR0PhysHeapFree(pReq); 412 return cbRet; 413 } 414 #endif /* VBOXSF_USE_DEPRECATED_VBGL_INTERFACE */ 415 416 306 417 /** 307 418 * Read from a regular file. 
… … 323 434 size_t left = size; 324 435 ssize_t total_bytes_read = 0; 436 loff_t pos = *off; 325 437 #endif 326 438 struct inode *inode = GET_F_DENTRY(file)->d_inode; 327 439 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); 328 440 struct sf_reg_info *sf_r = file->private_data; 329 loff_t pos = *off;330 441 331 442 TRACE(); … … 384 495 * that does not cross page boundraries (see host code). 385 496 */ 386 if (size <= PAGE_SIZE / 4 * 3 /* see allocator */) {497 if (size <= PAGE_SIZE / 4 * 3 - RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0]) /* see allocator */) { 387 498 uint32_t const cbReq = RT_UOFFSETOF(VBOXSFREADEMBEDDEDREQ, abData[0]) + size; 388 499 VBOXSFREADEMBEDDEDREQ *pReq = (VBOXSFREADEMBEDDEDREQ *)VbglR0PhysHeapAlloc(cbReq); … … 390 501 && (PAGE_SIZE - ((uintptr_t)pReq & PAGE_OFFSET_MASK)) >= cbReq) { 391 502 ssize_t cbRet; 392 int vrc = VbglR0SfHostReqReadEmbedded(sf_g->map.root, pReq, sf_r->handle, pos, (uint32_t)size);503 int vrc = VbglR0SfHostReqReadEmbedded(sf_g->map.root, pReq, sf_r->handle, *off, (uint32_t)size); 393 504 if (RT_SUCCESS(vrc)) { 394 505 cbRet = pReq->Parms.cb32Read.u.value32; … … 417 528 if (pReq) { 418 529 ssize_t cbRet; 419 int vrc = VbglR0SfHostReqReadContig(sf_g->map.root, pReq, sf_r->handle, pos, (uint32_t)size,530 int vrc = VbglR0SfHostReqReadContig(sf_g->map.root, pReq, sf_r->handle, *off, (uint32_t)size, 420 531 pvBounce, virt_to_phys(pvBounce)); 421 532 if (RT_SUCCESS(vrc)) { … … 437 548 # endif 438 549 439 /* 440 * Lock pages and execute the read, taking care not to pass the host 441 * more than it can handle in one go or more than we care to allocate 442 * page arrays for. The latter limit is set at just short of 32KB due 443 * to how the physical heap works. 
444 */ 445 { 446 struct page *apPagesStack[8]; 447 struct page **papPages = &apPagesStack[0]; 448 struct page **papPagesFree = NULL; 449 VBOXSFREADPGLSTREQ *pReq; 450 ssize_t cbRet = -ENOMEM; 451 size_t cPages = (((uintptr_t)buf & PAGE_OFFSET_MASK) + size + PAGE_OFFSET_MASK) >> PAGE_SHIFT; 452 size_t cMaxPages = RT_MIN(cPages, 453 RT_MIN((_32K - sizeof(VBOXSFREADPGLSTREQ) - 64) / sizeof(RTGCPHYS64), 454 VMMDEV_MAX_HGCM_DATA_SIZE >> PAGE_SHIFT)); 455 456 pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages])); 457 while (!pReq && cMaxPages > 4) { 458 cMaxPages /= 2; 459 pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages])); 460 } 461 if (pReq && cPages > RT_ELEMENTS(apPagesStack)) 462 papPagesFree = papPages = kmalloc(cMaxPages * sizeof(sizeof(papPages[0])), GFP_KERNEL); 463 if (pReq && papPages) { 464 cbRet = 0; 465 for (;;) { 466 /* Figure out how much to process now and lock the user pages. 
*/ 467 int rc; 468 size_t cbChunk = (uintptr_t)buf & PAGE_OFFSET_MASK; 469 pReq->PgLst.offFirstPage = (uint16_t)cbChunk; 470 cPages = RT_ALIGN_Z(cbChunk + size, PAGE_SIZE) >> PAGE_SHIFT; 471 if (cPages <= cMaxPages) 472 cbChunk = size; 473 else { 474 cPages = cMaxPages; 475 cbChunk = (cMaxPages << PAGE_SHIFT) - cbChunk; 476 } 477 478 rc = sf_lock_user_pages(buf, cPages, true /*fWrite*/, papPages); 479 if (rc == 0) { 480 size_t iPage = cPages; 481 while (iPage-- > 0) 482 pReq->PgLst.aPages[iPage] = page_to_phys(papPages[iPage]); 483 } else { 484 cbRet = rc; 485 break; 486 } 487 488 rc = VbglR0SfHostReqReadPgLst(sf_g->map.root, pReq, sf_r->handle, pos, cbChunk, cPages); 489 490 sf_unlock_user_pages(papPages, cPages, true /*fSetDirty*/); 491 492 if (RT_SUCCESS(rc)) { 493 uint32_t cbActual = pReq->Parms.cb32Read.u.value32; 494 AssertStmt(cbActual <= cbChunk, cbActual = cbChunk); 495 cbRet += cbActual; 496 pos += cbActual; 497 buf = (uint8_t *)buf + cbActual; 498 size -= cbActual; 499 if (!size || cbActual < cbChunk) { 500 *off = pos; 501 break; 502 } 503 } else { 504 if (cbRet > 0) 505 *off = pos; 506 else 507 cbRet = -EPROTO; 508 break; 509 } 510 } 511 } 512 if (papPagesFree) 513 kfree(papPages); 514 if (pReq) 515 VbglR0PhysHeapFree(pReq); 516 return cbRet; 517 } 550 return sf_reg_read_fallback(file, buf, size, off, sf_g, sf_r); 518 551 #endif /* !VBOXSF_USE_DEPRECATED_VBGL_INTERFACE */ 519 552 } … … 564 597 return 0; 565 598 566 tmp = 567 alloc_bounce_buffer(&tmp_size, &tmp_phys, size, 568 __PRETTY_FUNCTION__); 599 tmp = alloc_bounce_buffer(&tmp_size, &tmp_phys, size, 600 __PRETTY_FUNCTION__); 569 601 if (!tmp) 570 602 return -ENOMEM; -
trunk/src/VBox/Additions/linux/sharedfolders/vbsfmount.c
r76553 r77138 58 58 else 59 59 fprintf(m, "%s,", MNTOPT_RW); 60 if (opts->cMaxIoPages) 61 fprintf(m, "maxiopages=%u,", opts->cMaxIoPages); 60 62 61 63 fclose(m); -
trunk/src/VBox/Additions/linux/sharedfolders/vbsfmount.h
r76733 r77138 44 44 45 45 struct vbsf_mount_info_new { 46 /* 46 /** 47 47 * The old version of the mount_info struct started with a 48 48 * char name[MAX_HOST_NAME] field, where name cannot be '\0'. … … 52 52 */ 53 53 char nullchar; 54 char signature[3]; /* signature */55 int length; /* length of the whole structure */56 char name[MAX_HOST_NAME]; /* share name */57 char nls_name[MAX_NLS_NAME]; /* name of an I/O charset */58 int uid; /* user ID for all entries, default 0=root */59 int gid; /* group ID for all entries, default 0=root */60 int ttl; /* time to live */61 int dmode; /* mode for directories if != 0xffffffff */62 int fmode; /* mode for regular files if != 0xffffffff */63 int dmask; /* umask applied to directories */64 int fmask; /* umask applied to regular files */54 char signature[3]; /**< signature */ 55 int length; /**< length of the whole structure */ 56 char name[MAX_HOST_NAME]; /**< share name */ 57 char nls_name[MAX_NLS_NAME]; /**< name of an I/O charset */ 58 int uid; /**< user ID for all entries, default 0=root */ 59 int gid; /**< group ID for all entries, default 0=root */ 60 int ttl; /**< time to live */ 61 int dmode; /**< mode for directories if != 0xffffffff */ 62 int fmode; /**< mode for regular files if != 0xffffffff */ 63 int dmask; /**< umask applied to directories */ 64 int fmask; /**< umask applied to regular files */ 65 65 char tag[32]; /**< Mount tag for VBoxService automounter. @since 6.0 */ 66 uint32_t cMaxIoPages; /**< Max pages to read & write at a time. @since 6.0.6 */ 66 67 }; 67 68 … … 82 83 char nls_name[MAX_NLS_NAME]; 83 84 char *convertcp; 85 uint32_t cMaxIoPages; 84 86 }; 85 87 -
trunk/src/VBox/Additions/linux/sharedfolders/vfsmod.c
r77054 r77138 64 64 static struct super_operations sf_super_ops; 65 65 66 /** 67 * Copies options from the mount info structure into @a sf_g. 68 * 69 * This is used both by sf_glob_alloc() and sf_remount_fs(). 70 */ 71 static void sf_glob_copy_remount_options(struct sf_glob_info *sf_g, struct vbsf_mount_info_new *info) 72 { 73 sf_g->ttl = info->ttl; 74 sf_g->uid = info->uid; 75 sf_g->gid = info->gid; 76 77 if ((unsigned)info->length >= RT_UOFFSETOF(struct vbsf_mount_info_new, tag)) { 78 /* new fields */ 79 sf_g->dmode = info->dmode; 80 sf_g->fmode = info->fmode; 81 sf_g->dmask = info->dmask; 82 sf_g->fmask = info->fmask; 83 } else { 84 sf_g->dmode = ~0; 85 sf_g->fmode = ~0; 86 } 87 88 if ((unsigned)info->length >= RT_UOFFSETOF(struct vbsf_mount_info_new, cMaxIoPages)) { 89 AssertCompile(sizeof(sf_g->tag) >= sizeof(info->tag)); 90 memcpy(sf_g->tag, info->tag, sizeof(info->tag)); 91 sf_g->tag[sizeof(sf_g->tag) - 1] = '\0'; 92 } else { 93 sf_g->tag[0] = '\0'; 94 } 95 96 /* The max number of pages in an I/O request. This must take into 97 account that the physical heap generally grows in 64 KB chunks, 98 so we should not try push that limit. It also needs to take 99 into account that the host will allocate temporary heap buffers 100 for the I/O bytes we send/receive, so don't push the host heap 101 too hard as we'd have to retry with smaller requests when this 102 happens, which isn't too efficient. 
*/ 103 sf_g->cMaxIoPages = RT_MIN(_16K / sizeof(RTGCPHYS64) /* => 8MB buffer */, 104 VMMDEV_MAX_HGCM_DATA_SIZE >> PAGE_SHIFT); 105 if ( (unsigned)info->length >= sizeof(struct vbsf_mount_info_new) 106 && info->cMaxIoPages != 0) { 107 if (info->cMaxIoPages <= VMMDEV_MAX_HGCM_DATA_SIZE >> PAGE_SHIFT) 108 sf_g->cMaxIoPages = info->cMaxIoPages; 109 else 110 printk(KERN_WARNING "vboxsf: max I/O page count (%#x) is out of range, using default (%#x) instead.\n", 111 info->cMaxIoPages, sf_g->cMaxIoPages); 112 } 113 } 114 66 115 /* allocate global info, try to map host share */ 67 116 static int sf_glob_alloc(struct vbsf_mount_info_new *info, … … 155 204 } 156 205 157 sf_g->ttl = info->ttl; 158 sf_g->uid = info->uid; 159 sf_g->gid = info->gid; 160 161 if ((unsigned)info->length >= RT_UOFFSETOF(struct vbsf_mount_info_new, tag)) { 162 /* new fields */ 163 sf_g->dmode = info->dmode; 164 sf_g->fmode = info->fmode; 165 sf_g->dmask = info->dmask; 166 sf_g->fmask = info->fmask; 167 } else { 168 sf_g->dmode = ~0; 169 sf_g->fmode = ~0; 170 } 171 172 if ((unsigned)info->length >= sizeof(struct vbsf_mount_info_new)) { 173 AssertCompile(sizeof(sf_g->tag) >= sizeof(info->tag)); 174 memcpy(sf_g->tag, info->tag, sizeof(info->tag)); 175 sf_g->tag[sizeof(sf_g->tag) - 1] = '\0'; 176 } else { 177 sf_g->tag[0] = '\0'; 178 } 206 /* The rest is shared with remount. 
*/ 207 sf_glob_copy_remount_options(sf_g, info); 179 208 180 209 *sf_gp = sf_g; … … 467 496 if (data && data[0] != 0) { 468 497 struct vbsf_mount_info_new *info = (struct vbsf_mount_info_new *)data; 469 if (info->signature[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 498 if ( info->nullchar == '\0' 499 && info->signature[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 470 500 && info->signature[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 471 501 && info->signature[2] == VBSF_MOUNT_SIGNATURE_BYTE_2) { 472 sf_g->uid = info->uid; 473 sf_g->gid = info->gid; 474 sf_g->ttl = info->ttl; 475 if ((unsigned)info->length >= RT_UOFFSETOF(struct vbsf_mount_info_new, tag)) { 476 sf_g->dmode = info->dmode; 477 sf_g->fmode = info->fmode; 478 sf_g->dmask = info->dmask; 479 sf_g->fmask = info->fmask; 480 } else { 481 sf_g->dmode = ~0; 482 sf_g->fmode = ~0; 483 } 484 if ((unsigned)info->length >= sizeof(struct vbsf_mount_info_new)) { 485 AssertCompile(sizeof(sf_g->tag) >= sizeof(info->tag)); 486 memcpy(sf_g->tag, info->tag, sizeof(info->tag)); 487 sf_g->tag[sizeof(sf_g->tag) - 1] = '\0'; 488 } else { 489 sf_g->tag[0] = '\0'; 490 } 502 sf_glob_copy_remount_options(sf_g, info); 491 503 } 492 504 } … … 521 533 struct sf_glob_info *sf_g = GET_GLOB_INFO(sb); 522 534 if (sf_g) { 523 seq_printf(m, ",uid=%u,gid=%u,ttl=%u,dmode=0%o,fmode=0%o,dmask=0%o,fmask=0%o", 524 sf_g->uid, sf_g->gid, sf_g->ttl, sf_g->dmode, sf_g->fmode, sf_g->dmask, sf_g->fmask); 535 seq_printf(m, ",uid=%u,gid=%u,ttl=%u,dmode=0%o,fmode=0%o,dmask=0%o,fmask=0%o,maxiopages=%u", 536 sf_g->uid, sf_g->gid, sf_g->ttl, sf_g->dmode, sf_g->fmode, sf_g->dmask, 537 sf_g->fmask, sf_g->cMaxIoPages); 525 538 if (sf_g->tag[0] != '\0') { 526 539 seq_puts(m, ",tag="); -
trunk/src/VBox/Additions/linux/sharedfolders/vfsmod.h
r77054 r77138 63 63 int dmask; 64 64 int fmask; 65 /** Maximum number of pages to allow in an I/O buffer with the host. 66 * This applies to read and write operations. */ 67 uint32_t cMaxIoPages; 65 68 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) 66 69 struct backing_dev_info bdi;
Note: See TracChangeset for help on using the changeset viewer.