Changeset 77089 in vbox for trunk/src/VBox/Additions/linux

Timestamp: Jan 31, 2019 8:51:23 PM
File:      1 edited

Legend: unmodified lines are prefixed with a space, added lines with "+", removed lines with "-"; "…" marks elided unchanged stretches.
trunk/src/VBox/Additions/linux/sharedfolders/regops.c
r77064 → r77089:

 #endif /* 2.6.23 <= LINUX_VERSION_CODE < 2.6.31 */
 
+
+/** Companion to sf_lock_user_pages(). */
+DECLINLINE(void) sf_unlock_user_pages(struct page **papPages, size_t cPages, bool fSetDirty)
+{
+    while (cPages-- > 0)
+    {
+        struct page *pPage = papPages[cPages];
+        if (fSetDirty && !PageReserved(pPage))
+            SetPageDirty(pPage);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+        put_page(pPage);
+#else
+        page_cache_release(pPage);
+#endif
+    }
+}
+
+
+/** Wrapper around get_user_pages. */
+DECLINLINE(int) sf_lock_user_pages(void /*__user*/ *pvFrom, size_t cPages, bool fWrite, struct page **papPages)
+{
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0)
+    ssize_t cPagesLocked = get_user_pages_unlocked((uintptr_t)pvFrom, cPages, papPages,
+                                                   fWrite ? FOLL_WRITE | FOLL_FORCE : FOLL_FORCE);
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
+    ssize_t cPagesLocked = get_user_pages_unlocked((uintptr_t)pvFrom, cPages, fWrite, 1 /*force*/, papPages);
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)
+    ssize_t cPagesLocked = get_user_pages_unlocked(current, current->mm, (uintptr_t)pvFrom, cPages,
+                                                   fWrite, 1 /*force*/, papPages);
+# else
+    struct task_struct *pTask = current;
+    size_t cPagesLocked;
+    down_read(&pTask->mm->mmap_sem);
+    cPagesLocked = get_user_pages(current, current->mm, (uintptr_t)pvFrom, cPages, fWrite, 1 /*force*/, papPages, NULL);
+    up_read(&pTask->mm->mmap_sem);
+# endif
+    if (cPagesLocked == cPages)
+        return 0;
+    if (cPagesLocked < 0)
+        return cPagesLocked;
+
+    sf_unlock_user_pages(papPages, cPagesLocked, false /*fSetDirty*/);
+
+    /* We could use pvFrom + cPagesLocked to get the correct status here... */
+    return -EFAULT;
+}
+
+
 /**
  * Read from a regular file.
…
         loff_t *off)
 {
+#ifdef VBOXSF_USE_DEPRECATED_VBGL_INTERFACE
     int err;
     void *tmp;
…
     size_t left = size;
     ssize_t total_bytes_read = 0;
+#endif
     struct inode *inode = GET_F_DENTRY(file)->d_inode;
     struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
…
         return 0;
 
-#ifndef VBOXSF_USE_DEPRECATED_VBGL_INTERFACE
+#ifdef VBOXSF_USE_DEPRECATED_VBGL_INTERFACE
+    tmp = alloc_bounce_buffer(&tmp_size, &tmp_phys, size, __PRETTY_FUNCTION__);
+    if (!tmp)
+        return -ENOMEM;
+
+    while (left) {
+        uint32_t to_read, nread;
+
+        to_read = tmp_size;
+        if (to_read > left)
+            to_read = (uint32_t) left;
+
+        nread = to_read;
+
+        err = sf_reg_read_aux(__func__, sf_g, sf_r, tmp, &nread, pos);
+        if (err)
+            goto fail;
+
+        if (copy_to_user(buf, tmp, nread)) {
+            err = -EFAULT;
+            goto fail;
+        }
+
+        pos += nread;
+        left -= nread;
+        buf += nread;
+        total_bytes_read += nread;
+        if (nread != to_read)
+            break;
+    }
+
+    *off += total_bytes_read;
+    free_bounce_buffer(tmp);
+    return total_bytes_read;
+
+fail:
+    free_bounce_buffer(tmp);
+    return err;
+
+#else /* !VBOXSF_USE_DEPRECATED_VBGL_INTERFACE */
     /*
      * For small requests, try to use an embedded buffer provided we get a heap block
…
     if (RT_SUCCESS(vrc)) {
         cbRet = pReq->Parms.cb32Read.u.value32;
+        AssertStmt(cbRet <= (ssize_t)size, cbRet = size);
         if (copy_to_user(buf, pReq->abData, cbRet) == 0)
             *off += cbRet;
…
     }
 
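The sf_lock_user_pages()/sf_unlock_user_pages() pair added above hides four generations of the get_user_pages() API behind one signature: the caller pins the pages backing a user buffer, uses them, and releases them again, with fSetDirty telling the unlock side whether the host wrote into the pages. A minimal usage sketch, not part of the changeset and with an invented function name, might look like this:

    /* Hypothetical illustration of the helper pair above: pin the pages
     * backing a user buffer, convert them to physical addresses for a host
     * request, then release them.  sf_example_fill_pglst() is an invented name. */
    static int sf_example_fill_pglst(void /*__user*/ *pvUser, size_t cPages,
                                     struct page **papPages, RTGCPHYS64 *paPhys)
    {
        /* true = the host will write into these pages (i.e. this is a read). */
        int rc = sf_lock_user_pages(pvUser, cPages, true /*fWrite*/, papPages);
        if (rc == 0) {
            size_t iPage;
            for (iPage = 0; iPage < cPages; iPage++)
                paPhys[iPage] = page_to_phys(papPages[iPage]);
            /* ... submit the page-list request to the host here ... */
            /* true = mark the pages dirty, since the host modified them. */
            sf_unlock_user_pages(papPages, cPages, true /*fSetDirty*/);
        }
        return rc;
    }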
-    //    /*
-    //     * For other requests, use a bounce buffer.
-    //     */
-    //    VBOXSFREADPGLSTREQ *pReq = (VBOXSFREADEMBEDDEDREQ *)VbglR0PhysHeapAlloc(cbReq);
-#endif
-
-    tmp = alloc_bounce_buffer(&tmp_size, &tmp_phys, size, __PRETTY_FUNCTION__);
-    if (!tmp)
-        return -ENOMEM;
-
-    while (left) {
-        uint32_t to_read, nread;
-
-        to_read = tmp_size;
-        if (to_read > left)
-            to_read = (uint32_t) left;
-
-        nread = to_read;
-
-        err = sf_reg_read_aux(__func__, sf_g, sf_r, tmp, &nread, pos);
-        if (err)
-            goto fail;
-
-        if (copy_to_user(buf, tmp, nread)) {
-            err = -EFAULT;
-            goto fail;
-        }
-
-        pos += nread;
-        left -= nread;
-        buf += nread;
-        total_bytes_read += nread;
-        if (nread != to_read)
-            break;
-    }
-
-    *off += total_bytes_read;
-    free_bounce_buffer(tmp);
-    return total_bytes_read;
-
-fail:
-    free_bounce_buffer(tmp);
-    return err;
+# if 0 /* Turns out this is slightly slower than locking the pages even for 4KB reads (4.19/amd64). */
+    /*
+     * For medium sized requests try to use a bounce buffer.
+     */
+    if (size <= _64K /** @todo make this configurable? */) {
+        void *pvBounce = kmalloc(size, GFP_KERNEL);
+        if (pvBounce) {
+            VBOXSFREADPGLSTREQ *pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(sizeof(*pReq));
+            if (pReq) {
+                ssize_t cbRet;
+                int vrc = VbglR0SfHostReqReadContig(sf_g->map.root, pReq, sf_r->handle, pos, (uint32_t)size,
+                                                    pvBounce, virt_to_phys(pvBounce));
+                if (RT_SUCCESS(vrc)) {
+                    cbRet = pReq->Parms.cb32Read.u.value32;
+                    AssertStmt(cbRet <= (ssize_t)size, cbRet = size);
+                    if (copy_to_user(buf, pvBounce, cbRet) == 0)
+                        *off += cbRet;
+                    else
+                        cbRet = -EPROTO;
+                } else
+                    cbRet = -EPROTO;
+                VbglR0PhysHeapFree(pReq);
+                kfree(pvBounce);
+                return cbRet;
+            }
+            kfree(pvBounce);
+        }
+    }
+# endif
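The medium-size bounce-buffer path above is compiled out with `# if 0`: per its comment, benchmarking on a 4.19/amd64 guest showed it slightly slower than pinning the user pages, even for 4KB reads. It needs a physically contiguous buffer (kmalloc rather than vmalloc) because VbglR0SfHostReqReadContig hands the host a single physical address. If the `@todo` about making the 64KB cutoff configurable were ever taken up, one conceivable approach (purely illustrative; these names are invented and not part of the changeset) would be a module parameter:

    #include <linux/module.h>

    /* Hypothetical sketch for the "@todo make this configurable?" above:
     * expose the bounce-buffer cutoff as a module parameter. */
    static unsigned int g_cbBounceMax = _64K;   /* invented name */
    module_param_named(bounce_max, g_cbBounceMax, uint, 0644);
    MODULE_PARM_DESC(bounce_max, "Largest read in bytes served via a contiguous bounce buffer");

    /* The size test would then read:  if (size <= g_cbBounceMax) { ... } */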
+
+    /*
+     * Lock pages and execute the read, taking care not to pass the host
+     * more than it can handle in one go or more than we care to allocate
+     * page arrays for.  The latter limit is set at just short of 32KB due
+     * to how the physical heap works.
+     */
+    {
+        struct page        *apPagesStack[8];
+        struct page       **papPages     = &apPagesStack[0];
+        struct page       **papPagesFree = NULL;
+        VBOXSFREADPGLSTREQ *pReq;
+        ssize_t             cbRet        = -ENOMEM;
+        size_t              cPages       = (((uintptr_t)buf & PAGE_OFFSET_MASK) + size + PAGE_OFFSET_MASK) >> PAGE_SHIFT;
+        size_t              cMaxPages    = RT_MIN(cPages,
+                                                  RT_MIN((_32K - sizeof(VBOXSFREADPGLSTREQ) - 64) / sizeof(RTGCPHYS64),
+                                                         VMMDEV_MAX_HGCM_DATA_SIZE >> PAGE_SHIFT));
+
+        pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages]));
+        while (!pReq && cMaxPages > 4) {
+            cMaxPages /= 2;
+            pReq = (VBOXSFREADPGLSTREQ *)VbglR0PhysHeapAlloc(RT_UOFFSETOF_DYN(VBOXSFREADPGLSTREQ, PgLst.aPages[cMaxPages]));
+        }
+        if (pReq && cPages > RT_ELEMENTS(apPagesStack))
+            papPagesFree = papPages = kmalloc(cMaxPages * sizeof(papPages[0]), GFP_KERNEL);
+        if (pReq && papPages) {
+            cbRet = 0;
+            for (;;) {
+                /* Figure out how much to process now and lock the user pages. */
+                int rc;
+                size_t cbChunk = (uintptr_t)buf & PAGE_OFFSET_MASK;
+                pReq->PgLst.offFirstPage = (uint16_t)cbChunk;
+                cPages = RT_ALIGN_Z(cbChunk + size, PAGE_SIZE) >> PAGE_SHIFT;
+                if (cPages <= cMaxPages)
+                    cbChunk = size;
+                else {
+                    cPages  = cMaxPages;
+                    cbChunk = (cMaxPages << PAGE_SHIFT) - cbChunk;
+                }
+
+                rc = sf_lock_user_pages(buf, cPages, true /*fWrite*/, papPages);
+                if (rc == 0) {
+                    size_t iPage = cPages;
+                    while (iPage-- > 0)
+                        pReq->PgLst.aPages[iPage] = page_to_phys(papPages[iPage]);
+                } else {
+                    cbRet = rc;
+                    break;
+                }
+
+                rc = VbglR0SfHostReqReadPgLst(sf_g->map.root, pReq, sf_r->handle, pos, cbChunk, cPages);
+
+                sf_unlock_user_pages(papPages, cPages, true /*fSetDirty*/);
+
+                if (RT_SUCCESS(rc)) {
+                    uint32_t cbActual = pReq->Parms.cb32Read.u.value32;
+                    AssertStmt(cbActual <= cbChunk, cbActual = cbChunk);
+                    cbRet += cbActual;
+                    pos   += cbActual;
+                    buf    = (uint8_t *)buf + cbActual;
+                    size  -= cbActual;
+                    if (!size || cbActual < cbChunk) {
+                        *off = pos;
+                        break;
+                    }
+                } else {
+                    if (cbRet > 0)
+                        *off = pos;
+                    else
+                        cbRet = -EPROTO;
+                    break;
+                }
+            }
+        }
+        if (papPagesFree)
+            kfree(papPages);
+        if (pReq)
+            VbglR0PhysHeapFree(pReq);
+        return cbRet;
+    }
+#endif /* !VBOXSF_USE_DEPRECATED_VBGL_INTERFACE */
 }
 
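The page-list path splits the read into chunks of at most cMaxPages pages: the first chunk is shortened by the buffer's offset into its first page, and cMaxPages is itself capped so that the request header plus one RTGCPHYS64 entry per page fits in a physical-heap block of just short of 32KB. A standalone sketch of that chunking arithmetic, assuming 4KB pages and written as ordinary user-space C purely for illustration:

    /* Standalone illustration of the chunk arithmetic in the loop above.
     * Assumes 4KB pages; compiles in user space, for demonstration only. */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT       12
    #define PAGE_SIZE        ((size_t)1 << PAGE_SHIFT)
    #define PAGE_OFFSET_MASK (PAGE_SIZE - 1)

    static size_t first_chunk(uintptr_t uPtr, size_t cbLeft, size_t cMaxPages, size_t *pcPages)
    {
        size_t offPage = uPtr & PAGE_OFFSET_MASK;  /* offset into the first page */
        size_t cPages  = (offPage + cbLeft + PAGE_OFFSET_MASK) >> PAGE_SHIFT; /* pages spanned */
        if (cPages <= cMaxPages) {
            *pcPages = cPages;
            return cbLeft;                         /* the whole request fits in one go */
        }
        *pcPages = cMaxPages;
        return (cMaxPages << PAGE_SHIFT) - offPage; /* fill exactly cMaxPages pages */
    }

    int main(void)
    {
        size_t cPages;
        /* A 100000-byte read from a buffer starting 0x123 bytes into a page,
         * with room for at most 8 pages per host request: */
        size_t cbChunk = first_chunk(0x7f0000000123, 100000, 8, &cPages);
        printf("first chunk: %zu bytes across %zu pages\n", cbChunk, cPages);
        /* prints: first chunk: 32477 bytes across 8 pages */
        return 0;
    }

Each iteration then advances buf, pos, and the remaining size by the byte count the host actually returned, stopping early on a short read.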