Timestamp:
- Feb 22, 2019 9:47:06 PM
File:
- 1 edited
trunk/src/VBox/Additions/linux/sharedfolders/regops.c
--- r77439
+++ r77443

@@ -474,9 +474,5 @@
     struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
     struct sf_reg_info *sf_r = file->private_data;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 2)
-    struct address_space *mapping = file->f_mapping;
-#else
     struct address_space *mapping = inode->i_mapping;
-#endif
 
     TRACE();

@@ -531,5 +527,5 @@
     }
 
-# if 0
+#if 0 /* Turns out this is slightly slower than locking the pages even for 4KB reads (4.19/amd64). */
     /*
      * For medium sized requests try to use a bounce buffer.

@@ -559,6 +555,34 @@
         }
     }
+#endif
+
+    return sf_reg_read_fallback(file, buf, size, off, sf_g, sf_r);
+}
+
+
+/**
+ * Wrapper around invalidate_mapping_pages() for page cache invalidation so that
+ * the changes written via sf_reg_write are made visible to mmap users.
+ */
+DECLINLINE(void) sf_reg_write_invalidate_mapping_range(struct address_space *mapping, loff_t offStart, loff_t offEnd)
+{
+    /*
+     * Only bother with this if the mapping has any pages in it.
+     *
+     * Note! According to the docs, the last parameter, end, is inclusive (we
+     *       would have named it 'last' to indicate this).
+     *
+     * Note! The pre-2.6.12 function might not do enough to ensure consistency
+     *       when any of the pages in the range is already mapped.
+     */
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 12)
+    if (mapping)
+        invalidate_inode_pages2_range(mapping, offStart >> PAGE_SHIFT, (offEnd - 1) >> PAGE_SHIFT);
+# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 60)
+    if (mapping && mapping->nrpages > 0)
+        invalidate_mapping_pages(mapping, offStart >> PAGE_SHIFT, (offEnd - 1) >> PAGE_SHIFT);
+# else
+    /** @todo ... */
+    RT_NOREF(mapping, offStart, offEnd);
 # endif
-
-    return sf_reg_read_fallback(file, buf, size, off, sf_g, sf_r);
 }

@@ -640,4 +664,5 @@
         if (offFile > i_size_read(inode))
             i_size_write(inode, offFile);
+        sf_reg_write_invalidate_mapping_range(inode->i_mapping, offFile - cbActual, offFile);
 
         /*

@@ -689,9 +714,10 @@
                             loff_t * off)
 {
-    loff_t pos;
     struct inode *inode = GET_F_DENTRY(file)->d_inode;
     struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
     struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
     struct sf_reg_info *sf_r = file->private_data;
+    struct address_space *mapping = inode->i_mapping;
+    loff_t pos;
 
     TRACE();

@@ -717,4 +743,20 @@
         *off = pos;
         return 0;
+    }
+
+    /*
+     * If there are active writable mappings, coordinate with any
+     * pending writes via those.
+     */
+    if (   mapping
+        && mapping->nrpages > 0
+        && mapping_writably_mapped(mapping)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
+        int err = filemap_fdatawait_range(mapping, pos, pos + size - 1);
+        if (err)
+            return err;
+#else
+        /** @todo ... */
+#endif
     }
 

@@ -738,4 +780,5 @@
             if (pos > i_size_read(inode))
                 i_size_write(inode, pos);
+            sf_reg_write_invalidate_mapping_range(mapping, pos - cbRet, pos);
         } else
             cbRet = -EPROTO;

@@ -751,5 +794,5 @@
     }
 
-# if 0
+#if 0 /* Turns out this is slightly slower than locking the pages even for 4KB reads (4.19/amd64). */
     /*
      * For medium sized requests try to use a bounce buffer.
@@ -771,4 +814,5 @@
             if (pos > i_size_read(inode))
                 i_size_write(inode, pos);
+            sf_reg_write_invalidate_mapping_range(mapping, pos - cbRet, pos);
         } else
             cbRet = -EPROTO;

@@ -785,5 +829,5 @@
         }
     }
-# endif
+#endif
 
     return sf_reg_write_fallback(file, buf, size, off, pos, inode, sf_i, sf_g, sf_r);

@@ -1216,4 +1260,8 @@
 {
     TRACE();
+#if 0
+    printk("sf_write_begin: pos=%#llx len=%#x flags=%#x\n", pos, len, flags);
+    RTLogBackdoorPrintf("sf_write_begin: pos=%#llx len=%#x flags=%#x\n", pos, len, flags);
+#endif
     /** @todo rig up a FsPerf testcase for this code! */
 
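The new sf_reg_write_invalidate_mapping_range() helper converts a byte range into an inclusive page-index range, as the "Note!" in its comment points out. Below is a minimal standalone sketch of that conversion; the offsets and the 4KB PAGE_SHIFT are assumptions for illustration, not taken from the changeset:

/* Standalone sketch (not part of the changeset) of the page-index math used
 * by sf_reg_write_invalidate_mapping_range(): invalidate_inode_pages2_range()
 * and invalidate_mapping_pages() take an INCLUSIVE last page index, so a
 * write covering the byte range [offStart, offEnd) touches pages
 * offStart >> PAGE_SHIFT through (offEnd - 1) >> PAGE_SHIFT. */
#include <stdio.h>

#define PAGE_SHIFT 12 /* assumes 4KB pages (x86) */

int main(void)
{
    /* Example write straddling a page boundary; values are made up. */
    long long offStart = 0x1ff8; /* last 8 bytes of page 1 */
    long long offEnd   = 0x2008; /* first 8 bytes of page 2, exclusive end */

    long long first = offStart >> PAGE_SHIFT;     /* = 1 */
    long long last  = (offEnd - 1) >> PAGE_SHIFT; /* = 2 */

    /* Passing offEnd >> PAGE_SHIFT instead would be wrong: for a
     * page-aligned offEnd it names a page the write never touched, and
     * because the parameter is inclusive that extra page would be
     * invalidated needlessly. */
    printf("invalidate page indexes %lld..%lld\n", first, last);
    return 0;
}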
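The doc-comment states the goal of the change: data written via sf_reg_write should become visible to mmap users of the same file. A hypothetical user-space check of that behaviour on a vboxsf mount follows; the path, file name, and sizes are invented for illustration:

/* Hypothetical check for the behaviour this changeset addresses: a write(2)
 * to a vboxsf file should become visible through an existing shared read
 * mapping of that file once the page cache range is invalidated. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    const char *path = "/media/sf_shared/coherence.tst"; /* assumed mount */
    int fd = open(path, O_RDWR | O_CREAT, 0644);
    if (fd < 0)
        return 1;
    if (write(fd, "old!", 4) != 4)
        return 1;

    /* Map the file first, then overwrite the same bytes via pwrite(2). */
    char *map = mmap(NULL, 4, PROT_READ, MAP_SHARED, fd, 0);
    if (map == MAP_FAILED)
        return 1;
    if (pwrite(fd, "new!", 4, 0) != 4)
        return 1;

    /* With the invalidation added in r77443 the mapping should observe
     * the new bytes; without it, stale page cache contents may linger. */
    printf("mapping now reads: %.4s\n", map);
    munmap(map, 4);
    close(fd);
    return 0;
}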