Changeset 70786 in vbox for trunk/src/VBox/Additions/linux/sharedfolders
- Timestamp: Jan 29, 2018 10:57:10 AM
- Location: trunk/src/VBox/Additions/linux/sharedfolders
- Files: 7 edited
trunk/src/VBox/Additions/linux/sharedfolders/dirops.c
r69500 r70786 28 28 static int sf_dir_open(struct inode *inode, struct file *file) 29 29 { 30 int rc; 31 int err; 32 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); 33 struct sf_dir_info *sf_d; 34 struct sf_inode_info *sf_i = GET_INODE_INFO(inode); 35 SHFLCREATEPARMS params; 36 37 TRACE(); 38 BUG_ON(!sf_g); 39 BUG_ON(!sf_i); 40 41 if (file->private_data) 42 { 43 LogFunc(("sf_dir_open() called on already opened directory '%s'\n", 44 sf_i->path->String.utf8)); 45 return 0; 46 } 47 48 sf_d = sf_dir_info_alloc(); 49 if (!sf_d) 50 { 51 LogRelFunc(("could not allocate directory info for '%s'\n", 52 sf_i->path->String.utf8)); 53 return -ENOMEM; 54 } 55 56 RT_ZERO(params); 57 params.Handle = SHFL_HANDLE_NIL; 58 params.CreateFlags = 0 59 | SHFL_CF_DIRECTORY 60 | SHFL_CF_ACT_OPEN_IF_EXISTS 61 | SHFL_CF_ACT_FAIL_IF_NEW 62 | SHFL_CF_ACCESS_READ 63 ; 64 65 LogFunc(("sf_dir_open(): calling VbglR0SfCreate, folder %s, flags %#x\n", 66 sf_i->path->String.utf8, params.CreateFlags)); 67 rc = VbglR0SfCreate(&client_handle, &sf_g->map, sf_i->path, ¶ms); 68 if (RT_SUCCESS(rc)) 69 { 70 if (params.Result == SHFL_FILE_EXISTS) 71 { 72 err = sf_dir_read_all(sf_g, sf_i, sf_d, params.Handle); 73 if (!err) 74 file->private_data = sf_d; 75 } 76 else 77 err = -ENOENT; 78 79 rc = VbglR0SfClose(&client_handle, &sf_g->map, params.Handle); 80 if (RT_FAILURE(rc)) 81 LogFunc(("sf_dir_open(): VbglR0SfClose(%s) after err=%d failed rc=%Rrc\n", 82 sf_i->path->String.utf8, err, rc)); 83 } 84 else 85 err = -EPERM; 86 87 if (err) 88 sf_dir_info_free(sf_d); 89 90 return err; 91 } 92 30 int rc; 31 int err; 32 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); 33 struct sf_dir_info *sf_d; 34 struct sf_inode_info *sf_i = GET_INODE_INFO(inode); 35 SHFLCREATEPARMS params; 36 37 TRACE(); 38 BUG_ON(!sf_g); 39 BUG_ON(!sf_i); 40 41 if (file->private_data) { 42 LogFunc(("sf_dir_open() called on already opened directory '%s'\n", sf_i->path->String.utf8)); 43 return 0; 44 } 45 46 sf_d = sf_dir_info_alloc(); 47 if (!sf_d) { 48 LogRelFunc(("could not allocate directory info for '%s'\n", 49 sf_i->path->String.utf8)); 50 return -ENOMEM; 51 } 52 53 RT_ZERO(params); 54 params.Handle = SHFL_HANDLE_NIL; 55 params.CreateFlags = 0 56 | SHFL_CF_DIRECTORY 57 | SHFL_CF_ACT_OPEN_IF_EXISTS 58 | SHFL_CF_ACT_FAIL_IF_NEW | SHFL_CF_ACCESS_READ; 59 60 LogFunc(("sf_dir_open(): calling VbglR0SfCreate, folder %s, flags %#x\n", sf_i->path->String.utf8, params.CreateFlags)); 61 rc = VbglR0SfCreate(&client_handle, &sf_g->map, sf_i->path, ¶ms); 62 if (RT_SUCCESS(rc)) { 63 if (params.Result == SHFL_FILE_EXISTS) { 64 err = sf_dir_read_all(sf_g, sf_i, sf_d, params.Handle); 65 if (!err) 66 file->private_data = sf_d; 67 } else 68 err = -ENOENT; 69 70 rc = VbglR0SfClose(&client_handle, &sf_g->map, params.Handle); 71 if (RT_FAILURE(rc)) 72 LogFunc(("sf_dir_open(): VbglR0SfClose(%s) after err=%d failed rc=%Rrc\n", sf_i->path->String.utf8, err, rc)); 73 } else 74 err = -EPERM; 75 76 if (err) 77 sf_dir_info_free(sf_d); 78 79 return err; 80 } 93 81 94 82 /** … … 103 91 static int sf_dir_release(struct inode *inode, struct file *file) 104 92 { 105 106 107 108 109 110 93 TRACE(); 94 95 if (file->private_data) 96 sf_dir_info_free(file->private_data); 97 98 return 0; 111 99 } 112 100 … … 118 106 static int sf_get_d_type(RTFMODE fMode) 119 107 { 120 int d_type; 121 switch (fMode & RTFS_TYPE_MASK) 122 { 123 case RTFS_TYPE_FIFO: d_type = DT_FIFO; break; 124 case RTFS_TYPE_DEV_CHAR: d_type = DT_CHR; break; 125 case RTFS_TYPE_DIRECTORY: d_type = DT_DIR; break; 126 case 
RTFS_TYPE_DEV_BLOCK: d_type = DT_BLK; break; 127 case RTFS_TYPE_FILE: d_type = DT_REG; break; 128 case RTFS_TYPE_SYMLINK: d_type = DT_LNK; break; 129 case RTFS_TYPE_SOCKET: d_type = DT_SOCK; break; 130 case RTFS_TYPE_WHITEOUT: d_type = DT_WHT; break; 131 default: d_type = DT_UNKNOWN; break; 132 } 133 return d_type; 108 int d_type; 109 switch (fMode & RTFS_TYPE_MASK) { 110 case RTFS_TYPE_FIFO: 111 d_type = DT_FIFO; 112 break; 113 case RTFS_TYPE_DEV_CHAR: 114 d_type = DT_CHR; 115 break; 116 case RTFS_TYPE_DIRECTORY: 117 d_type = DT_DIR; 118 break; 119 case RTFS_TYPE_DEV_BLOCK: 120 d_type = DT_BLK; 121 break; 122 case RTFS_TYPE_FILE: 123 d_type = DT_REG; 124 break; 125 case RTFS_TYPE_SYMLINK: 126 d_type = DT_LNK; 127 break; 128 case RTFS_TYPE_SOCKET: 129 d_type = DT_SOCK; 130 break; 131 case RTFS_TYPE_WHITEOUT: 132 d_type = DT_WHT; 133 break; 134 default: 135 d_type = DT_UNKNOWN; 136 break; 137 } 138 return d_type; 134 139 } 135 140 … … 141 146 static int sf_getdent(struct file *dir, char d_name[NAME_MAX], int *d_type) 142 147 { 143 loff_t cur; 144 struct sf_glob_info *sf_g; 145 struct sf_dir_info *sf_d; 146 struct sf_inode_info *sf_i; 147 struct inode *inode; 148 struct list_head *pos, *list; 149 150 TRACE(); 151 152 inode = GET_F_DENTRY(dir)->d_inode; 153 sf_i = GET_INODE_INFO(inode); 154 sf_g = GET_GLOB_INFO(inode->i_sb); 155 sf_d = dir->private_data; 156 157 BUG_ON(!sf_g); 158 BUG_ON(!sf_d); 159 BUG_ON(!sf_i); 160 161 if (sf_i->force_reread) 162 { 163 int rc; 164 int err; 165 SHFLCREATEPARMS params; 166 167 RT_ZERO(params); 168 params.Handle = SHFL_HANDLE_NIL; 169 params.CreateFlags = 0 170 | SHFL_CF_DIRECTORY 171 | SHFL_CF_ACT_OPEN_IF_EXISTS 172 | SHFL_CF_ACT_FAIL_IF_NEW 173 | SHFL_CF_ACCESS_READ 174 ; 175 176 LogFunc(("sf_getdent: calling VbglR0SfCreate, folder %s, flags %#x\n", 177 sf_i->path->String.utf8, params.CreateFlags)); 178 rc = VbglR0SfCreate(&client_handle, &sf_g->map, sf_i->path, ¶ms); 179 if (RT_FAILURE(rc)) 180 { 181 LogFunc(("VbglR0SfCreate(%s) failed rc=%Rrc\n", 182 sf_i->path->String.utf8, rc)); 183 return -EPERM; 184 } 185 186 if (params.Result != SHFL_FILE_EXISTS) 187 { 188 LogFunc(("directory %s does not exist\n", sf_i->path->String.utf8)); 189 sf_dir_info_free(sf_d); 190 return -ENOENT; 191 } 192 193 sf_dir_info_empty(sf_d); 194 err = sf_dir_read_all(sf_g, sf_i, sf_d, params.Handle); 195 rc = VbglR0SfClose(&client_handle, &sf_g->map, params.Handle); 196 if (RT_FAILURE(rc)) 197 LogFunc(("VbglR0SfClose(%s) failed rc=%Rrc\n", sf_i->path->String.utf8, rc)); 198 if (err) 199 return err; 200 201 sf_i->force_reread = 0; 202 } 203 204 cur = 0; 205 list = &sf_d->info_list; 206 list_for_each(pos, list) 207 { 208 struct sf_dir_buf *b; 209 SHFLDIRINFO *info; 210 loff_t i; 211 212 b = list_entry(pos, struct sf_dir_buf, head); 213 if (dir->f_pos >= cur + b->cEntries) 214 { 215 cur += b->cEntries; 216 continue; 217 } 218 219 for (i = 0, info = b->buf; i < dir->f_pos - cur; ++i) 220 { 221 size_t size; 222 223 size = offsetof(SHFLDIRINFO, name.String) + info->name.u16Size; 224 info = (SHFLDIRINFO *) ((uintptr_t) info + size); 225 } 226 227 *d_type = sf_get_d_type(info->Info.Attr.fMode); 228 229 return sf_nlscpy(sf_g, d_name, NAME_MAX, 230 info->name.String.utf8, info->name.u16Length); 231 } 232 233 return 1; 148 loff_t cur; 149 struct sf_glob_info *sf_g; 150 struct sf_dir_info *sf_d; 151 struct sf_inode_info *sf_i; 152 struct inode *inode; 153 struct list_head *pos, *list; 154 155 TRACE(); 156 157 inode = GET_F_DENTRY(dir)->d_inode; 158 sf_i = GET_INODE_INFO(inode); 159 sf_g = 
GET_GLOB_INFO(inode->i_sb); 160 sf_d = dir->private_data; 161 162 BUG_ON(!sf_g); 163 BUG_ON(!sf_d); 164 BUG_ON(!sf_i); 165 166 if (sf_i->force_reread) { 167 int rc; 168 int err; 169 SHFLCREATEPARMS params; 170 171 RT_ZERO(params); 172 params.Handle = SHFL_HANDLE_NIL; 173 params.CreateFlags = 0 174 | SHFL_CF_DIRECTORY 175 | SHFL_CF_ACT_OPEN_IF_EXISTS 176 | SHFL_CF_ACT_FAIL_IF_NEW | SHFL_CF_ACCESS_READ; 177 178 LogFunc(("sf_getdent: calling VbglR0SfCreate, folder %s, flags %#x\n", sf_i->path->String.utf8, params.CreateFlags)); 179 rc = VbglR0SfCreate(&client_handle, &sf_g->map, sf_i->path, 180 ¶ms); 181 if (RT_FAILURE(rc)) { 182 LogFunc(("VbglR0SfCreate(%s) failed rc=%Rrc\n", 183 sf_i->path->String.utf8, rc)); 184 return -EPERM; 185 } 186 187 if (params.Result != SHFL_FILE_EXISTS) { 188 LogFunc(("directory %s does not exist\n", 189 sf_i->path->String.utf8)); 190 sf_dir_info_free(sf_d); 191 return -ENOENT; 192 } 193 194 sf_dir_info_empty(sf_d); 195 err = sf_dir_read_all(sf_g, sf_i, sf_d, params.Handle); 196 rc = VbglR0SfClose(&client_handle, &sf_g->map, params.Handle); 197 if (RT_FAILURE(rc)) 198 LogFunc(("VbglR0SfClose(%s) failed rc=%Rrc\n", 199 sf_i->path->String.utf8, rc)); 200 if (err) 201 return err; 202 203 sf_i->force_reread = 0; 204 } 205 206 cur = 0; 207 list = &sf_d->info_list; 208 list_for_each(pos, list) { 209 struct sf_dir_buf *b; 210 SHFLDIRINFO *info; 211 loff_t i; 212 213 b = list_entry(pos, struct sf_dir_buf, head); 214 if (dir->f_pos >= cur + b->cEntries) { 215 cur += b->cEntries; 216 continue; 217 } 218 219 for (i = 0, info = b->buf; i < dir->f_pos - cur; ++i) { 220 size_t size; 221 222 size = 223 offsetof(SHFLDIRINFO, 224 name.String) + info->name.u16Size; 225 info = (SHFLDIRINFO *) ((uintptr_t) info + size); 226 } 227 228 *d_type = sf_get_d_type(info->Info.Attr.fMode); 229 230 return sf_nlscpy(sf_g, d_name, NAME_MAX, 231 info->name.String.utf8, info->name.u16Length); 232 } 233 234 return 1; 234 235 } 235 236 … … 263 264 #endif 264 265 { 265 TRACE(); 266 for (;;) 267 { 268 int err; 269 ino_t fake_ino; 270 loff_t sanity; 271 char d_name[NAME_MAX]; 272 int d_type = DT_UNKNOWN; 273 274 err = sf_getdent(dir, d_name, &d_type); 275 switch (err) 276 { 277 case 1: 278 return 0; 279 280 case 0: 281 break; 282 283 case -1: 284 default: 285 /* skip erroneous entry and proceed */ 286 LogFunc(("sf_getdent error %d\n", err)); 287 dir->f_pos += 1; 266 TRACE(); 267 for (;;) { 268 int err; 269 ino_t fake_ino; 270 loff_t sanity; 271 char d_name[NAME_MAX]; 272 int d_type = DT_UNKNOWN; 273 274 err = sf_getdent(dir, d_name, &d_type); 275 switch (err) { 276 case 1: 277 return 0; 278 279 case 0: 280 break; 281 282 case -1: 283 default: 284 /* skip erroneous entry and proceed */ 285 LogFunc(("sf_getdent error %d\n", err)); 286 dir->f_pos += 1; 288 287 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) 289 290 #endif 291 292 293 294 288 ctx->pos += 1; 289 #endif 290 continue; 291 } 292 293 /* d_name now contains a valid entry name */ 295 294 296 295 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) 297 sanity = ctx->pos + 0xbeef; 298 #else 299 sanity = dir->f_pos + 0xbeef; 300 #endif 301 fake_ino = sanity; 302 if (sanity - fake_ino) 303 { 304 LogRelFunc(("can not compute ino\n")); 305 return -EINVAL; 306 } 307 296 sanity = ctx->pos + 0xbeef; 297 #else 298 sanity = dir->f_pos + 0xbeef; 299 #endif 300 fake_ino = sanity; 301 if (sanity - fake_ino) { 302 LogRelFunc(("can not compute ino\n")); 303 return -EINVAL; 304 } 308 305 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) 309 if (!dir_emit(ctx, 
d_name, strlen(d_name), fake_ino, d_type)) 310 { 311 LogFunc(("dir_emit failed\n"));312 return 0; 313 } 314 #else 315 err = filldir(opaque, d_name, strlen(d_name), dir->f_pos, fake_ino, d_type); 316 if (err) 317 318 319 320 321 322 323 #endif 324 325 306 if (!dir_emit(ctx, d_name, strlen(d_name), fake_ino, d_type)) { 307 LogFunc(("dir_emit failed\n")); 308 return 0; 309 } 310 #else 311 err = 312 filldir(opaque, d_name, strlen(d_name), dir->f_pos, 313 fake_ino, d_type); 314 if (err) { 315 LogFunc(("filldir returned error %d\n", err)); 316 /* Rely on the fact that filldir returns error 317 only when it runs out of space in opaque */ 318 return 0; 319 } 320 #endif 321 322 dir->f_pos += 1; 326 323 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) 327 ctx->pos += 1; 328 #endif 329 } 330 331 BUG(); 332 } 333 334 struct file_operations sf_dir_fops = 335 { 336 .open = sf_dir_open, 324 ctx->pos += 1; 325 #endif 326 } 327 328 BUG(); 329 } 330 331 struct file_operations sf_dir_fops = { 332 .open = sf_dir_open, 337 333 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) 338 339 #else 340 341 #endif 342 343 .read= generic_read_dir334 .iterate = sf_dir_iterate, 335 #else 336 .readdir = sf_dir_read, 337 #endif 338 .release = sf_dir_release, 339 .read = generic_read_dir 344 340 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37) 345 , .llseek= generic_file_llseek341 ,.llseek = generic_file_llseek 346 342 #endif 347 343 }; 348 349 344 350 345 /* iops */ … … 361 356 static struct dentry *sf_lookup(struct inode *parent, struct dentry *dentry 362 357 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) 363 358 , unsigned int flags 364 359 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) 365 , struct nameidata *nd 366 #endif 367 ) 368 { 369 int err; 370 struct sf_inode_info *sf_i, *sf_new_i; 371 struct sf_glob_info *sf_g; 372 SHFLSTRING *path; 373 struct inode *inode; 374 ino_t ino; 375 SHFLFSOBJINFO fsinfo; 376 377 TRACE(); 378 sf_g = GET_GLOB_INFO(parent->i_sb); 379 sf_i = GET_INODE_INFO(parent); 380 381 BUG_ON(!sf_g); 382 BUG_ON(!sf_i); 383 384 err = sf_path_from_dentry(__func__, sf_g, sf_i, dentry, &path); 385 if (err) 386 goto fail0; 387 388 err = sf_stat(__func__, sf_g, path, &fsinfo, 1); 389 if (err) 390 { 391 if (err == -ENOENT) 392 { 393 /* -ENOENT: add NULL inode to dentry so it later can be 394 created via call to create/mkdir/open */ 395 kfree(path); 396 inode = NULL; 397 } 398 else 399 goto fail1; 400 } 401 else 402 { 403 sf_new_i = kmalloc(sizeof(*sf_new_i), GFP_KERNEL); 404 if (!sf_new_i) 405 { 406 LogRelFunc(("could not allocate memory for new inode info\n")); 407 err = -ENOMEM; 408 goto fail1; 409 } 410 sf_new_i->handle = SHFL_HANDLE_NIL; 411 sf_new_i->force_reread = 0; 412 413 ino = iunique(parent->i_sb, 1); 360 , struct nameidata *nd 361 #endif 362 ) 363 { 364 int err; 365 struct sf_inode_info *sf_i, *sf_new_i; 366 struct sf_glob_info *sf_g; 367 SHFLSTRING *path; 368 struct inode *inode; 369 ino_t ino; 370 SHFLFSOBJINFO fsinfo; 371 372 TRACE(); 373 sf_g = GET_GLOB_INFO(parent->i_sb); 374 sf_i = GET_INODE_INFO(parent); 375 376 BUG_ON(!sf_g); 377 BUG_ON(!sf_i); 378 379 err = sf_path_from_dentry(__func__, sf_g, sf_i, dentry, &path); 380 if (err) 381 goto fail0; 382 383 err = sf_stat(__func__, sf_g, path, &fsinfo, 1); 384 if (err) { 385 if (err == -ENOENT) { 386 /* -ENOENT: add NULL inode to dentry so it later can be 387 created via call to create/mkdir/open */ 388 kfree(path); 389 inode = NULL; 390 } else 391 goto fail1; 392 } else { 393 sf_new_i = kmalloc(sizeof(*sf_new_i), GFP_KERNEL); 394 if 
(!sf_new_i) { 395 LogRelFunc(("could not allocate memory for new inode info\n")); 396 err = -ENOMEM; 397 goto fail1; 398 } 399 sf_new_i->handle = SHFL_HANDLE_NIL; 400 sf_new_i->force_reread = 0; 401 402 ino = iunique(parent->i_sb, 1); 414 403 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25) 415 inode = iget_locked(parent->i_sb, ino); 416 #else 417 inode = iget(parent->i_sb, ino); 418 #endif 419 if (!inode) 420 { 421 LogFunc(("iget failed\n")); 422 err = -ENOMEM; /* XXX: ??? */ 423 goto fail2; 424 } 425 426 SET_INODE_INFO(inode, sf_new_i); 427 sf_init_inode(sf_g, inode, &fsinfo); 428 sf_new_i->path = path; 404 inode = iget_locked(parent->i_sb, ino); 405 #else 406 inode = iget(parent->i_sb, ino); 407 #endif 408 if (!inode) { 409 LogFunc(("iget failed\n")); 410 err = -ENOMEM; /* XXX: ??? */ 411 goto fail2; 412 } 413 414 SET_INODE_INFO(inode, sf_new_i); 415 sf_init_inode(sf_g, inode, &fsinfo); 416 sf_new_i->path = path; 429 417 430 418 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25) 431 432 #endif 433 434 435 436 419 unlock_new_inode(inode); 420 #endif 421 } 422 423 sf_i->force_restat = 0; 424 dentry->d_time = jiffies; 437 425 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38) 438 439 #else 440 441 #endif 442 443 444 445 fail2:446 447 448 fail1:449 450 451 fail0:452 426 d_set_d_op(dentry, &sf_dentry_ops); 427 #else 428 dentry->d_op = &sf_dentry_ops; 429 #endif 430 d_add(dentry, inode); 431 return NULL; 432 433 fail2: 434 kfree(sf_new_i); 435 436 fail1: 437 kfree(path); 438 439 fail0: 440 return ERR_PTR(err); 453 441 } 454 442 … … 466 454 */ 467 455 static int sf_instantiate(struct inode *parent, struct dentry *dentry, 468 SHFLSTRING *path, PSHFLFSOBJINFO info, SHFLHANDLE handle) 469 { 470 int err; 471 ino_t ino;472 struct inode *inode;473 struct sf_inode_info *sf_new_i;474 struct sf_glob_info *sf_g = GET_GLOB_INFO(parent->i_sb);475 476 TRACE(); 477 BUG_ON(!sf_g);478 479 sf_new_i = kmalloc(sizeof(*sf_new_i), GFP_KERNEL); 480 if (!sf_new_i) 481 482 483 484 485 486 487 456 SHFLSTRING * path, PSHFLFSOBJINFO info, 457 SHFLHANDLE handle) 458 { 459 int err; 460 ino_t ino; 461 struct inode *inode; 462 struct sf_inode_info *sf_new_i; 463 struct sf_glob_info *sf_g = GET_GLOB_INFO(parent->i_sb); 464 465 TRACE(); 466 BUG_ON(!sf_g); 467 468 sf_new_i = kmalloc(sizeof(*sf_new_i), GFP_KERNEL); 469 if (!sf_new_i) { 470 LogRelFunc(("could not allocate inode info.\n")); 471 err = -ENOMEM; 472 goto fail0; 473 } 474 475 ino = iunique(parent->i_sb, 1); 488 476 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25) 489 inode = iget_locked(parent->i_sb, ino); 490 #else 491 inode = iget(parent->i_sb, ino); 492 #endif 493 if (!inode) 494 { 495 LogFunc(("iget failed\n")); 496 err = -ENOMEM; 497 goto fail1; 498 } 499 500 sf_init_inode(sf_g, inode, info); 501 sf_new_i->path = path; 502 SET_INODE_INFO(inode, sf_new_i); 503 sf_new_i->force_restat = 1; 504 sf_new_i->force_reread = 0; 505 506 d_instantiate(dentry, inode); 477 inode = iget_locked(parent->i_sb, ino); 478 #else 479 inode = iget(parent->i_sb, ino); 480 #endif 481 if (!inode) { 482 LogFunc(("iget failed\n")); 483 err = -ENOMEM; 484 goto fail1; 485 } 486 487 sf_init_inode(sf_g, inode, info); 488 sf_new_i->path = path; 489 SET_INODE_INFO(inode, sf_new_i); 490 sf_new_i->force_restat = 1; 491 sf_new_i->force_reread = 0; 492 493 d_instantiate(dentry, inode); 507 494 508 495 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25) 509 510 #endif 511 512 513 514 515 516 fail1:517 518 519 fail0:520 496 unlock_new_inode(inode); 497 #endif 498 499 /* Store this handle if we 
leave the handle open. */ 500 sf_new_i->handle = handle; 501 return 0; 502 503 fail1: 504 kfree(sf_new_i); 505 506 fail0: 507 return err; 521 508 522 509 } … … 532 519 */ 533 520 static int sf_create_aux(struct inode *parent, struct dentry *dentry, 534 umode_t mode, int fDirectory) 535 { 536 int rc, err; 537 SHFLCREATEPARMS params; 538 SHFLSTRING *path; 539 struct sf_inode_info *sf_i = GET_INODE_INFO(parent); 540 struct sf_glob_info *sf_g = GET_GLOB_INFO(parent->i_sb); 541 542 TRACE(); 543 BUG_ON(!sf_i); 544 BUG_ON(!sf_g); 545 546 err = sf_path_from_dentry(__func__, sf_g, sf_i, dentry, &path); 547 if (err) 548 goto fail0; 549 550 RT_ZERO(params); 551 params.Handle = SHFL_HANDLE_NIL; 552 params.CreateFlags = 0 553 | SHFL_CF_ACT_CREATE_IF_NEW 554 | SHFL_CF_ACT_FAIL_IF_EXISTS 555 | SHFL_CF_ACCESS_READWRITE 556 | (fDirectory ? SHFL_CF_DIRECTORY : 0) 557 ; 558 params.Info.Attr.fMode = 0 559 | (fDirectory ? RTFS_TYPE_DIRECTORY : RTFS_TYPE_FILE) 560 | (mode & S_IRWXUGO) 561 ; 562 params.Info.Attr.enmAdditional = RTFSOBJATTRADD_NOTHING; 563 564 LogFunc(("sf_create_aux: calling VbglR0SfCreate, folder %s, flags %#x\n", 565 path->String.utf8, params.CreateFlags)); 566 rc = VbglR0SfCreate(&client_handle, &sf_g->map, path, ¶ms); 567 if (RT_FAILURE(rc)) 568 { 569 if (rc == VERR_WRITE_PROTECT) 570 { 571 err = -EROFS; 572 goto fail1; 573 } 574 err = -EPROTO; 575 LogFunc(("(%d): VbglR0SfCreate(%s) failed rc=%Rrc\n", 576 fDirectory, sf_i->path->String.utf8, rc)); 577 goto fail1; 578 } 579 580 if (params.Result != SHFL_FILE_CREATED) 581 { 582 err = -EPERM; 583 LogFunc(("(%d): could not create file %s result=%d\n", 584 fDirectory, sf_i->path->String.utf8, params.Result)); 585 goto fail1; 586 } 587 588 err = sf_instantiate(parent, dentry, path, ¶ms.Info, 589 fDirectory ? SHFL_HANDLE_NIL : params.Handle); 590 if (err) 591 { 592 LogFunc(("(%d): could not instantiate dentry for %s err=%d\n", 593 fDirectory, sf_i->path->String.utf8, err)); 594 goto fail2; 595 } 596 597 /* 598 * Don't close this handle right now. We assume that the same file is 599 * opened with sf_reg_open() and later closed with sf_reg_close(). Save 600 * the handle in between. Does not apply to directories. True? 601 */ 602 if (fDirectory) 603 { 604 rc = VbglR0SfClose(&client_handle, &sf_g->map, params.Handle); 605 if (RT_FAILURE(rc)) 606 LogFunc(("(%d): VbglR0SfClose failed rc=%Rrc\n", fDirectory, rc)); 607 } 608 609 sf_i->force_restat = 1; 610 return 0; 611 612 fail2: 613 rc = VbglR0SfClose(&client_handle, &sf_g->map, params.Handle); 614 if (RT_FAILURE(rc)) 615 LogFunc(("(%d): VbglR0SfClose failed rc=%Rrc\n", fDirectory, rc)); 616 617 fail1: 618 kfree(path); 619 620 fail0: 621 return err; 521 umode_t mode, int fDirectory) 522 { 523 int rc, err; 524 SHFLCREATEPARMS params; 525 SHFLSTRING *path; 526 struct sf_inode_info *sf_i = GET_INODE_INFO(parent); 527 struct sf_glob_info *sf_g = GET_GLOB_INFO(parent->i_sb); 528 529 TRACE(); 530 BUG_ON(!sf_i); 531 BUG_ON(!sf_g); 532 533 err = sf_path_from_dentry(__func__, sf_g, sf_i, dentry, &path); 534 if (err) 535 goto fail0; 536 537 RT_ZERO(params); 538 params.Handle = SHFL_HANDLE_NIL; 539 params.CreateFlags = 0 540 | SHFL_CF_ACT_CREATE_IF_NEW 541 | SHFL_CF_ACT_FAIL_IF_EXISTS 542 | SHFL_CF_ACCESS_READWRITE | (fDirectory ? SHFL_CF_DIRECTORY : 0); 543 params.Info.Attr.fMode = 0 544 | (fDirectory ? 
RTFS_TYPE_DIRECTORY : RTFS_TYPE_FILE) 545 | (mode & S_IRWXUGO); 546 params.Info.Attr.enmAdditional = RTFSOBJATTRADD_NOTHING; 547 548 LogFunc(("sf_create_aux: calling VbglR0SfCreate, folder %s, flags %#x\n", path->String.utf8, params.CreateFlags)); 549 rc = VbglR0SfCreate(&client_handle, &sf_g->map, path, ¶ms); 550 if (RT_FAILURE(rc)) { 551 if (rc == VERR_WRITE_PROTECT) { 552 err = -EROFS; 553 goto fail1; 554 } 555 err = -EPROTO; 556 LogFunc(("(%d): VbglR0SfCreate(%s) failed rc=%Rrc\n", 557 fDirectory, sf_i->path->String.utf8, rc)); 558 goto fail1; 559 } 560 561 if (params.Result != SHFL_FILE_CREATED) { 562 err = -EPERM; 563 LogFunc(("(%d): could not create file %s result=%d\n", 564 fDirectory, sf_i->path->String.utf8, params.Result)); 565 goto fail1; 566 } 567 568 err = sf_instantiate(parent, dentry, path, ¶ms.Info, 569 fDirectory ? SHFL_HANDLE_NIL : params.Handle); 570 if (err) { 571 LogFunc(("(%d): could not instantiate dentry for %s err=%d\n", 572 fDirectory, sf_i->path->String.utf8, err)); 573 goto fail2; 574 } 575 576 /* 577 * Don't close this handle right now. We assume that the same file is 578 * opened with sf_reg_open() and later closed with sf_reg_close(). Save 579 * the handle in between. Does not apply to directories. True? 580 */ 581 if (fDirectory) { 582 rc = VbglR0SfClose(&client_handle, &sf_g->map, params.Handle); 583 if (RT_FAILURE(rc)) 584 LogFunc(("(%d): VbglR0SfClose failed rc=%Rrc\n", 585 fDirectory, rc)); 586 } 587 588 sf_i->force_restat = 1; 589 return 0; 590 591 fail2: 592 rc = VbglR0SfClose(&client_handle, &sf_g->map, params.Handle); 593 if (RT_FAILURE(rc)) 594 LogFunc(("(%d): VbglR0SfClose failed rc=%Rrc\n", fDirectory, 595 rc)); 596 597 fail1: 598 kfree(path); 599 600 fail0: 601 return err; 622 602 } 623 603 … … 632 612 */ 633 613 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) || defined(DOXYGEN_RUNNING) 634 static int sf_create(struct inode *parent, struct dentry *dentry, umode_t mode, bool excl) 614 static int sf_create(struct inode *parent, struct dentry *dentry, umode_t mode, 615 bool excl) 635 616 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0) 636 static int sf_create(struct inode *parent, struct dentry *dentry, umode_t mode, struct nameidata *nd) 617 static int sf_create(struct inode *parent, struct dentry *dentry, umode_t mode, 618 struct nameidata *nd) 637 619 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) 638 static int sf_create(struct inode *parent, struct dentry *dentry, int mode, struct nameidata *nd) 620 static int sf_create(struct inode *parent, struct dentry *dentry, int mode, 621 struct nameidata *nd) 639 622 #else 640 623 static int sf_create(struct inode *parent, struct dentry *dentry, int mode) 641 624 #endif 642 625 { 643 644 626 TRACE(); 627 return sf_create_aux(parent, dentry, mode, 0); 645 628 } 646 629 … … 659 642 #endif 660 643 { 661 662 644 TRACE(); 645 return sf_create_aux(parent, dentry, mode, 1); 663 646 } 664 647 … … 671 654 * @returns 0 on success, Linux error code otherwise 672 655 */ 673 static int sf_unlink_aux(struct inode *parent, struct dentry *dentry, int fDirectory)674 { 675 int rc, err; 676 struct sf_glob_info *sf_g = GET_GLOB_INFO(parent->i_sb);677 struct sf_inode_info *sf_i = GET_INODE_INFO(parent);678 SHFLSTRING *path;679 uint32_t fFlags;680 681 TRACE(); 682 BUG_ON(!sf_g);683 684 err = sf_path_from_dentry(__func__, sf_g, sf_i, dentry, &path); 685 if (err) 686 goto fail0; 687 688 fFlags = fDirectory ? 
SHFL_REMOVE_DIR : SHFL_REMOVE_FILE; 689 if ( dentry->d_inode 690 691 692 693 if (RT_FAILURE(rc)) 694 { 695 LogFunc(("(%d): VbglR0SfRemove(%s) failed rc=%Rrc\n",fDirectory, path->String.utf8, rc));696 697 698 699 700 701 702 703 704 705 706 707 fail1:708 709 710 fail0:711 656 static int sf_unlink_aux(struct inode *parent, struct dentry *dentry, 657 int fDirectory) 658 { 659 int rc, err; 660 struct sf_glob_info *sf_g = GET_GLOB_INFO(parent->i_sb); 661 struct sf_inode_info *sf_i = GET_INODE_INFO(parent); 662 SHFLSTRING *path; 663 uint32_t fFlags; 664 665 TRACE(); 666 BUG_ON(!sf_g); 667 668 err = sf_path_from_dentry(__func__, sf_g, sf_i, dentry, &path); 669 if (err) 670 goto fail0; 671 672 fFlags = fDirectory ? SHFL_REMOVE_DIR : SHFL_REMOVE_FILE; 673 if (dentry->d_inode && ((dentry->d_inode->i_mode & S_IFLNK) == S_IFLNK)) 674 fFlags |= SHFL_REMOVE_SYMLINK; 675 rc = VbglR0SfRemove(&client_handle, &sf_g->map, path, fFlags); 676 if (RT_FAILURE(rc)) { 677 LogFunc(("(%d): VbglR0SfRemove(%s) failed rc=%Rrc\n", 678 fDirectory, path->String.utf8, rc)); 679 err = -RTErrConvertToErrno(rc); 680 goto fail1; 681 } 682 683 /* directory access/change time changed */ 684 sf_i->force_restat = 1; 685 /* directory content changed */ 686 sf_i->force_reread = 1; 687 688 err = 0; 689 690 fail1: 691 kfree(path); 692 693 fail0: 694 return err; 712 695 } 713 696 … … 721 704 static int sf_unlink(struct inode *parent, struct dentry *dentry) 722 705 { 723 724 706 TRACE(); 707 return sf_unlink_aux(parent, dentry, 0); 725 708 } 726 709 … … 734 717 static int sf_rmdir(struct inode *parent, struct dentry *dentry) 735 718 { 736 737 719 TRACE(); 720 return sf_unlink_aux(parent, dentry, 1); 738 721 } 739 722 … … 749 732 */ 750 733 static int sf_rename(struct inode *old_parent, struct dentry *old_dentry, 751 734 struct inode *new_parent, struct dentry *new_dentry 752 735 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0) 753 754 #endif 755 756 { 757 758 759 760 736 , unsigned flags 737 #endif 738 ) 739 { 740 int err = 0, rc = VINF_SUCCESS; 741 struct sf_glob_info *sf_g = GET_GLOB_INFO(old_parent->i_sb); 742 743 TRACE(); 761 744 762 745 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0) 763 if (flags) 764 { 765 LogFunc(("rename with flags=%x\n", flags)); 766 return -EINVAL; 767 } 768 #endif 769 770 if (sf_g != GET_GLOB_INFO(new_parent->i_sb)) 771 { 772 LogFunc(("rename with different roots\n")); 773 err = -EINVAL; 774 } 775 else 776 { 777 struct sf_inode_info *sf_old_i = GET_INODE_INFO(old_parent); 778 struct sf_inode_info *sf_new_i = GET_INODE_INFO(new_parent); 779 /* As we save the relative path inside the inode structure, we need to change 780 this if the rename is successful. */ 781 struct sf_inode_info *sf_file_i = GET_INODE_INFO(old_dentry->d_inode); 782 SHFLSTRING *old_path; 783 SHFLSTRING *new_path; 784 785 BUG_ON(!sf_old_i); 786 BUG_ON(!sf_new_i); 787 BUG_ON(!sf_file_i); 788 789 old_path = sf_file_i->path; 790 err = sf_path_from_dentry(__func__, sf_g, sf_new_i, 791 new_dentry, &new_path); 792 if (err) 793 LogFunc(("failed to create new path\n")); 794 else 795 { 796 int fDir = ((old_dentry->d_inode->i_mode & S_IFDIR) != 0); 797 798 rc = VbglR0SfRename(&client_handle, &sf_g->map, old_path, 799 new_path, fDir ? 0 : SHFL_RENAME_FILE | SHFL_RENAME_REPLACE_IF_EXISTS); 800 if (RT_SUCCESS(rc)) 801 { 802 kfree(old_path); 803 sf_new_i->force_restat = 1; 804 sf_old_i->force_restat = 1; /* XXX: needed? */ 805 /* Set the new relative path in the inode. 
*/ 806 sf_file_i->path = new_path; 807 } 808 else 809 { 810 LogFunc(("VbglR0SfRename failed rc=%Rrc\n", rc)); 811 err = -RTErrConvertToErrno(rc); 812 kfree(new_path); 813 } 814 } 815 } 816 return err; 746 if (flags) { 747 LogFunc(("rename with flags=%x\n", flags)); 748 return -EINVAL; 749 } 750 #endif 751 752 if (sf_g != GET_GLOB_INFO(new_parent->i_sb)) { 753 LogFunc(("rename with different roots\n")); 754 err = -EINVAL; 755 } else { 756 struct sf_inode_info *sf_old_i = GET_INODE_INFO(old_parent); 757 struct sf_inode_info *sf_new_i = GET_INODE_INFO(new_parent); 758 /* As we save the relative path inside the inode structure, we need to change 759 this if the rename is successful. */ 760 struct sf_inode_info *sf_file_i = 761 GET_INODE_INFO(old_dentry->d_inode); 762 SHFLSTRING *old_path; 763 SHFLSTRING *new_path; 764 765 BUG_ON(!sf_old_i); 766 BUG_ON(!sf_new_i); 767 BUG_ON(!sf_file_i); 768 769 old_path = sf_file_i->path; 770 err = sf_path_from_dentry(__func__, sf_g, sf_new_i, 771 new_dentry, &new_path); 772 if (err) 773 LogFunc(("failed to create new path\n")); 774 else { 775 int fDir = 776 ((old_dentry->d_inode->i_mode & S_IFDIR) != 0); 777 778 rc = VbglR0SfRename(&client_handle, &sf_g->map, 779 old_path, new_path, 780 fDir ? 0 : SHFL_RENAME_FILE | 781 SHFL_RENAME_REPLACE_IF_EXISTS); 782 if (RT_SUCCESS(rc)) { 783 kfree(old_path); 784 sf_new_i->force_restat = 1; 785 sf_old_i->force_restat = 1; /* XXX: needed? */ 786 /* Set the new relative path in the inode. */ 787 sf_file_i->path = new_path; 788 } else { 789 LogFunc(("VbglR0SfRename failed rc=%Rrc\n", 790 rc)); 791 err = -RTErrConvertToErrno(rc); 792 kfree(new_path); 793 } 794 } 795 } 796 return err; 817 797 } 818 798 819 799 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) 820 static int sf_symlink(struct inode *parent, struct dentry *dentry, const char *symname) 821 { 822 int err; 823 int rc; 824 struct sf_inode_info *sf_i; 825 struct sf_glob_info *sf_g; 826 SHFLSTRING *path, *ssymname; 827 SHFLFSOBJINFO info; 828 int symname_len = strlen(symname) + 1; 829 830 TRACE(); 831 sf_g = GET_GLOB_INFO(parent->i_sb); 832 sf_i = GET_INODE_INFO(parent); 833 834 BUG_ON(!sf_g); 835 BUG_ON(!sf_i); 836 837 err = sf_path_from_dentry(__func__, sf_g, sf_i, dentry, &path); 838 if (err) 839 goto fail0; 840 841 ssymname = kmalloc(offsetof(SHFLSTRING, String.utf8) + symname_len, GFP_KERNEL); 842 if (!ssymname) 843 { 844 LogRelFunc(("kmalloc failed, caller=sf_symlink\n")); 845 err = -ENOMEM; 846 goto fail1; 847 } 848 849 ssymname->u16Length = symname_len - 1; 850 ssymname->u16Size = symname_len; 851 memcpy(ssymname->String.utf8, symname, symname_len); 852 853 rc = VbglR0SfSymlink(&client_handle, &sf_g->map, path, ssymname, &info); 854 kfree(ssymname); 855 856 if (RT_FAILURE(rc)) 857 { 858 if (rc == VERR_WRITE_PROTECT) 859 { 860 err = -EROFS; 861 goto fail1; 862 } 863 LogFunc(("VbglR0SfSymlink(%s) failed rc=%Rrc\n", 864 sf_i->path->String.utf8, rc)); 865 err = -EPROTO; 866 goto fail1; 867 } 868 869 err = sf_instantiate(parent, dentry, path, &info, SHFL_HANDLE_NIL); 870 if (err) 871 { 872 LogFunc(("could not instantiate dentry for %s err=%d\n", 873 sf_i->path->String.utf8, err)); 874 goto fail1; 875 } 876 877 sf_i->force_restat = 1; 878 return 0; 879 880 fail1: 881 kfree(path); 882 fail0: 883 return err; 884 } 885 #endif 886 887 struct inode_operations sf_dir_iops = 888 { 889 .lookup = sf_lookup, 890 .create = sf_create, 891 .mkdir = sf_mkdir, 892 .rmdir = sf_rmdir, 893 .unlink = sf_unlink, 894 .rename = sf_rename, 800 static int sf_symlink(struct inode *parent, 
struct dentry *dentry, 801 const char *symname) 802 { 803 int err; 804 int rc; 805 struct sf_inode_info *sf_i; 806 struct sf_glob_info *sf_g; 807 SHFLSTRING *path, *ssymname; 808 SHFLFSOBJINFO info; 809 int symname_len = strlen(symname) + 1; 810 811 TRACE(); 812 sf_g = GET_GLOB_INFO(parent->i_sb); 813 sf_i = GET_INODE_INFO(parent); 814 815 BUG_ON(!sf_g); 816 BUG_ON(!sf_i); 817 818 err = sf_path_from_dentry(__func__, sf_g, sf_i, dentry, &path); 819 if (err) 820 goto fail0; 821 822 ssymname = 823 kmalloc(offsetof(SHFLSTRING, String.utf8) + symname_len, 824 GFP_KERNEL); 825 if (!ssymname) { 826 LogRelFunc(("kmalloc failed, caller=sf_symlink\n")); 827 err = -ENOMEM; 828 goto fail1; 829 } 830 831 ssymname->u16Length = symname_len - 1; 832 ssymname->u16Size = symname_len; 833 memcpy(ssymname->String.utf8, symname, symname_len); 834 835 rc = VbglR0SfSymlink(&client_handle, &sf_g->map, path, ssymname, &info); 836 kfree(ssymname); 837 838 if (RT_FAILURE(rc)) { 839 if (rc == VERR_WRITE_PROTECT) { 840 err = -EROFS; 841 goto fail1; 842 } 843 LogFunc(("VbglR0SfSymlink(%s) failed rc=%Rrc\n", 844 sf_i->path->String.utf8, rc)); 845 err = -EPROTO; 846 goto fail1; 847 } 848 849 err = sf_instantiate(parent, dentry, path, &info, SHFL_HANDLE_NIL); 850 if (err) { 851 LogFunc(("could not instantiate dentry for %s err=%d\n", 852 sf_i->path->String.utf8, err)); 853 goto fail1; 854 } 855 856 sf_i->force_restat = 1; 857 return 0; 858 859 fail1: 860 kfree(path); 861 fail0: 862 return err; 863 } 864 #endif 865 866 struct inode_operations sf_dir_iops = { 867 .lookup = sf_lookup, 868 .create = sf_create, 869 .mkdir = sf_mkdir, 870 .rmdir = sf_rmdir, 871 .unlink = sf_unlink, 872 .rename = sf_rename, 895 873 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) 896 897 #else 898 .getattr= sf_getattr,899 .setattr= sf_setattr,900 .symlink= sf_symlink874 .revalidate = sf_inode_revalidate 875 #else 876 .getattr = sf_getattr, 877 .setattr = sf_setattr, 878 .symlink = sf_symlink 901 879 #endif 902 880 }; -
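The dirops.c hunks above switch the directory-listing hook between the pre-3.11 .readdir/filldir interface and the newer .iterate/dir_emit one. As an illustration only, here is a minimal, hypothetical .iterate callback for kernels >= 3.11 that emits entries from a fixed in-memory table; it is not the VirtualBox implementation, just a sketch of the dir_emit() pattern the driver relies on.

/* Hypothetical iterate callback: emits ".", ".." and two fixed entries. */
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/string.h>

struct demo_entry {
	const char   *name;	/* entry name */
	unsigned char type;	/* DT_REG, DT_DIR, ... */
};

static const struct demo_entry demo_entries[] = {
	{ "hello.txt", DT_REG },
	{ "subdir",    DT_DIR },
};

static int demo_dir_iterate(struct file *dir, struct dir_context *ctx)
{
	/* dir_emit_dots() emits "." and ".." and advances ctx->pos past them. */
	if (!dir_emit_dots(dir, ctx))
		return 0;

	/* Positions 0 and 1 are the dots, so entry i lives at ctx->pos == i + 2. */
	while (ctx->pos - 2 < (loff_t)ARRAY_SIZE(demo_entries)) {
		const struct demo_entry *e = &demo_entries[ctx->pos - 2];

		/* dir_emit() returns false when the caller's buffer is full;
		 * stop and resume from the saved ctx->pos on the next call. */
		if (!dir_emit(ctx, e->name, strlen(e->name),
			      ctx->pos /* fake inode number */, e->type))
			return 0;
		ctx->pos += 1;
	}
	return 0;
}

Wired into a struct file_operations as .iterate, this mirrors the dir_emit()-based branch of sf_dir_iterate() above, minus the host round-trips and the cached sf_dir_buf list.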
trunk/src/VBox/Additions/linux/sharedfolders/lnkops.c
r69500 r70786 21 21 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) 22 22 23 # 24 # 23 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) 24 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) 25 25 static const char *sf_follow_link(struct dentry *dentry, void **cookie) 26 # 26 #else 27 27 static void *sf_follow_link(struct dentry *dentry, struct nameidata *nd) 28 # 28 #endif 29 29 { 30 31 32 33 34 char *path = (char*)get_zeroed_page(GFP_KERNEL);35 30 struct inode *inode = dentry->d_inode; 31 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); 32 struct sf_inode_info *sf_i = GET_INODE_INFO(inode); 33 int error = -ENOMEM; 34 char *path = (char *)get_zeroed_page(GFP_KERNEL); 35 int rc; 36 36 37 if (path) 38 { 39 error = 0; 40 rc = VbglR0SfReadLink(&client_handle, &sf_g->map, sf_i->path, PATH_MAX, path); 41 if (RT_FAILURE(rc)) 42 { 43 LogFunc(("VbglR0SfReadLink failed, caller=%s, rc=%Rrc\n", __func__, rc)); 44 free_page((unsigned long)path); 45 error = -EPROTO; 46 } 47 } 48 # if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) 49 return error ? ERR_PTR(error) : (*cookie = path); 50 # else 51 nd_set_link(nd, error ? ERR_PTR(error) : path); 52 return NULL; 53 # endif 37 if (path) { 38 error = 0; 39 rc = VbglR0SfReadLink(&client_handle, &sf_g->map, sf_i->path, 40 PATH_MAX, path); 41 if (RT_FAILURE(rc)) { 42 LogFunc(("VbglR0SfReadLink failed, caller=%s, rc=%Rrc\n", __func__, rc)); 43 free_page((unsigned long)path); 44 error = -EPROTO; 45 } 46 } 47 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) 48 return error ? ERR_PTR(error) : (*cookie = path); 49 #else 50 nd_set_link(nd, error ? ERR_PTR(error) : path); 51 return NULL; 52 #endif 54 53 } 55 54 56 # if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) 57 static void sf_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) 55 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) 56 static void sf_put_link(struct dentry *dentry, struct nameidata *nd, 57 void *cookie) 58 58 { 59 60 61 59 char *page = nd_get_link(nd); 60 if (!IS_ERR(page)) 61 free_page((unsigned long)page); 62 62 } 63 # 63 #endif 64 64 65 # else/* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) */65 #else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) */ 66 66 static const char *sf_get_link(struct dentry *dentry, struct inode *inode, 67 67 struct delayed_call *done) 68 68 { 69 70 71 72 69 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); 70 struct sf_inode_info *sf_i = GET_INODE_INFO(inode); 71 char *path; 72 int rc; 73 73 74 if (!dentry) 75 return ERR_PTR(-ECHILD); 76 path = kzalloc(PAGE_SIZE, GFP_KERNEL); 77 if (!path) 78 return ERR_PTR(-ENOMEM); 79 rc = VbglR0SfReadLink(&client_handle, &sf_g->map, sf_i->path, PATH_MAX, path); 80 if (RT_FAILURE(rc)) 81 { 82 LogFunc(("VbglR0SfReadLink failed, caller=%s, rc=%Rrc\n", __func__, rc)); 83 kfree(path); 84 return ERR_PTR(-EPROTO); 85 } 86 set_delayed_call(done, kfree_link, path); 87 return path; 74 if (!dentry) 75 return ERR_PTR(-ECHILD); 76 path = kzalloc(PAGE_SIZE, GFP_KERNEL); 77 if (!path) 78 return ERR_PTR(-ENOMEM); 79 rc = VbglR0SfReadLink(&client_handle, &sf_g->map, sf_i->path, PATH_MAX, 80 path); 81 if (RT_FAILURE(rc)) { 82 LogFunc(("VbglR0SfReadLink failed, caller=%s, rc=%Rrc\n", 83 __func__, rc)); 84 kfree(path); 85 return ERR_PTR(-EPROTO); 86 } 87 set_delayed_call(done, kfree_link, path); 88 return path; 88 89 } 89 # endif/* LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) */90 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) */ 90 91 91 struct inode_operations sf_lnk_iops = 92 { 93 # if LINUX_VERSION_CODE < 
KERNEL_VERSION(4, 10, 0) 94 .readlink = generic_readlink, 95 # endif 96 # if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) 97 .get_link = sf_get_link 98 # elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) 99 .follow_link = sf_follow_link, 100 .put_link = free_page_put_link, 101 # else 102 .follow_link = sf_follow_link, 103 .put_link = sf_put_link 104 # endif 92 struct inode_operations sf_lnk_iops = { 93 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) 94 .readlink = generic_readlink, 95 #endif 96 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0) 97 .get_link = sf_get_link 98 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) 99 .follow_link = sf_follow_link, 100 .put_link = free_page_put_link, 101 #else 102 .follow_link = sf_follow_link, 103 .put_link = sf_put_link 104 #endif 105 105 }; 106 106 107 #endif 107 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) */ -
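lnkops.c above tracks the same API churn for symlinks: follow_link/put_link up to 4.1, follow_link with a cookie on 4.2-4.4, and get_link with a delayed_call from 4.5 on. For reference, a minimal get_link() for kernels >= 4.5 might look like the sketch below; the cached target field is hypothetical, and this is not the shared-folders code, which fetches the target from the host via VbglR0SfReadLink().

/* Hypothetical ->get_link(): hand the VFS a copy of a cached target string. */
#include <linux/delayed_call.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_inode_info {
	struct inode vfs_inode;
	const char  *link_target;	/* hypothetical cached symlink target */
};

#define DEMO_I(i) container_of(i, struct demo_inode_info, vfs_inode)

static const char *demo_get_link(struct dentry *dentry, struct inode *inode,
				 struct delayed_call *done)
{
	char *copy;

	/* In RCU-walk mode dentry is NULL; ask the VFS to retry in ref-walk. */
	if (!dentry)
		return ERR_PTR(-ECHILD);

	copy = kstrdup(DEMO_I(inode)->link_target, GFP_KERNEL);
	if (!copy)
		return ERR_PTR(-ENOMEM);

	/* The VFS calls kfree_link(copy) once it is done following the link. */
	set_delayed_call(done, kfree_link, copy);
	return copy;
}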
trunk/src/VBox/Additions/linux/sharedfolders/regops.c
r70461 r70786 22 22 #include "vfsmod.h" 23 23 24 static void *alloc_bounce_buffer(size_t *tmp_sizep, PRTCCPHYS physp, size_t 25 xfer_size, const char *caller) 26 { 27 size_t tmp_size; 28 void *tmp; 29 30 /* try for big first. */ 31 tmp_size = RT_ALIGN_Z(xfer_size, PAGE_SIZE); 32 if (tmp_size > 16U*_1K) 33 tmp_size = 16U*_1K; 34 tmp = kmalloc(tmp_size, GFP_KERNEL); 35 if (!tmp) 36 { 37 /* fall back on a page sized buffer. */ 38 tmp = kmalloc(PAGE_SIZE, GFP_KERNEL); 39 if (!tmp) 40 { 41 LogRel(("%s: could not allocate bounce buffer for xfer_size=%zu %s\n", caller, xfer_size)); 42 return NULL; 43 } 44 tmp_size = PAGE_SIZE; 45 } 46 47 *tmp_sizep = tmp_size; 48 *physp = virt_to_phys(tmp); 49 return tmp; 24 static void *alloc_bounce_buffer(size_t * tmp_sizep, PRTCCPHYS physp, size_t 25 xfer_size, const char *caller) 26 { 27 size_t tmp_size; 28 void *tmp; 29 30 /* try for big first. */ 31 tmp_size = RT_ALIGN_Z(xfer_size, PAGE_SIZE); 32 if (tmp_size > 16U * _1K) 33 tmp_size = 16U * _1K; 34 tmp = kmalloc(tmp_size, GFP_KERNEL); 35 if (!tmp) { 36 /* fall back on a page sized buffer. */ 37 tmp = kmalloc(PAGE_SIZE, GFP_KERNEL); 38 if (!tmp) { 39 LogRel(("%s: could not allocate bounce buffer for xfer_size=%zu %s\n", caller, xfer_size)); 40 return NULL; 41 } 42 tmp_size = PAGE_SIZE; 43 } 44 45 *tmp_sizep = tmp_size; 46 *physp = virt_to_phys(tmp); 47 return tmp; 50 48 } 51 49 52 50 static void free_bounce_buffer(void *tmp) 53 51 { 54 kfree (tmp); 55 } 56 52 kfree(tmp); 53 } 57 54 58 55 /* fops */ 59 56 static int sf_reg_read_aux(const char *caller, struct sf_glob_info *sf_g, 60 61 uint32_t *nread, uint64_t pos)57 struct sf_reg_info *sf_r, void *buf, 58 uint32_t * nread, uint64_t pos) 62 59 { 63 60 /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is 64 61 * contiguous in physical memory (kmalloc or single page), we should 65 62 * use a physical address here to speed things up. */ 66 67 pos, nread, buf, false /* already locked? */);68 if (RT_FAILURE(rc)) 69 { 70 LogFunc(("VbglR0SfRead failed. caller=%s, rc=%Rrc\n", caller,rc));71 72 73 63 int rc = VbglR0SfRead(&client_handle, &sf_g->map, sf_r->handle, 64 pos, nread, buf, false /* already locked? */ ); 65 if (RT_FAILURE(rc)) { 66 LogFunc(("VbglR0SfRead failed. caller=%s, rc=%Rrc\n", caller, 67 rc)); 68 return -EPROTO; 69 } 70 return 0; 74 71 } 75 72 76 73 static int sf_reg_write_aux(const char *caller, struct sf_glob_info *sf_g, 77 78 uint32_t *nwritten, uint64_t pos)74 struct sf_reg_info *sf_r, void *buf, 75 uint32_t * nwritten, uint64_t pos) 79 76 { 80 77 /** @todo bird: yes, kmap() and kmalloc() input only. Since the buffer is 81 78 * contiguous in physical memory (kmalloc or single page), we should 82 79 * use a physical address here to speed things up. */ 83 84 pos, nwritten, buf, false /* already locked? */); 85 if (RT_FAILURE(rc)) 86 87 88 89 90 91 80 int rc = VbglR0SfWrite(&client_handle, &sf_g->map, sf_r->handle, 81 pos, nwritten, buf, 82 false /* already locked? */ ); 83 if (RT_FAILURE(rc)) { 84 LogFunc(("VbglR0SfWrite failed. 
caller=%s, rc=%Rrc\n", 85 caller, rc)); 86 return -EPROTO; 87 } 88 return 0; 92 89 } 93 90 … … 97 94 void free_pipebuf(struct page *kpage) 98 95 { 99 100 96 kunmap(kpage); 97 __free_pages(kpage, 0); 101 98 } 102 99 103 100 void *sf_pipe_buf_map(struct pipe_inode_info *pipe, 104 105 { 106 101 struct pipe_buffer *pipe_buf, int atomic) 102 { 103 return 0; 107 104 } 108 105 … … 111 108 } 112 109 113 void sf_pipe_buf_unmap(struct pipe_inode_info *pipe, struct pipe_buffer *pipe_buf, void *map_data) 110 void sf_pipe_buf_unmap(struct pipe_inode_info *pipe, 111 struct pipe_buffer *pipe_buf, void *map_data) 114 112 { 115 113 } 116 114 117 115 int sf_pipe_buf_steal(struct pipe_inode_info *pipe, 118 struct pipe_buffer *pipe_buf) { 119 return 0; 116 struct pipe_buffer *pipe_buf) 117 { 118 return 0; 120 119 } 121 120 122 121 static void sf_pipe_buf_release(struct pipe_inode_info *pipe, 123 124 { 125 122 struct pipe_buffer *pipe_buf) 123 { 124 free_pipebuf(pipe_buf->page); 126 125 } 127 126 128 127 int sf_pipe_buf_confirm(struct pipe_inode_info *info, 129 130 { 131 128 struct pipe_buffer *pipe_buf) 129 { 130 return 0; 132 131 } 133 132 134 133 static struct pipe_buf_operations sf_pipe_buf_ops = { 135 136 137 138 139 140 141 134 .can_merge = 0, 135 .map = sf_pipe_buf_map, 136 .unmap = sf_pipe_buf_unmap, 137 .confirm = sf_pipe_buf_confirm, 138 .release = sf_pipe_buf_release, 139 .steal = sf_pipe_buf_steal, 140 .get = sf_pipe_buf_get, 142 141 }; 143 142 … … 151 150 152 151 ssize_t 153 sf_splice_read(struct file *in, loff_t *poffset, 154 struct pipe_inode_info *pipe, size_t len, 155 unsigned int flags) 156 { 157 size_t bytes_remaining = len; 158 loff_t orig_offset = *poffset; 159 loff_t offset = orig_offset; 160 struct inode *inode = GET_F_DENTRY(in)->d_inode; 161 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); 162 struct sf_reg_info *sf_r = in->private_data; 163 ssize_t retval; 164 struct page *kpage = 0; 165 size_t nsent = 0; 166 167 TRACE(); 168 if (!S_ISREG(inode->i_mode)) 169 { 170 LogFunc(("read from non regular file %d\n", inode->i_mode)); 171 return -EINVAL; 172 } 173 if (!len) { 174 return 0; 175 } 176 177 LOCK_PIPE(pipe); 178 179 uint32_t req_size = 0; 180 while (bytes_remaining > 0) 181 { 182 kpage = alloc_page(GFP_KERNEL); 183 if (unlikely(kpage == NULL)) 184 { 185 UNLOCK_PIPE(pipe); 186 return -ENOMEM; 187 } 188 req_size = 0; 189 uint32_t nread = req_size = (uint32_t)min(bytes_remaining, (size_t)PAGE_SIZE); 190 uint32_t chunk = 0; 191 void *kbuf = kmap(kpage); 192 while (chunk < req_size) 193 { 194 retval = sf_reg_read_aux(__func__, sf_g, sf_r, kbuf + chunk, &nread, offset); 195 if (retval < 0) 196 goto err; 197 if (nread == 0) 198 break; 199 chunk += nread; 200 offset += nread; 201 nread = req_size - chunk; 202 } 203 if (!pipe->readers) 204 { 205 send_sig(SIGPIPE, current, 0); 206 retval = -EPIPE; 207 goto err; 208 } 209 if (pipe->nrbufs < PIPE_BUFFERS) 210 { 211 struct pipe_buffer *pipebuf = 212 pipe->bufs + ((pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 1)); 213 pipebuf->page = kpage; 214 pipebuf->ops = &sf_pipe_buf_ops; 215 pipebuf->len = req_size; 216 pipebuf->offset = 0; 217 pipebuf->private = 0; 218 pipebuf->flags = 0; 219 pipe->nrbufs++; 220 nsent += req_size; 221 bytes_remaining -= req_size; 222 if (signal_pending(current)) 223 break; 224 } 225 else /* pipe full */ 226 { 227 if (flags & SPLICE_F_NONBLOCK) { 228 retval = -EAGAIN; 229 goto err; 230 } 231 free_pipebuf(kpage); 232 break; 233 } 234 } 235 UNLOCK_PIPE(pipe); 236 if (!nsent && signal_pending(current)) 237 return 
-ERESTARTSYS; 238 *poffset += nsent; 239 return offset - orig_offset; 240 241 err: 242 UNLOCK_PIPE(pipe); 243 free_pipebuf(kpage); 244 return retval; 152 sf_splice_read(struct file *in, loff_t * poffset, 153 struct pipe_inode_info *pipe, size_t len, unsigned int flags) 154 { 155 size_t bytes_remaining = len; 156 loff_t orig_offset = *poffset; 157 loff_t offset = orig_offset; 158 struct inode *inode = GET_F_DENTRY(in)->d_inode; 159 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); 160 struct sf_reg_info *sf_r = in->private_data; 161 ssize_t retval; 162 struct page *kpage = 0; 163 size_t nsent = 0; 164 165 TRACE(); 166 if (!S_ISREG(inode->i_mode)) { 167 LogFunc(("read from non regular file %d\n", inode->i_mode)); 168 return -EINVAL; 169 } 170 if (!len) { 171 return 0; 172 } 173 174 LOCK_PIPE(pipe); 175 176 uint32_t req_size = 0; 177 while (bytes_remaining > 0) { 178 kpage = alloc_page(GFP_KERNEL); 179 if (unlikely(kpage == NULL)) { 180 UNLOCK_PIPE(pipe); 181 return -ENOMEM; 182 } 183 req_size = 0; 184 uint32_t nread = req_size = 185 (uint32_t) min(bytes_remaining, (size_t) PAGE_SIZE); 186 uint32_t chunk = 0; 187 void *kbuf = kmap(kpage); 188 while (chunk < req_size) { 189 retval = 190 sf_reg_read_aux(__func__, sf_g, sf_r, kbuf + chunk, 191 &nread, offset); 192 if (retval < 0) 193 goto err; 194 if (nread == 0) 195 break; 196 chunk += nread; 197 offset += nread; 198 nread = req_size - chunk; 199 } 200 if (!pipe->readers) { 201 send_sig(SIGPIPE, current, 0); 202 retval = -EPIPE; 203 goto err; 204 } 205 if (pipe->nrbufs < PIPE_BUFFERS) { 206 struct pipe_buffer *pipebuf = 207 pipe->bufs + 208 ((pipe->curbuf + pipe->nrbufs) & (PIPE_BUFFERS - 209 1)); 210 pipebuf->page = kpage; 211 pipebuf->ops = &sf_pipe_buf_ops; 212 pipebuf->len = req_size; 213 pipebuf->offset = 0; 214 pipebuf->private = 0; 215 pipebuf->flags = 0; 216 pipe->nrbufs++; 217 nsent += req_size; 218 bytes_remaining -= req_size; 219 if (signal_pending(current)) 220 break; 221 } else { /* pipe full */ 222 223 if (flags & SPLICE_F_NONBLOCK) { 224 retval = -EAGAIN; 225 goto err; 226 } 227 free_pipebuf(kpage); 228 break; 229 } 230 } 231 UNLOCK_PIPE(pipe); 232 if (!nsent && signal_pending(current)) 233 return -ERESTARTSYS; 234 *poffset += nsent; 235 return offset - orig_offset; 236 237 err: 238 UNLOCK_PIPE(pipe); 239 free_pipebuf(kpage); 240 return retval; 245 241 } 246 242 … … 256 252 * @returns the number of read bytes on success, Linux error code otherwise 257 253 */ 258 static ssize_t sf_reg_read(struct file *file, char *buf, size_t size, loff_t *off)259 { 260 int err; 261 void *tmp;262 RTCCPHYS tmp_phys;263 size_t tmp_size;264 size_t left =size;265 ssize_t total_bytes_read = 0;266 struct inode *inode = GET_F_DENTRY(file)->d_inode;267 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);268 struct sf_reg_info *sf_r = file->private_data;269 loff_t pos = *off;270 271 TRACE(); 272 if (!S_ISREG(inode->i_mode)) 273 274 275 276 254 static ssize_t sf_reg_read(struct file *file, char *buf, size_t size, 255 loff_t * off) 256 { 257 int err; 258 void *tmp; 259 RTCCPHYS tmp_phys; 260 size_t tmp_size; 261 size_t left = size; 262 ssize_t total_bytes_read = 0; 263 struct inode *inode = GET_F_DENTRY(file)->d_inode; 264 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); 265 struct sf_reg_info *sf_r = file->private_data; 266 loff_t pos = *off; 267 268 TRACE(); 269 if (!S_ISREG(inode->i_mode)) { 270 LogFunc(("read from non regular file %d\n", inode->i_mode)); 271 return -EINVAL; 272 } 277 273 278 274 /** XXX Check read permission according to 
inode->i_mode! */ 279 275 280 281 282 283 tmp = alloc_bounce_buffer(&tmp_size, &tmp_phys, size, __PRETTY_FUNCTION__); 284 if (!tmp) 285 return -ENOMEM;286 287 while (left) 288 { 289 uint32_t to_read, nread; 290 291 to_read = tmp_size; 292 if (to_read > left) 293 to_read = (uint32_t) left; 294 295 nread = to_read; 296 297 err = sf_reg_read_aux(__func__, sf_g, sf_r, tmp, &nread, pos); 298 if (err) 299 goto fail; 300 301 if (copy_to_user(buf, tmp, nread)) 302 303 304 305 306 307 pos+= nread;308 309 buf+= nread;310 311 312 313 314 315 316 317 318 319 fail:320 321 276 if (!size) 277 return 0; 278 279 tmp = 280 alloc_bounce_buffer(&tmp_size, &tmp_phys, size, 281 __PRETTY_FUNCTION__); 282 if (!tmp) 283 return -ENOMEM; 284 285 while (left) { 286 uint32_t to_read, nread; 287 288 to_read = tmp_size; 289 if (to_read > left) 290 to_read = (uint32_t) left; 291 292 nread = to_read; 293 294 err = sf_reg_read_aux(__func__, sf_g, sf_r, tmp, &nread, pos); 295 if (err) 296 goto fail; 297 298 if (copy_to_user(buf, tmp, nread)) { 299 err = -EFAULT; 300 goto fail; 301 } 302 303 pos += nread; 304 left -= nread; 305 buf += nread; 306 total_bytes_read += nread; 307 if (nread != to_read) 308 break; 309 } 310 311 *off += total_bytes_read; 312 free_bounce_buffer(tmp); 313 return total_bytes_read; 314 315 fail: 316 free_bounce_buffer(tmp); 317 return err; 322 318 } 323 319 … … 331 327 * @returns the number of written bytes on success, Linux error code otherwise 332 328 */ 333 static ssize_t sf_reg_write(struct file *file, const char *buf, size_t size, loff_t *off) 334 { 335 int err; 336 void *tmp; 337 RTCCPHYS tmp_phys; 338 size_t tmp_size; 339 size_t left = size; 340 ssize_t total_bytes_written = 0; 341 struct inode *inode = GET_F_DENTRY(file)->d_inode; 342 struct sf_inode_info *sf_i = GET_INODE_INFO(inode); 343 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); 344 struct sf_reg_info *sf_r = file->private_data; 345 loff_t pos; 346 347 TRACE(); 348 BUG_ON(!sf_i); 349 BUG_ON(!sf_g); 350 BUG_ON(!sf_r); 351 352 if (!S_ISREG(inode->i_mode)) 353 { 354 LogFunc(("write to non regular file %d\n", inode->i_mode)); 355 return -EINVAL; 356 } 357 358 pos = *off; 359 if (file->f_flags & O_APPEND) 360 { 361 pos = inode->i_size; 362 *off = pos; 363 } 329 static ssize_t sf_reg_write(struct file *file, const char *buf, size_t size, 330 loff_t * off) 331 { 332 int err; 333 void *tmp; 334 RTCCPHYS tmp_phys; 335 size_t tmp_size; 336 size_t left = size; 337 ssize_t total_bytes_written = 0; 338 struct inode *inode = GET_F_DENTRY(file)->d_inode; 339 struct sf_inode_info *sf_i = GET_INODE_INFO(inode); 340 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); 341 struct sf_reg_info *sf_r = file->private_data; 342 loff_t pos; 343 344 TRACE(); 345 BUG_ON(!sf_i); 346 BUG_ON(!sf_g); 347 BUG_ON(!sf_r); 348 349 if (!S_ISREG(inode->i_mode)) { 350 LogFunc(("write to non regular file %d\n", inode->i_mode)); 351 return -EINVAL; 352 } 353 354 pos = *off; 355 if (file->f_flags & O_APPEND) { 356 pos = inode->i_size; 357 *off = pos; 358 } 364 359 365 360 /** XXX Check write permission according to inode->i_mode! 
*/ 366 361 367 if (!size) 368 return 0; 369 370 tmp = alloc_bounce_buffer(&tmp_size, &tmp_phys, size, __PRETTY_FUNCTION__); 371 if (!tmp) 372 return -ENOMEM; 373 374 while (left) 375 { 376 uint32_t to_write, nwritten; 377 378 to_write = tmp_size; 379 if (to_write > left) 380 to_write = (uint32_t) left; 381 382 nwritten = to_write; 383 384 if (copy_from_user(tmp, buf, to_write)) 385 { 386 err = -EFAULT; 387 goto fail; 388 } 389 390 err = VbglR0SfWritePhysCont(&client_handle, &sf_g->map, sf_r->handle, 391 pos, &nwritten, tmp_phys); 392 err = RT_FAILURE(err) ? -EPROTO : 0; 393 if (err) 394 goto fail; 395 396 pos += nwritten; 397 left -= nwritten; 398 buf += nwritten; 399 total_bytes_written += nwritten; 400 if (nwritten != to_write) 401 break; 402 } 403 404 *off += total_bytes_written; 405 if (*off > inode->i_size) 406 inode->i_size = *off; 407 408 sf_i->force_restat = 1; 409 free_bounce_buffer(tmp); 410 return total_bytes_written; 411 412 fail: 413 free_bounce_buffer(tmp); 414 return err; 362 if (!size) 363 return 0; 364 365 tmp = 366 alloc_bounce_buffer(&tmp_size, &tmp_phys, size, 367 __PRETTY_FUNCTION__); 368 if (!tmp) 369 return -ENOMEM; 370 371 while (left) { 372 uint32_t to_write, nwritten; 373 374 to_write = tmp_size; 375 if (to_write > left) 376 to_write = (uint32_t) left; 377 378 nwritten = to_write; 379 380 if (copy_from_user(tmp, buf, to_write)) { 381 err = -EFAULT; 382 goto fail; 383 } 384 385 err = 386 VbglR0SfWritePhysCont(&client_handle, &sf_g->map, 387 sf_r->handle, pos, &nwritten, 388 tmp_phys); 389 err = RT_FAILURE(err) ? -EPROTO : 0; 390 if (err) 391 goto fail; 392 393 pos += nwritten; 394 left -= nwritten; 395 buf += nwritten; 396 total_bytes_written += nwritten; 397 if (nwritten != to_write) 398 break; 399 } 400 401 *off += total_bytes_written; 402 if (*off > inode->i_size) 403 inode->i_size = *off; 404 405 sf_i->force_restat = 1; 406 free_bounce_buffer(tmp); 407 return total_bytes_written; 408 409 fail: 410 free_bounce_buffer(tmp); 411 return err; 415 412 } 416 413 … … 424 421 static int sf_reg_open(struct inode *inode, struct file *file) 425 422 { 426 int rc, rc_linux = 0; 427 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); 428 struct sf_inode_info *sf_i = GET_INODE_INFO(inode); 429 struct sf_reg_info *sf_r; 430 SHFLCREATEPARMS params; 431 432 TRACE(); 433 BUG_ON(!sf_g); 434 BUG_ON(!sf_i); 435 436 LogFunc(("open %s\n", sf_i->path->String.utf8)); 437 438 sf_r = kmalloc(sizeof(*sf_r), GFP_KERNEL); 439 if (!sf_r) 440 { 441 LogRelFunc(("could not allocate reg info\n")); 442 return -ENOMEM; 443 } 444 445 /* Already open? */ 446 if (sf_i->handle != SHFL_HANDLE_NIL) 447 { 448 /* 449 * This inode was created with sf_create_aux(). Check the CreateFlags: 450 * O_CREAT, O_TRUNC: inherent true (file was just created). Not sure 451 * about the access flags (SHFL_CF_ACCESS_*). 452 */ 453 sf_i->force_restat = 1; 454 sf_r->handle = sf_i->handle; 455 sf_i->handle = SHFL_HANDLE_NIL; 456 sf_i->file = file; 457 file->private_data = sf_r; 458 return 0; 459 } 460 461 RT_ZERO(params); 462 params.Handle = SHFL_HANDLE_NIL; 463 /* We check the value of params.Handle afterwards to find out if 464 * the call succeeded or failed, as the API does not seem to cleanly 465 * distinguish error and informational messages. 
466 * 467 * Furthermore, we must set params.Handle to SHFL_HANDLE_NIL to 468 * make the shared folders host service use our fMode parameter */ 469 470 if (file->f_flags & O_CREAT) 471 { 472 LogFunc(("O_CREAT set\n")); 473 params.CreateFlags |= SHFL_CF_ACT_CREATE_IF_NEW; 474 /* We ignore O_EXCL, as the Linux kernel seems to call create 475 beforehand itself, so O_EXCL should always fail. */ 476 if (file->f_flags & O_TRUNC) 477 { 478 LogFunc(("O_TRUNC set\n")); 479 params.CreateFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS; 480 } 481 else 482 params.CreateFlags |= SHFL_CF_ACT_OPEN_IF_EXISTS; 483 } 484 else 485 { 486 params.CreateFlags |= SHFL_CF_ACT_FAIL_IF_NEW; 487 if (file->f_flags & O_TRUNC) 488 { 489 LogFunc(("O_TRUNC set\n")); 490 params.CreateFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS; 491 } 492 } 493 494 switch (file->f_flags & O_ACCMODE) 495 { 496 case O_RDONLY: 497 params.CreateFlags |= SHFL_CF_ACCESS_READ; 498 break; 499 500 case O_WRONLY: 501 params.CreateFlags |= SHFL_CF_ACCESS_WRITE; 502 break; 503 504 case O_RDWR: 505 params.CreateFlags |= SHFL_CF_ACCESS_READWRITE; 506 break; 507 508 default: 509 BUG (); 510 } 511 512 if (file->f_flags & O_APPEND) 513 { 514 LogFunc(("O_APPEND set\n")); 515 params.CreateFlags |= SHFL_CF_ACCESS_APPEND; 516 } 517 518 params.Info.Attr.fMode = inode->i_mode; 519 LogFunc(("sf_reg_open: calling VbglR0SfCreate, file %s, flags=%#x, %#x\n", 520 sf_i->path->String.utf8 , file->f_flags, params.CreateFlags)); 521 rc = VbglR0SfCreate(&client_handle, &sf_g->map, sf_i->path, ¶ms); 522 if (RT_FAILURE(rc)) 523 { 524 LogFunc(("VbglR0SfCreate failed flags=%d,%#x rc=%Rrc\n", 525 file->f_flags, params.CreateFlags, rc)); 526 kfree(sf_r); 527 return -RTErrConvertToErrno(rc); 528 } 529 530 if (SHFL_HANDLE_NIL == params.Handle) 531 { 532 switch (params.Result) 533 { 534 case SHFL_PATH_NOT_FOUND: 535 case SHFL_FILE_NOT_FOUND: 536 rc_linux = -ENOENT; 537 break; 538 case SHFL_FILE_EXISTS: 539 rc_linux = -EEXIST; 540 break; 541 default: 542 break; 543 } 544 } 545 546 sf_i->force_restat = 1; 547 sf_r->handle = params.Handle; 548 sf_i->file = file; 549 file->private_data = sf_r; 550 return rc_linux; 423 int rc, rc_linux = 0; 424 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); 425 struct sf_inode_info *sf_i = GET_INODE_INFO(inode); 426 struct sf_reg_info *sf_r; 427 SHFLCREATEPARMS params; 428 429 TRACE(); 430 BUG_ON(!sf_g); 431 BUG_ON(!sf_i); 432 433 LogFunc(("open %s\n", sf_i->path->String.utf8)); 434 435 sf_r = kmalloc(sizeof(*sf_r), GFP_KERNEL); 436 if (!sf_r) { 437 LogRelFunc(("could not allocate reg info\n")); 438 return -ENOMEM; 439 } 440 441 /* Already open? */ 442 if (sf_i->handle != SHFL_HANDLE_NIL) { 443 /* 444 * This inode was created with sf_create_aux(). Check the CreateFlags: 445 * O_CREAT, O_TRUNC: inherent true (file was just created). Not sure 446 * about the access flags (SHFL_CF_ACCESS_*). 447 */ 448 sf_i->force_restat = 1; 449 sf_r->handle = sf_i->handle; 450 sf_i->handle = SHFL_HANDLE_NIL; 451 sf_i->file = file; 452 file->private_data = sf_r; 453 return 0; 454 } 455 456 RT_ZERO(params); 457 params.Handle = SHFL_HANDLE_NIL; 458 /* We check the value of params.Handle afterwards to find out if 459 * the call succeeded or failed, as the API does not seem to cleanly 460 * distinguish error and informational messages. 
461 * 462 * Furthermore, we must set params.Handle to SHFL_HANDLE_NIL to 463 * make the shared folders host service use our fMode parameter */ 464 465 if (file->f_flags & O_CREAT) { 466 LogFunc(("O_CREAT set\n")); 467 params.CreateFlags |= SHFL_CF_ACT_CREATE_IF_NEW; 468 /* We ignore O_EXCL, as the Linux kernel seems to call create 469 beforehand itself, so O_EXCL should always fail. */ 470 if (file->f_flags & O_TRUNC) { 471 LogFunc(("O_TRUNC set\n")); 472 params.CreateFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS; 473 } else 474 params.CreateFlags |= SHFL_CF_ACT_OPEN_IF_EXISTS; 475 } else { 476 params.CreateFlags |= SHFL_CF_ACT_FAIL_IF_NEW; 477 if (file->f_flags & O_TRUNC) { 478 LogFunc(("O_TRUNC set\n")); 479 params.CreateFlags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS; 480 } 481 } 482 483 switch (file->f_flags & O_ACCMODE) { 484 case O_RDONLY: 485 params.CreateFlags |= SHFL_CF_ACCESS_READ; 486 break; 487 488 case O_WRONLY: 489 params.CreateFlags |= SHFL_CF_ACCESS_WRITE; 490 break; 491 492 case O_RDWR: 493 params.CreateFlags |= SHFL_CF_ACCESS_READWRITE; 494 break; 495 496 default: 497 BUG(); 498 } 499 500 if (file->f_flags & O_APPEND) { 501 LogFunc(("O_APPEND set\n")); 502 params.CreateFlags |= SHFL_CF_ACCESS_APPEND; 503 } 504 505 params.Info.Attr.fMode = inode->i_mode; 506 LogFunc(("sf_reg_open: calling VbglR0SfCreate, file %s, flags=%#x, %#x\n", sf_i->path->String.utf8, file->f_flags, params.CreateFlags)); 507 rc = VbglR0SfCreate(&client_handle, &sf_g->map, sf_i->path, ¶ms); 508 if (RT_FAILURE(rc)) { 509 LogFunc(("VbglR0SfCreate failed flags=%d,%#x rc=%Rrc\n", 510 file->f_flags, params.CreateFlags, rc)); 511 kfree(sf_r); 512 return -RTErrConvertToErrno(rc); 513 } 514 515 if (SHFL_HANDLE_NIL == params.Handle) { 516 switch (params.Result) { 517 case SHFL_PATH_NOT_FOUND: 518 case SHFL_FILE_NOT_FOUND: 519 rc_linux = -ENOENT; 520 break; 521 case SHFL_FILE_EXISTS: 522 rc_linux = -EEXIST; 523 break; 524 default: 525 break; 526 } 527 } 528 529 sf_i->force_restat = 1; 530 sf_r->handle = params.Handle; 531 sf_i->file = file; 532 file->private_data = sf_r; 533 return rc_linux; 551 534 } 552 535 … … 560 543 static int sf_reg_release(struct inode *inode, struct file *file) 561 544 { 562 563 564 565 566 567 568 569 570 571 572 545 int rc; 546 struct sf_reg_info *sf_r; 547 struct sf_glob_info *sf_g; 548 struct sf_inode_info *sf_i = GET_INODE_INFO(inode); 549 550 TRACE(); 551 sf_g = GET_GLOB_INFO(inode->i_sb); 552 sf_r = file->private_data; 553 554 BUG_ON(!sf_g); 555 BUG_ON(!sf_r); 573 556 574 557 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25) 575 576 577 578 579 580 if (inode->i_mapping->nrpages581 582 583 #endif 584 585 586 587 588 589 590 591 592 558 /* See the smbfs source (file.c). mmap in particular can cause data to be 559 * written to the file after it is closed, which we can't cope with. We 560 * copy and paste the body of filemap_write_and_wait() here as it was not 561 * defined before 2.6.6 and not exported until quite a bit later. 
*/ 562 /* filemap_write_and_wait(inode->i_mapping); */ 563 if (inode->i_mapping->nrpages 564 && filemap_fdatawrite(inode->i_mapping) != -EIO) 565 filemap_fdatawait(inode->i_mapping); 566 #endif 567 rc = VbglR0SfClose(&client_handle, &sf_g->map, sf_r->handle); 568 if (RT_FAILURE(rc)) 569 LogFunc(("VbglR0SfClose failed rc=%Rrc\n", rc)); 570 571 kfree(sf_r); 572 sf_i->file = NULL; 573 sf_i->handle = SHFL_HANDLE_NIL; 574 file->private_data = NULL; 575 return 0; 593 576 } 594 577 … … 598 581 static int sf_reg_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 599 582 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) 600 static struct page *sf_reg_nopage(struct vm_area_struct *vma, unsigned long vaddr, int *type) 601 # define SET_TYPE(t) *type = (t) 602 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */ 603 static struct page *sf_reg_nopage(struct vm_area_struct *vma, unsigned long vaddr, int unused) 604 # define SET_TYPE(t) 605 #endif 606 { 607 struct page *page; 608 char *buf; 609 loff_t off; 610 uint32_t nread = PAGE_SIZE; 611 int err; 583 static struct page *sf_reg_nopage(struct vm_area_struct *vma, 584 unsigned long vaddr, int *type) 585 #define SET_TYPE(t) *type = (t) 586 #else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */ 587 static struct page *sf_reg_nopage(struct vm_area_struct *vma, 588 unsigned long vaddr, int unused) 589 #define SET_TYPE(t) 590 #endif 591 { 592 struct page *page; 593 char *buf; 594 loff_t off; 595 uint32_t nread = PAGE_SIZE; 596 int err; 612 597 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) 613 614 #endif 615 616 617 618 619 620 598 struct vm_area_struct *vma = vmf->vma; 599 #endif 600 struct file *file = vma->vm_file; 601 struct inode *inode = GET_F_DENTRY(file)->d_inode; 602 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); 603 struct sf_reg_info *sf_r = file->private_data; 604 605 TRACE(); 621 606 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25) 622 if (vmf->pgoff > vma->vm_end) 623 return VM_FAULT_SIGBUS; 624 #else 625 if (vaddr > vma->vm_end) 626 { 627 SET_TYPE(VM_FAULT_SIGBUS); 628 return NOPAGE_SIGBUS; 629 } 630 #endif 631 632 /* Don't use GFP_HIGHUSER as long as sf_reg_read_aux() calls VbglR0SfRead() 633 * which works on virtual addresses. On Linux cannot reliably determine the 634 * physical address for high memory, see rtR0MemObjNativeLockKernel(). */ 635 page = alloc_page(GFP_USER); 636 if (!page) { 637 LogRelFunc(("failed to allocate page\n")); 607 if (vmf->pgoff > vma->vm_end) 608 return VM_FAULT_SIGBUS; 609 #else 610 if (vaddr > vma->vm_end) { 611 SET_TYPE(VM_FAULT_SIGBUS); 612 return NOPAGE_SIGBUS; 613 } 614 #endif 615 616 /* Don't use GFP_HIGHUSER as long as sf_reg_read_aux() calls VbglR0SfRead() 617 * which works on virtual addresses. On Linux cannot reliably determine the 618 * physical address for high memory, see rtR0MemObjNativeLockKernel(). 
*/ 619 page = alloc_page(GFP_USER); 620 if (!page) { 621 LogRelFunc(("failed to allocate page\n")); 638 622 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25) 639 640 #else 641 642 643 #endif 644 645 646 623 return VM_FAULT_OOM; 624 #else 625 SET_TYPE(VM_FAULT_OOM); 626 return NOPAGE_OOM; 627 #endif 628 } 629 630 buf = kmap(page); 647 631 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25) 648 off = (vmf->pgoff << PAGE_SHIFT); 649 #else 650 off = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT); 651 #endif 652 err = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off); 653 if (err) 654 { 655 kunmap(page); 656 put_page(page); 632 off = (vmf->pgoff << PAGE_SHIFT); 633 #else 634 off = (vaddr - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT); 635 #endif 636 err = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off); 637 if (err) { 638 kunmap(page); 639 put_page(page); 657 640 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25) 658 return VM_FAULT_SIGBUS; 659 #else 660 SET_TYPE(VM_FAULT_SIGBUS); 661 return NOPAGE_SIGBUS; 662 #endif 663 } 664 665 BUG_ON (nread > PAGE_SIZE); 666 if (!nread) 667 { 641 return VM_FAULT_SIGBUS; 642 #else 643 SET_TYPE(VM_FAULT_SIGBUS); 644 return NOPAGE_SIGBUS; 645 #endif 646 } 647 648 BUG_ON(nread > PAGE_SIZE); 649 if (!nread) { 668 650 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25) 669 651 clear_user_page(page_address(page), vmf->pgoff, page); 670 652 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) 671 clear_user_page(page_address(page), vaddr, page); 672 #else 673 clear_user_page(page_address(page), vaddr); 674 #endif 675 } 676 else 677 memset(buf + nread, 0, PAGE_SIZE - nread); 678 679 flush_dcache_page(page); 680 kunmap(page); 653 clear_user_page(page_address(page), vaddr, page); 654 #else 655 clear_user_page(page_address(page), vaddr); 656 #endif 657 } else 658 memset(buf + nread, 0, PAGE_SIZE - nread); 659 660 flush_dcache_page(page); 661 kunmap(page); 681 662 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25) 682 vmf->page = page; 683 return 0; 684 #else 685 SET_TYPE(VM_FAULT_MAJOR); 686 return page; 687 #endif 688 } 689 690 static struct vm_operations_struct sf_vma_ops = 691 { 663 vmf->page = page; 664 return 0; 665 #else 666 SET_TYPE(VM_FAULT_MAJOR); 667 return page; 668 #endif 669 } 670 671 static struct vm_operations_struct sf_vma_ops = { 692 672 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25) 693 694 #else 695 673 .fault = sf_reg_fault 674 #else 675 .nopage = sf_reg_nopage 696 676 #endif 697 677 }; … … 699 679 static int sf_reg_mmap(struct file *file, struct vm_area_struct *vma) 700 680 { 701 TRACE(); 702 if (vma->vm_flags & VM_SHARED) 703 { 704 LogFunc(("shared mmapping not available\n")); 705 return -EINVAL; 706 } 707 708 vma->vm_ops = &sf_vma_ops; 709 return 0; 710 } 711 712 struct file_operations sf_reg_fops = 713 { 714 .read = sf_reg_read, 715 .open = sf_reg_open, 716 .write = sf_reg_write, 717 .release = sf_reg_release, 718 .mmap = sf_reg_mmap, 681 TRACE(); 682 if (vma->vm_flags & VM_SHARED) { 683 LogFunc(("shared mmapping not available\n")); 684 return -EINVAL; 685 } 686 687 vma->vm_ops = &sf_vma_ops; 688 return 0; 689 } 690 691 struct file_operations sf_reg_fops = { 692 .read = sf_reg_read, 693 .open = sf_reg_open, 694 .write = sf_reg_write, 695 .release = sf_reg_release, 696 .mmap = sf_reg_mmap, 719 697 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) 720 # 698 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31) 721 699 /** @todo This code is known to cause caching of data which should not be 722 700 * cached. Investigate. 
*/ 723 # 724 725 # 726 .sendfile= generic_file_sendfile,727 # 728 .aio_read= generic_file_aio_read,729 .aio_write= generic_file_aio_write,730 # 731 # 732 .fsync= noop_fsync,733 # 734 .fsync= simple_sync_file,735 # 736 .llseek= generic_file_llseek,701 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) 702 .splice_read = sf_splice_read, 703 #else 704 .sendfile = generic_file_sendfile, 705 #endif 706 .aio_read = generic_file_aio_read, 707 .aio_write = generic_file_aio_write, 708 #endif 709 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35) 710 .fsync = noop_fsync, 711 #else 712 .fsync = simple_sync_file, 713 #endif 714 .llseek = generic_file_llseek, 737 715 #endif 738 716 }; 739 717 740 741 struct inode_operations sf_reg_iops = 742 { 718 struct inode_operations sf_reg_iops = { 743 719 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) 744 745 #else 746 .getattr= sf_getattr,747 .setattr= sf_setattr720 .revalidate = sf_inode_revalidate 721 #else 722 .getattr = sf_getattr, 723 .setattr = sf_setattr 748 724 #endif 749 725 }; 750 751 726 752 727 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) 753 728 static int sf_readpage(struct file *file, struct page *page) 754 729 { 755 struct inode *inode = GET_F_DENTRY(file)->d_inode; 756 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); 757 struct sf_reg_info *sf_r = file->private_data; 758 uint32_t nread = PAGE_SIZE; 759 char *buf; 760 loff_t off = ((loff_t)page->index) << PAGE_SHIFT; 761 int ret; 762 763 TRACE(); 764 765 buf = kmap(page); 766 ret = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off); 767 if (ret) 768 { 769 kunmap(page); 770 if (PageLocked(page)) 771 unlock_page(page); 772 return ret; 773 } 774 BUG_ON(nread > PAGE_SIZE); 775 memset(&buf[nread], 0, PAGE_SIZE - nread); 776 flush_dcache_page(page); 777 kunmap(page); 778 SetPageUptodate(page); 779 unlock_page(page); 780 return 0; 781 } 782 783 static int 784 sf_writepage(struct page *page, struct writeback_control *wbc) 785 { 786 struct address_space *mapping = page->mapping; 787 struct inode *inode = mapping->host; 788 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); 789 struct sf_inode_info *sf_i = GET_INODE_INFO(inode); 790 struct file *file = sf_i->file; 791 struct sf_reg_info *sf_r = file->private_data; 792 char *buf; 793 uint32_t nwritten = PAGE_SIZE; 794 int end_index = inode->i_size >> PAGE_SHIFT; 795 loff_t off = ((loff_t) page->index) << PAGE_SHIFT; 796 int err; 797 798 TRACE(); 799 800 if (page->index >= end_index) 801 nwritten = inode->i_size & (PAGE_SIZE-1); 802 803 buf = kmap(page); 804 805 err = sf_reg_write_aux(__func__, sf_g, sf_r, buf, &nwritten, off); 806 if (err < 0) 807 { 808 ClearPageUptodate(page); 809 goto out; 810 } 811 812 if (off > inode->i_size) 813 inode->i_size = off; 814 815 if (PageError(page)) 816 ClearPageError(page); 817 err = 0; 818 819 out: 820 kunmap(page); 821 822 unlock_page(page); 823 return err; 824 } 825 826 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) 730 struct inode *inode = GET_F_DENTRY(file)->d_inode; 731 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); 732 struct sf_reg_info *sf_r = file->private_data; 733 uint32_t nread = PAGE_SIZE; 734 char *buf; 735 loff_t off = ((loff_t) page->index) << PAGE_SHIFT; 736 int ret; 737 738 TRACE(); 739 740 buf = kmap(page); 741 ret = sf_reg_read_aux(__func__, sf_g, sf_r, buf, &nread, off); 742 if (ret) { 743 kunmap(page); 744 if (PageLocked(page)) 745 unlock_page(page); 746 return ret; 747 } 748 BUG_ON(nread > PAGE_SIZE); 749 memset(&buf[nread], 0, PAGE_SIZE - nread); 750 
flush_dcache_page(page); 751 kunmap(page); 752 SetPageUptodate(page); 753 unlock_page(page); 754 return 0; 755 } 756 757 static int sf_writepage(struct page *page, struct writeback_control *wbc) 758 { 759 struct address_space *mapping = page->mapping; 760 struct inode *inode = mapping->host; 761 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); 762 struct sf_inode_info *sf_i = GET_INODE_INFO(inode); 763 struct file *file = sf_i->file; 764 struct sf_reg_info *sf_r = file->private_data; 765 char *buf; 766 uint32_t nwritten = PAGE_SIZE; 767 int end_index = inode->i_size >> PAGE_SHIFT; 768 loff_t off = ((loff_t) page->index) << PAGE_SHIFT; 769 int err; 770 771 TRACE(); 772 773 if (page->index >= end_index) 774 nwritten = inode->i_size & (PAGE_SIZE - 1); 775 776 buf = kmap(page); 777 778 err = sf_reg_write_aux(__func__, sf_g, sf_r, buf, &nwritten, off); 779 if (err < 0) { 780 ClearPageUptodate(page); 781 goto out; 782 } 783 784 if (off > inode->i_size) 785 inode->i_size = off; 786 787 if (PageError(page)) 788 ClearPageError(page); 789 err = 0; 790 791 out: 792 kunmap(page); 793 794 unlock_page(page); 795 return err; 796 } 797 798 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) 827 799 int sf_write_begin(struct file *file, struct address_space *mapping, loff_t pos, 828 unsigned len, unsigned flags, struct page **pagep, void **fsdata) 829 { 830 TRACE(); 831 832 return simple_write_begin(file, mapping, pos, len, flags, pagep, fsdata); 800 unsigned len, unsigned flags, struct page **pagep, 801 void **fsdata) 802 { 803 TRACE(); 804 805 return simple_write_begin(file, mapping, pos, len, flags, pagep, 806 fsdata); 833 807 } 834 808 835 809 int sf_write_end(struct file *file, struct address_space *mapping, loff_t pos, 836 unsigned len, unsigned copied, struct page *page, void *fsdata) 837 { 838 struct inode *inode = mapping->host; 839 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); 840 struct sf_reg_info *sf_r = file->private_data; 841 void *buf; 842 unsigned from = pos & (PAGE_SIZE - 1); 843 uint32_t nwritten = len; 844 int err; 845 846 TRACE(); 847 848 buf = kmap(page); 849 err = sf_reg_write_aux(__func__, sf_g, sf_r, buf+from, &nwritten, pos); 850 kunmap(page); 851 852 if (!PageUptodate(page) && err == PAGE_SIZE) 853 SetPageUptodate(page); 854 855 if (err >= 0) { 856 pos += nwritten; 857 if (pos > inode->i_size) 858 inode->i_size = pos; 859 } 860 861 unlock_page(page); 810 unsigned len, unsigned copied, struct page *page, void *fsdata) 811 { 812 struct inode *inode = mapping->host; 813 struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb); 814 struct sf_reg_info *sf_r = file->private_data; 815 void *buf; 816 unsigned from = pos & (PAGE_SIZE - 1); 817 uint32_t nwritten = len; 818 int err; 819 820 TRACE(); 821 822 buf = kmap(page); 823 err = 824 sf_reg_write_aux(__func__, sf_g, sf_r, buf + from, &nwritten, pos); 825 kunmap(page); 826 827 if (!PageUptodate(page) && err == PAGE_SIZE) 828 SetPageUptodate(page); 829 830 if (err >= 0) { 831 pos += nwritten; 832 if (pos > inode->i_size) 833 inode->i_size = pos; 834 } 835 836 unlock_page(page); 862 837 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0) 863 put_page(page); 864 #else 865 page_cache_release(page); 866 #endif 867 868 return nwritten; 869 } 870 871 # endif /* KERNEL_VERSION >= 2.6.24 */ 872 873 struct address_space_operations sf_reg_aops = 874 { 875 .readpage = sf_readpage, 876 .writepage = sf_writepage, 877 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) 878 .write_begin = sf_write_begin, 879 .write_end = sf_write_end, 880 # 
else 881 .prepare_write = simple_prepare_write, 882 .commit_write = simple_commit_write, 883 # endif 838 put_page(page); 839 #else 840 page_cache_release(page); 841 #endif 842 843 return nwritten; 844 } 845 846 #endif /* KERNEL_VERSION >= 2.6.24 */ 847 848 struct address_space_operations sf_reg_aops = { 849 .readpage = sf_readpage, 850 .writepage = sf_writepage, 851 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) 852 .write_begin = sf_write_begin, 853 .write_end = sf_write_end, 854 #else 855 .prepare_write = simple_prepare_write, 856 .commit_write = simple_commit_write, 857 #endif 884 858 }; 885 859 #endif -
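The sf_reg_open() path in dirops.c above translates the Linux open(2) flags carried in file->f_flags into SHFL_CF_* create flags before calling VbglR0SfCreate(). The following is a minimal sketch of that mapping, condensed from the branches shown above; the helper name sf_oflags_to_createflags() is hypothetical and the SHFL_CF_* constants are assumed to come from the VirtualBox shflsvc.h header, with the O_* flags from the kernel fcntl definitions already included by the file.

/* Hypothetical helper, not part of the changeset: condenses the f_flags
 * handling of sf_reg_open() above into one place. */
static uint32_t sf_oflags_to_createflags(int f_flags)
{
        uint32_t fCreate = 0;

        if (f_flags & O_CREAT) {
                fCreate |= SHFL_CF_ACT_CREATE_IF_NEW;
                /* O_EXCL is ignored here, as in sf_reg_open(): the VFS already
                 * performed the create step, so exclusive create would always fail. */
                fCreate |= (f_flags & O_TRUNC) ? SHFL_CF_ACT_OVERWRITE_IF_EXISTS
                                               : SHFL_CF_ACT_OPEN_IF_EXISTS;
        } else {
                fCreate |= SHFL_CF_ACT_FAIL_IF_NEW;
                if (f_flags & O_TRUNC)
                        fCreate |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
        }

        switch (f_flags & O_ACCMODE) {
        case O_RDONLY:
                fCreate |= SHFL_CF_ACCESS_READ;
                break;
        case O_WRONLY:
                fCreate |= SHFL_CF_ACCESS_WRITE;
                break;
        case O_RDWR:
                fCreate |= SHFL_CF_ACCESS_READWRITE;
                break;
        }

        if (f_flags & O_APPEND)
                fCreate |= SHFL_CF_ACCESS_APPEND;

        return fCreate;
}

As in sf_reg_open() itself, the access mode is taken only from O_ACCMODE and the append flag is mapped separately to SHFL_CF_ACCESS_APPEND.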
trunk/src/VBox/Additions/linux/sharedfolders/utils.c
r70728 r70786 37 37 38 38 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) 39 static void sf_ftime_from_timespec(time_t * time, RTTIMESPEC *ts)40 { 41 42 43 44 45 } 46 47 static void sf_timespec_from_ftime(RTTIMESPEC * ts, time_t *time)48 { 49 50 51 } 52 #else 53 static void sf_ftime_from_timespec(struct timespec *tv, RTTIMESPEC * ts)54 { 55 56 57 58 59 60 61 } 62 63 static void sf_timespec_from_ftime(RTTIMESPEC * ts, struct timespec *tv)64 { 65 int64_t t = (int64_t)tv->tv_nsec + (int64_t)tv->tv_sec * 1000000000;66 67 } 68 #endif 39 static void sf_ftime_from_timespec(time_t * time, RTTIMESPEC * ts) 40 { 41 int64_t t = RTTimeSpecGetNano(ts); 42 43 do_div(t, 1000000000); 44 *time = t; 45 } 46 47 static void sf_timespec_from_ftime(RTTIMESPEC * ts, time_t * time) 48 { 49 int64_t t = 1000000000 * *time; 50 RTTimeSpecSetNano(ts, t); 51 } 52 #else /* >= 2.6.0 */ 53 static void sf_ftime_from_timespec(struct timespec *tv, RTTIMESPEC * ts) 54 { 55 int64_t t = RTTimeSpecGetNano(ts); 56 int64_t nsec; 57 58 nsec = do_div(t, 1000000000); 59 tv->tv_sec = t; 60 tv->tv_nsec = nsec; 61 } 62 63 static void sf_timespec_from_ftime(RTTIMESPEC * ts, struct timespec *tv) 64 { 65 int64_t t = (int64_t) tv->tv_nsec + (int64_t) tv->tv_sec * 1000000000; 66 RTTimeSpecSetNano(ts, t); 67 } 68 #endif /* >= 2.6.0 */ 69 69 70 70 /* set [inode] attributes based on [info], uid/gid based on [sf_g] */ 71 71 void sf_init_inode(struct sf_glob_info *sf_g, struct inode *inode, 72 73 { 74 75 76 77 78 79 72 PSHFLFSOBJINFO info) 73 { 74 PSHFLFSOBJATTR attr; 75 int mode; 76 77 TRACE(); 78 79 attr = &info->Attr; 80 80 81 81 #define mode_set(r) attr->fMode & (RTFS_UNIX_##r) ? (S_##r) : 0; 82 mode= mode_set(IRUSR);83 84 85 86 87 88 89 90 91 92 82 mode = mode_set(IRUSR); 83 mode |= mode_set(IWUSR); 84 mode |= mode_set(IXUSR); 85 86 mode |= mode_set(IRGRP); 87 mode |= mode_set(IWGRP); 88 mode |= mode_set(IXGRP); 89 90 mode |= mode_set(IROTH); 91 mode |= mode_set(IWOTH); 92 mode |= mode_set(IXOTH); 93 93 94 94 #undef mode_set 95 95 96 96 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) 97 inode->i_mapping->a_ops = &sf_reg_aops; 98 # if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 19, 0) 99 /* XXX Was this ever necessary? */ 100 inode->i_mapping->backing_dev_info = &sf_g->bdi; 101 # endif 102 #endif 103 104 if (RTFS_IS_DIRECTORY(attr->fMode)) 105 { 106 inode->i_mode = sf_g->dmode != ~0 ? (sf_g->dmode & 0777) : mode; 107 inode->i_mode &= ~sf_g->dmask; 108 inode->i_mode |= S_IFDIR; 109 inode->i_op = &sf_dir_iops; 110 inode->i_fop = &sf_dir_fops; 111 /* XXX: this probably should be set to the number of entries 112 in the directory plus two (. ..) */ 97 inode->i_mapping->a_ops = &sf_reg_aops; 98 #if LINUX_VERSION_CODE <= KERNEL_VERSION(3, 19, 0) 99 /* XXX Was this ever necessary? */ 100 inode->i_mapping->backing_dev_info = &sf_g->bdi; 101 #endif 102 #endif 103 104 if (RTFS_IS_DIRECTORY(attr->fMode)) { 105 inode->i_mode = sf_g->dmode != ~0 ? (sf_g->dmode & 0777) : mode; 106 inode->i_mode &= ~sf_g->dmask; 107 inode->i_mode |= S_IFDIR; 108 inode->i_op = &sf_dir_iops; 109 inode->i_fop = &sf_dir_fops; 110 /* XXX: this probably should be set to the number of entries 111 in the directory plus two (. ..) */ 113 112 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0) 114 115 #else 116 117 #endif 118 113 set_nlink(inode, 1); 114 #else 115 inode->i_nlink = 1; 116 #endif 117 } 119 118 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) 120 else if (RTFS_IS_SYMLINK(attr->fMode)) 121 { 122 inode->i_mode = sf_g->fmode != ~0 ? 
(sf_g->fmode & 0777): mode; 123 inode->i_mode &= ~sf_g->fmask; 124 inode->i_mode |= S_IFLNK; 125 inode->i_op = &sf_lnk_iops; 126 # if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0) 127 set_nlink(inode, 1); 128 # else 129 inode->i_nlink = 1; 130 # endif 131 } 132 #endif 133 else 134 { 135 inode->i_mode = sf_g->fmode != ~0 ? (sf_g->fmode & 0777): mode; 136 inode->i_mode &= ~sf_g->fmask; 137 inode->i_mode |= S_IFREG; 138 inode->i_op = &sf_reg_iops; 139 inode->i_fop = &sf_reg_fops; 119 else if (RTFS_IS_SYMLINK(attr->fMode)) { 120 inode->i_mode = sf_g->fmode != ~0 ? (sf_g->fmode & 0777) : mode; 121 inode->i_mode &= ~sf_g->fmask; 122 inode->i_mode |= S_IFLNK; 123 inode->i_op = &sf_lnk_iops; 140 124 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0) 141 set_nlink(inode, 1); 142 #else 143 inode->i_nlink = 1; 144 #endif 145 } 125 set_nlink(inode, 1); 126 #else 127 inode->i_nlink = 1; 128 #endif 129 } 130 #endif 131 else { 132 inode->i_mode = sf_g->fmode != ~0 ? (sf_g->fmode & 0777) : mode; 133 inode->i_mode &= ~sf_g->fmask; 134 inode->i_mode |= S_IFREG; 135 inode->i_op = &sf_reg_iops; 136 inode->i_fop = &sf_reg_fops; 137 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0) 138 set_nlink(inode, 1); 139 #else 140 inode->i_nlink = 1; 141 #endif 142 } 146 143 147 144 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) 148 149 150 #else 151 152 153 #endif 154 155 145 inode->i_uid = make_kuid(current_user_ns(), sf_g->uid); 146 inode->i_gid = make_kgid(current_user_ns(), sf_g->gid); 147 #else 148 inode->i_uid = sf_g->uid; 149 inode->i_gid = sf_g->gid; 150 #endif 151 152 inode->i_size = info->cbObject; 156 153 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) && !defined(KERNEL_FC6) 157 154 inode->i_blksize = 4096; 158 155 #endif 159 156 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 11) 160 161 #endif 162 163 164 165 166 167 157 inode->i_blkbits = 12; 158 #endif 159 /* i_blocks always in units of 512 bytes! */ 160 inode->i_blocks = (info->cbAllocated + 511) / 512; 161 162 sf_ftime_from_timespec(&inode->i_atime, &info->AccessTime); 163 sf_ftime_from_timespec(&inode->i_ctime, &info->ChangeTime); 164 sf_ftime_from_timespec(&inode->i_mtime, &info->ModificationTime); 168 165 } 169 166 170 167 int sf_stat(const char *caller, struct sf_glob_info *sf_g, 171 SHFLSTRING *path, PSHFLFSOBJINFO result, int ok_to_fail) 172 { 173 int rc; 174 SHFLCREATEPARMS params; 175 NOREF(caller); 176 177 TRACE(); 178 179 RT_ZERO(params); 180 params.Handle = SHFL_HANDLE_NIL; 181 params.CreateFlags = SHFL_CF_LOOKUP | SHFL_CF_ACT_FAIL_IF_NEW; 182 LogFunc(("sf_stat: calling VbglR0SfCreate, file %s, flags %#x\n", 183 path->String.utf8, params.CreateFlags)); 184 rc = VbglR0SfCreate(&client_handle, &sf_g->map, path, ¶ms); 185 if (rc == VERR_INVALID_NAME) 186 { 187 /* this can happen for names like 'foo*' on a Windows host */ 188 return -ENOENT; 189 } 190 if (RT_FAILURE(rc)) 191 { 192 LogFunc(("VbglR0SfCreate(%s) failed. caller=%s, rc=%Rrc\n", 193 path->String.utf8, rc, caller)); 194 return -EPROTO; 195 } 196 if (params.Result != SHFL_FILE_EXISTS) 197 { 198 if (!ok_to_fail) 199 LogFunc(("VbglR0SfCreate(%s) file does not exist. 
caller=%s, result=%d\n", 200 path->String.utf8, params.Result, caller)); 201 return -ENOENT; 202 } 203 204 *result = params.Info; 205 return 0; 168 SHFLSTRING * path, PSHFLFSOBJINFO result, int ok_to_fail) 169 { 170 int rc; 171 SHFLCREATEPARMS params; 172 NOREF(caller); 173 174 TRACE(); 175 176 RT_ZERO(params); 177 params.Handle = SHFL_HANDLE_NIL; 178 params.CreateFlags = SHFL_CF_LOOKUP | SHFL_CF_ACT_FAIL_IF_NEW; 179 LogFunc(("sf_stat: calling VbglR0SfCreate, file %s, flags %#x\n", 180 path->String.utf8, params.CreateFlags)); 181 rc = VbglR0SfCreate(&client_handle, &sf_g->map, path, ¶ms); 182 if (rc == VERR_INVALID_NAME) { 183 /* this can happen for names like 'foo*' on a Windows host */ 184 return -ENOENT; 185 } 186 if (RT_FAILURE(rc)) { 187 LogFunc(("VbglR0SfCreate(%s) failed. caller=%s, rc=%Rrc\n", 188 path->String.utf8, rc, caller)); 189 return -EPROTO; 190 } 191 if (params.Result != SHFL_FILE_EXISTS) { 192 if (!ok_to_fail) 193 LogFunc(("VbglR0SfCreate(%s) file does not exist. caller=%s, result=%d\n", path->String.utf8, params.Result, caller)); 194 return -ENOENT; 195 } 196 197 *result = params.Info; 198 return 0; 206 199 } 207 200 … … 214 207 int sf_inode_revalidate(struct dentry *dentry) 215 208 { 216 217 218 219 220 221 222 if (!dentry || !dentry->d_inode) 223 { 224 LogFunc(("no dentry(%p) or inode(%p)\n", dentry,dentry->d_inode));225 226 227 228 229 209 int err; 210 struct sf_glob_info *sf_g; 211 struct sf_inode_info *sf_i; 212 SHFLFSOBJINFO info; 213 214 TRACE(); 215 if (!dentry || !dentry->d_inode) { 216 LogFunc(("no dentry(%p) or inode(%p)\n", dentry, 217 dentry->d_inode)); 218 return -EINVAL; 219 } 220 221 sf_g = GET_GLOB_INFO(dentry->d_inode->i_sb); 222 sf_i = GET_INODE_INFO(dentry->d_inode); 230 223 231 224 #if 0 232 printk("%s called by %p:%p\n", 233 sf_i->path->String.utf8, 234 __builtin_return_address (0), 235 __builtin_return_address (1)); 236 #endif 237 238 BUG_ON(!sf_g); 239 BUG_ON(!sf_i); 240 241 if (!sf_i->force_restat) 242 { 243 if (jiffies - dentry->d_time < sf_g->ttl) 244 return 0; 245 } 246 247 err = sf_stat(__func__, sf_g, sf_i->path, &info, 1); 248 if (err) 249 return err; 250 251 dentry->d_time = jiffies; 252 sf_init_inode(sf_g, dentry->d_inode, &info); 253 return 0; 225 printk("%s called by %p:%p\n", 226 sf_i->path->String.utf8, 227 __builtin_return_address(0), __builtin_return_address(1)); 228 #endif 229 230 BUG_ON(!sf_g); 231 BUG_ON(!sf_i); 232 233 if (!sf_i->force_restat) { 234 if (jiffies - dentry->d_time < sf_g->ttl) 235 return 0; 236 } 237 238 err = sf_stat(__func__, sf_g, sf_i->path, &info, 1); 239 if (err) 240 return err; 241 242 dentry->d_time = jiffies; 243 sf_init_inode(sf_g, dentry->d_inode, &info); 244 return 0; 254 245 } 255 246 … … 266 257 #endif 267 258 { 268 259 TRACE(); 269 260 270 261 #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) 271 272 262 if (flags & LOOKUP_RCU) 263 return -ECHILD; 273 264 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38) 274 275 276 277 #endif 278 279 280 281 282 265 /* see Documentation/filesystems/vfs.txt */ 266 if (nd && nd->flags & LOOKUP_RCU) 267 return -ECHILD; 268 #endif 269 270 if (sf_inode_revalidate(dentry)) 271 return 0; 272 273 return 1; 283 274 } 284 275 … … 288 279 [generic_fillattr] */ 289 280 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) 290 # if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) 291 int sf_getattr(const struct path *path, struct kstat *kstat, u32 request_mask, unsigned int flags) 292 # else 281 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) 282 int sf_getattr(const struct path 
*path, struct kstat *kstat, u32 request_mask, 283 unsigned int flags) 284 #else 293 285 int sf_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *kstat) 294 # 295 { 296 297 # 298 299 # 300 301 302 303 304 305 306 307 286 #endif 287 { 288 int err; 289 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) 290 struct dentry *dentry = path->dentry; 291 #endif 292 293 TRACE(); 294 err = sf_inode_revalidate(dentry); 295 if (err) 296 return err; 297 298 generic_fillattr(dentry->d_inode, kstat); 299 return 0; 308 300 } 309 301 310 302 int sf_setattr(struct dentry *dentry, struct iattr *iattr) 311 303 { 312 struct sf_glob_info *sf_g; 313 struct sf_inode_info *sf_i; 314 SHFLCREATEPARMS params; 315 SHFLFSOBJINFO info; 316 uint32_t cbBuffer; 317 int rc, err; 318 319 TRACE(); 320 321 sf_g = GET_GLOB_INFO(dentry->d_inode->i_sb); 322 sf_i = GET_INODE_INFO(dentry->d_inode); 323 err = 0; 324 325 RT_ZERO(params); 326 params.Handle = SHFL_HANDLE_NIL; 327 params.CreateFlags = SHFL_CF_ACT_OPEN_IF_EXISTS 328 | SHFL_CF_ACT_FAIL_IF_NEW 329 | SHFL_CF_ACCESS_ATTR_WRITE; 330 331 /* this is at least required for Posix hosts */ 332 if (iattr->ia_valid & ATTR_SIZE) 333 params.CreateFlags |= SHFL_CF_ACCESS_WRITE; 334 335 rc = VbglR0SfCreate(&client_handle, &sf_g->map, sf_i->path, ¶ms); 336 if (RT_FAILURE(rc)) 337 { 338 LogFunc(("VbglR0SfCreate(%s) failed rc=%Rrc\n", 339 sf_i->path->String.utf8, rc)); 340 err = -RTErrConvertToErrno(rc); 341 goto fail2; 342 } 343 if (params.Result != SHFL_FILE_EXISTS) 344 { 345 LogFunc(("file %s does not exist\n", sf_i->path->String.utf8)); 346 err = -ENOENT; 347 goto fail1; 348 } 349 350 /* Setting the file size and setting the other attributes has to be 351 * handled separately, see implementation of vbsfSetFSInfo() in 352 * vbsf.cpp */ 353 if (iattr->ia_valid & (ATTR_MODE | ATTR_ATIME | ATTR_MTIME)) 354 { 304 struct sf_glob_info *sf_g; 305 struct sf_inode_info *sf_i; 306 SHFLCREATEPARMS params; 307 SHFLFSOBJINFO info; 308 uint32_t cbBuffer; 309 int rc, err; 310 311 TRACE(); 312 313 sf_g = GET_GLOB_INFO(dentry->d_inode->i_sb); 314 sf_i = GET_INODE_INFO(dentry->d_inode); 315 err = 0; 316 317 RT_ZERO(params); 318 params.Handle = SHFL_HANDLE_NIL; 319 params.CreateFlags = SHFL_CF_ACT_OPEN_IF_EXISTS 320 | SHFL_CF_ACT_FAIL_IF_NEW | SHFL_CF_ACCESS_ATTR_WRITE; 321 322 /* this is at least required for Posix hosts */ 323 if (iattr->ia_valid & ATTR_SIZE) 324 params.CreateFlags |= SHFL_CF_ACCESS_WRITE; 325 326 rc = VbglR0SfCreate(&client_handle, &sf_g->map, sf_i->path, ¶ms); 327 if (RT_FAILURE(rc)) { 328 LogFunc(("VbglR0SfCreate(%s) failed rc=%Rrc\n", 329 sf_i->path->String.utf8, rc)); 330 err = -RTErrConvertToErrno(rc); 331 goto fail2; 332 } 333 if (params.Result != SHFL_FILE_EXISTS) { 334 LogFunc(("file %s does not exist\n", sf_i->path->String.utf8)); 335 err = -ENOENT; 336 goto fail1; 337 } 338 339 /* Setting the file size and setting the other attributes has to be 340 * handled separately, see implementation of vbsfSetFSInfo() in 341 * vbsf.cpp */ 342 if (iattr->ia_valid & (ATTR_MODE | ATTR_ATIME | ATTR_MTIME)) { 355 343 #define mode_set(r) ((iattr->ia_mode & (S_##r)) ? 
RTFS_UNIX_##r : 0) 356 344 357 358 if (iattr->ia_valid & ATTR_MODE) 359 { 360 info.Attr.fMode = mode_set(IRUSR);361 info.Attr.fMode |= mode_set(IWUSR);362 info.Attr.fMode |= mode_set(IXUSR);363 info.Attr.fMode |= mode_set(IRGRP);364 info.Attr.fMode |= mode_set(IWGRP);365 info.Attr.fMode |= mode_set(IXGRP);366 info.Attr.fMode |= mode_set(IROTH);367 info.Attr.fMode |= mode_set(IWOTH);368 info.Attr.fMode |= mode_set(IXOTH); 369 370 if (iattr->ia_mode & S_IFDIR) 371 info.Attr.fMode |= RTFS_TYPE_DIRECTORY; 372 else 373 info.Attr.fMode |= RTFS_TYPE_FILE; 374 } 375 376 if (iattr->ia_valid & ATTR_ATIME) 377 sf_timespec_from_ftime(&info.AccessTime,&iattr->ia_atime);378 379 sf_timespec_from_ftime(&info.ModificationTime, &iattr->ia_mtime); 380 /* ignore ctime (inode change time) as it can't be set from userland anyway */ 381 382 cbBuffer = sizeof(info); 383 rc = VbglR0SfFsInfo(&client_handle, &sf_g->map, params.Handle, 384 SHFL_INFO_SET | SHFL_INFO_FILE, &cbBuffer,385 (PSHFLDIRINFO)&info); 386 if (RT_FAILURE(rc)) 387 388 389 390 391 392 393 394 395 if (iattr->ia_valid & ATTR_SIZE) 396 { 397 RT_ZERO(info);398 info.cbObject = iattr->ia_size;399 cbBuffer = sizeof(info); 400 rc = VbglR0SfFsInfo(&client_handle, &sf_g->map, params.Handle,401 SHFL_INFO_SET | SHFL_INFO_SIZE, &cbBuffer, 402 (PSHFLDIRINFO)&info); 403 if (RT_FAILURE(rc)) 404 { 405 LogFunc(("VbglR0SfFsInfo(%s, SIZE) failed rc=%Rrc\n", 406 sf_i->path->String.utf8, rc));407 err = -RTErrConvertToErrno(rc); 408 goto fail1; 409 } 410 } 411 412 rc = VbglR0SfClose(&client_handle, &sf_g->map, params.Handle); 413 if (RT_FAILURE(rc)) 414 LogFunc(("VbglR0SfClose(%s) failed rc=%Rrc\n", sf_i->path->String.utf8, rc)); 415 416 return sf_inode_revalidate(dentry); 417 418 fail1: 419 rc = VbglR0SfClose(&client_handle, &sf_g->map, params.Handle); 420 if (RT_FAILURE(rc)) 421 LogFunc(("VbglR0SfClose(%s) failed rc=%Rrc\n",sf_i->path->String.utf8, rc));422 423 fail2:424 425 } 426 #endif 345 RT_ZERO(info); 346 if (iattr->ia_valid & ATTR_MODE) { 347 info.Attr.fMode = mode_set(IRUSR); 348 info.Attr.fMode |= mode_set(IWUSR); 349 info.Attr.fMode |= mode_set(IXUSR); 350 info.Attr.fMode |= mode_set(IRGRP); 351 info.Attr.fMode |= mode_set(IWGRP); 352 info.Attr.fMode |= mode_set(IXGRP); 353 info.Attr.fMode |= mode_set(IROTH); 354 info.Attr.fMode |= mode_set(IWOTH); 355 info.Attr.fMode |= mode_set(IXOTH); 356 357 if (iattr->ia_mode & S_IFDIR) 358 info.Attr.fMode |= RTFS_TYPE_DIRECTORY; 359 else 360 info.Attr.fMode |= RTFS_TYPE_FILE; 361 } 362 363 if (iattr->ia_valid & ATTR_ATIME) 364 sf_timespec_from_ftime(&info.AccessTime, 365 &iattr->ia_atime); 366 if (iattr->ia_valid & ATTR_MTIME) 367 sf_timespec_from_ftime(&info.ModificationTime, 368 &iattr->ia_mtime); 369 /* ignore ctime (inode change time) as it can't be set from userland anyway */ 370 371 cbBuffer = sizeof(info); 372 rc = VbglR0SfFsInfo(&client_handle, &sf_g->map, params.Handle, 373 SHFL_INFO_SET | SHFL_INFO_FILE, &cbBuffer, 374 (PSHFLDIRINFO) & info); 375 if (RT_FAILURE(rc)) { 376 LogFunc(("VbglR0SfFsInfo(%s, FILE) failed rc=%Rrc\n", 377 sf_i->path->String.utf8, rc)); 378 err = -RTErrConvertToErrno(rc); 379 goto fail1; 380 } 381 } 382 383 if (iattr->ia_valid & ATTR_SIZE) { 384 RT_ZERO(info); 385 info.cbObject = iattr->ia_size; 386 cbBuffer = sizeof(info); 387 rc = VbglR0SfFsInfo(&client_handle, &sf_g->map, params.Handle, 388 SHFL_INFO_SET | SHFL_INFO_SIZE, &cbBuffer, 389 (PSHFLDIRINFO) & info); 390 if (RT_FAILURE(rc)) { 391 LogFunc(("VbglR0SfFsInfo(%s, SIZE) failed rc=%Rrc\n", 392 sf_i->path->String.utf8, rc)); 393 err 
= -RTErrConvertToErrno(rc); 394 goto fail1; 395 } 396 } 397 398 rc = VbglR0SfClose(&client_handle, &sf_g->map, params.Handle); 399 if (RT_FAILURE(rc)) 400 LogFunc(("VbglR0SfClose(%s) failed rc=%Rrc\n", 401 sf_i->path->String.utf8, rc)); 402 403 return sf_inode_revalidate(dentry); 404 405 fail1: 406 rc = VbglR0SfClose(&client_handle, &sf_g->map, params.Handle); 407 if (RT_FAILURE(rc)) 408 LogFunc(("VbglR0SfClose(%s) failed rc=%Rrc\n", 409 sf_i->path->String.utf8, rc)); 410 411 fail2: 412 return err; 413 } 414 #endif /* >= 2.6.0 */ 427 415 428 416 static int sf_make_path(const char *caller, struct sf_inode_info *sf_i, 429 const char *d_name, size_t d_len, SHFLSTRING **result) 430 { 431 size_t path_len, shflstring_len; 432 SHFLSTRING *tmp; 433 uint16_t p_len; 434 uint8_t *p_name; 435 int fRoot = 0; 436 437 TRACE(); 438 p_len = sf_i->path->u16Length; 439 p_name = sf_i->path->String.utf8; 440 441 if (p_len == 1 && *p_name == '/') 442 { 443 path_len = d_len + 1; 444 fRoot = 1; 445 } 446 else 447 { 448 /* lengths of constituents plus terminating zero plus slash */ 449 path_len = p_len + d_len + 2; 450 if (path_len > 0xffff) 451 { 452 LogFunc(("path too long. caller=%s, path_len=%zu\n", caller, path_len)); 453 return -ENAMETOOLONG; 454 } 455 } 456 457 shflstring_len = offsetof(SHFLSTRING, String.utf8) + path_len; 458 tmp = kmalloc(shflstring_len, GFP_KERNEL); 459 if (!tmp) 460 { 461 LogRelFunc(("kmalloc failed, caller=%s\n", caller)); 462 return -ENOMEM; 463 } 464 tmp->u16Length = path_len - 1; 465 tmp->u16Size = path_len; 466 467 if (fRoot) 468 memcpy(&tmp->String.utf8[0], d_name, d_len + 1); 469 else 470 { 471 memcpy(&tmp->String.utf8[0], p_name, p_len); 472 tmp->String.utf8[p_len] = '/'; 473 memcpy(&tmp->String.utf8[p_len + 1], d_name, d_len); 474 tmp->String.utf8[p_len + 1 + d_len] = '\0'; 475 } 476 477 *result = tmp; 478 return 0; 417 const char *d_name, size_t d_len, SHFLSTRING ** result) 418 { 419 size_t path_len, shflstring_len; 420 SHFLSTRING *tmp; 421 uint16_t p_len; 422 uint8_t *p_name; 423 int fRoot = 0; 424 425 TRACE(); 426 p_len = sf_i->path->u16Length; 427 p_name = sf_i->path->String.utf8; 428 429 if (p_len == 1 && *p_name == '/') { 430 path_len = d_len + 1; 431 fRoot = 1; 432 } else { 433 /* lengths of constituents plus terminating zero plus slash */ 434 path_len = p_len + d_len + 2; 435 if (path_len > 0xffff) { 436 LogFunc(("path too long. 
caller=%s, path_len=%zu\n", 437 caller, path_len)); 438 return -ENAMETOOLONG; 439 } 440 } 441 442 shflstring_len = offsetof(SHFLSTRING, String.utf8) + path_len; 443 tmp = kmalloc(shflstring_len, GFP_KERNEL); 444 if (!tmp) { 445 LogRelFunc(("kmalloc failed, caller=%s\n", caller)); 446 return -ENOMEM; 447 } 448 tmp->u16Length = path_len - 1; 449 tmp->u16Size = path_len; 450 451 if (fRoot) 452 memcpy(&tmp->String.utf8[0], d_name, d_len + 1); 453 else { 454 memcpy(&tmp->String.utf8[0], p_name, p_len); 455 tmp->String.utf8[p_len] = '/'; 456 memcpy(&tmp->String.utf8[p_len + 1], d_name, d_len); 457 tmp->String.utf8[p_len + 1 + d_len] = '\0'; 458 } 459 460 *result = tmp; 461 return 0; 479 462 } 480 463 … … 485 468 */ 486 469 int sf_path_from_dentry(const char *caller, struct sf_glob_info *sf_g, 487 struct sf_inode_info *sf_i, struct dentry *dentry, 488 SHFLSTRING **result) 489 { 490 int err; 491 const char *d_name; 492 size_t d_len; 493 const char *name; 494 size_t len = 0; 495 496 TRACE(); 497 d_name = dentry->d_name.name; 498 d_len = dentry->d_name.len; 499 500 if (sf_g->nls) 501 { 502 size_t in_len, i, out_bound_len; 503 const char *in; 504 char *out; 505 506 in = d_name; 507 in_len = d_len; 508 509 out_bound_len = PATH_MAX; 510 out = kmalloc(out_bound_len, GFP_KERNEL); 511 name = out; 512 513 for (i = 0; i < d_len; ++i) 514 { 515 /* We renamed the linux kernel wchar_t type to linux_wchar_t in 516 the-linux-kernel.h, as it conflicts with the C++ type of that name. */ 517 linux_wchar_t uni; 518 int nb; 519 520 nb = sf_g->nls->char2uni(in, in_len, &uni); 521 if (nb < 0) 522 { 523 LogFunc(("nls->char2uni failed %x %d\n", 524 *in, in_len)); 525 err = -EINVAL; 526 goto fail1; 527 } 528 in_len -= nb; 529 in += nb; 470 struct sf_inode_info *sf_i, struct dentry *dentry, 471 SHFLSTRING ** result) 472 { 473 int err; 474 const char *d_name; 475 size_t d_len; 476 const char *name; 477 size_t len = 0; 478 479 TRACE(); 480 d_name = dentry->d_name.name; 481 d_len = dentry->d_name.len; 482 483 if (sf_g->nls) { 484 size_t in_len, i, out_bound_len; 485 const char *in; 486 char *out; 487 488 in = d_name; 489 in_len = d_len; 490 491 out_bound_len = PATH_MAX; 492 out = kmalloc(out_bound_len, GFP_KERNEL); 493 name = out; 494 495 for (i = 0; i < d_len; ++i) { 496 /* We renamed the linux kernel wchar_t type to linux_wchar_t in 497 the-linux-kernel.h, as it conflicts with the C++ type of that name. 
*/ 498 linux_wchar_t uni; 499 int nb; 500 501 nb = sf_g->nls->char2uni(in, in_len, &uni); 502 if (nb < 0) { 503 LogFunc(("nls->char2uni failed %x %d\n", 504 *in, in_len)); 505 err = -EINVAL; 506 goto fail1; 507 } 508 in_len -= nb; 509 in += nb; 530 510 531 511 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) 532 nb = utf32_to_utf8(uni, out, out_bound_len); 533 #else 534 nb = utf8_wctomb(out, uni, out_bound_len); 535 #endif 536 if (nb < 0) 537 { 538 LogFunc(("nls->uni2char failed %x %d\n", 539 uni, out_bound_len)); 540 err = -EINVAL; 541 goto fail1; 542 } 543 out_bound_len -= nb; 544 out += nb; 545 len += nb; 546 } 547 if (len >= PATH_MAX - 1) 548 { 549 err = -ENAMETOOLONG; 550 goto fail1; 551 } 552 553 LogFunc(("result(%d) = %.*s\n", len, len, name)); 554 *out = 0; 555 } 556 else 557 { 558 name = d_name; 559 len = d_len; 560 } 561 562 err = sf_make_path(caller, sf_i, name, len, result); 563 if (name != d_name) 564 kfree(name); 565 566 return err; 567 568 fail1: 569 kfree(name); 570 return err; 512 nb = utf32_to_utf8(uni, out, out_bound_len); 513 #else 514 nb = utf8_wctomb(out, uni, out_bound_len); 515 #endif 516 if (nb < 0) { 517 LogFunc(("nls->uni2char failed %x %d\n", 518 uni, out_bound_len)); 519 err = -EINVAL; 520 goto fail1; 521 } 522 out_bound_len -= nb; 523 out += nb; 524 len += nb; 525 } 526 if (len >= PATH_MAX - 1) { 527 err = -ENAMETOOLONG; 528 goto fail1; 529 } 530 531 LogFunc(("result(%d) = %.*s\n", len, len, name)); 532 *out = 0; 533 } else { 534 name = d_name; 535 len = d_len; 536 } 537 538 err = sf_make_path(caller, sf_i, name, len, result); 539 if (name != d_name) 540 kfree(name); 541 542 return err; 543 544 fail1: 545 kfree(name); 546 return err; 571 547 } 572 548 573 549 int sf_nlscpy(struct sf_glob_info *sf_g, 574 char *name, size_t name_bound_len, 575 const unsigned char *utf8_name, size_t utf8_len) 576 { 577 if (sf_g->nls) 578 { 579 const char *in; 580 char *out; 581 size_t out_len; 582 size_t out_bound_len; 583 size_t in_bound_len; 584 585 in = utf8_name; 586 in_bound_len = utf8_len; 587 588 out = name; 589 out_len = 0; 590 out_bound_len = name_bound_len; 591 592 while (in_bound_len) 593 { 594 int nb; 550 char *name, size_t name_bound_len, 551 const unsigned char *utf8_name, size_t utf8_len) 552 { 553 if (sf_g->nls) { 554 const char *in; 555 char *out; 556 size_t out_len; 557 size_t out_bound_len; 558 size_t in_bound_len; 559 560 in = utf8_name; 561 in_bound_len = utf8_len; 562 563 out = name; 564 out_len = 0; 565 out_bound_len = name_bound_len; 566 567 while (in_bound_len) { 568 int nb; 595 569 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) 596 unicode_t uni; 597 598 nb = utf8_to_utf32(in, in_bound_len, &uni); 599 #else 600 linux_wchar_t uni; 601 602 nb = utf8_mbtowc(&uni, in, in_bound_len); 603 #endif 604 if (nb < 0) 605 { 606 LogFunc(("utf8_mbtowc failed(%s) %x:%d\n", 607 (const char *) utf8_name, *in, in_bound_len)); 608 return -EINVAL; 609 } 610 in += nb; 611 in_bound_len -= nb; 612 613 nb = sf_g->nls->uni2char(uni, out, out_bound_len); 614 if (nb < 0) 615 { 616 LogFunc(("nls->uni2char failed(%s) %x:%d\n", 617 utf8_name, uni, out_bound_len)); 618 return nb; 619 } 620 out += nb; 621 out_bound_len -= nb; 622 out_len += nb; 623 } 624 625 *out = 0; 626 } 627 else 628 { 629 if (utf8_len + 1 > name_bound_len) 630 return -ENAMETOOLONG; 631 632 memcpy(name, utf8_name, utf8_len + 1); 633 } 634 return 0; 570 unicode_t uni; 571 572 nb = utf8_to_utf32(in, in_bound_len, &uni); 573 #else 574 linux_wchar_t uni; 575 576 nb = utf8_mbtowc(&uni, in, in_bound_len); 577 #endif 
578 if (nb < 0) { 579 LogFunc(("utf8_mbtowc failed(%s) %x:%d\n", 580 (const char *)utf8_name, *in, 581 in_bound_len)); 582 return -EINVAL; 583 } 584 in += nb; 585 in_bound_len -= nb; 586 587 nb = sf_g->nls->uni2char(uni, out, out_bound_len); 588 if (nb < 0) { 589 LogFunc(("nls->uni2char failed(%s) %x:%d\n", 590 utf8_name, uni, out_bound_len)); 591 return nb; 592 } 593 out += nb; 594 out_bound_len -= nb; 595 out_len += nb; 596 } 597 598 *out = 0; 599 } else { 600 if (utf8_len + 1 > name_bound_len) 601 return -ENAMETOOLONG; 602 603 memcpy(name, utf8_name, utf8_len + 1); 604 } 605 return 0; 635 606 } 636 607 637 608 static struct sf_dir_buf *sf_dir_buf_alloc(void) 638 609 { 639 struct sf_dir_buf *b; 640 641 TRACE(); 642 b = kmalloc(sizeof(*b), GFP_KERNEL); 643 if (!b) 644 { 645 LogRelFunc(("could not alloc directory buffer\n")); 646 return NULL; 647 } 648 610 struct sf_dir_buf *b; 611 612 TRACE(); 613 b = kmalloc(sizeof(*b), GFP_KERNEL); 614 if (!b) { 615 LogRelFunc(("could not alloc directory buffer\n")); 616 return NULL; 617 } 649 618 #ifdef USE_VMALLOC 650 b->buf = vmalloc(DIR_BUFFER_SIZE); 651 #else 652 b->buf = kmalloc(DIR_BUFFER_SIZE, GFP_KERNEL); 653 #endif 654 if (!b->buf) 655 { 656 kfree(b); 657 LogRelFunc(("could not alloc directory buffer storage\n")); 658 return NULL; 659 } 660 661 INIT_LIST_HEAD(&b->head); 662 b->cEntries = 0; 663 b->cbUsed = 0; 664 b->cbFree = DIR_BUFFER_SIZE; 665 return b; 619 b->buf = vmalloc(DIR_BUFFER_SIZE); 620 #else 621 b->buf = kmalloc(DIR_BUFFER_SIZE, GFP_KERNEL); 622 #endif 623 if (!b->buf) { 624 kfree(b); 625 LogRelFunc(("could not alloc directory buffer storage\n")); 626 return NULL; 627 } 628 629 INIT_LIST_HEAD(&b->head); 630 b->cEntries = 0; 631 b->cbUsed = 0; 632 b->cbFree = DIR_BUFFER_SIZE; 633 return b; 666 634 } 667 635 668 636 static void sf_dir_buf_free(struct sf_dir_buf *b) 669 637 { 670 671 672 673 638 BUG_ON(!b || !b->buf); 639 640 TRACE(); 641 list_del(&b->head); 674 642 #ifdef USE_VMALLOC 675 676 #else 677 678 #endif 679 643 vfree(b->buf); 644 #else 645 kfree(b->buf); 646 #endif 647 kfree(b); 680 648 } 681 649 … … 685 653 void sf_dir_info_free(struct sf_dir_info *p) 686 654 { 687 struct list_head *list, *pos, *tmp; 688 689 TRACE(); 690 list = &p->info_list; 691 list_for_each_safe(pos, tmp, list) 692 { 693 struct sf_dir_buf *b; 694 695 b = list_entry(pos, struct sf_dir_buf, head); 696 sf_dir_buf_free(b); 697 } 698 kfree(p); 655 struct list_head *list, *pos, *tmp; 656 657 TRACE(); 658 list = &p->info_list; 659 list_for_each_safe(pos, tmp, list) { 660 struct sf_dir_buf *b; 661 662 b = list_entry(pos, struct sf_dir_buf, head); 663 sf_dir_buf_free(b); 664 } 665 kfree(p); 699 666 } 700 667 … … 704 671 void sf_dir_info_empty(struct sf_dir_info *p) 705 672 { 706 struct list_head *list, *pos, *tmp; 707 TRACE(); 708 list = &p->info_list; 709 list_for_each_safe(pos, tmp, list) 710 { 711 struct sf_dir_buf *b; 712 b = list_entry(pos, struct sf_dir_buf, head); 713 b->cEntries = 0; 714 b->cbUsed = 0; 715 b->cbFree = DIR_BUFFER_SIZE; 716 } 673 struct list_head *list, *pos, *tmp; 674 TRACE(); 675 list = &p->info_list; 676 list_for_each_safe(pos, tmp, list) { 677 struct sf_dir_buf *b; 678 b = list_entry(pos, struct sf_dir_buf, head); 679 b->cEntries = 0; 680 b->cbUsed = 0; 681 b->cbFree = DIR_BUFFER_SIZE; 682 } 717 683 } 718 684 … … 722 688 struct sf_dir_info *sf_dir_info_alloc(void) 723 689 { 724 struct sf_dir_info *p; 725 726 TRACE(); 727 p = kmalloc(sizeof(*p), GFP_KERNEL); 728 if (!p) 729 { 730 LogRelFunc(("could not alloc directory info\n")); 731 
return NULL; 732 } 733 734 INIT_LIST_HEAD(&p->info_list); 735 return p; 690 struct sf_dir_info *p; 691 692 TRACE(); 693 p = kmalloc(sizeof(*p), GFP_KERNEL); 694 if (!p) { 695 LogRelFunc(("could not alloc directory info\n")); 696 return NULL; 697 } 698 699 INIT_LIST_HEAD(&p->info_list); 700 return p; 736 701 } 737 702 … … 741 706 static struct sf_dir_buf *sf_get_empty_dir_buf(struct sf_dir_info *sf_d) 742 707 { 743 struct list_head *list, *pos; 744 745 list = &sf_d->info_list; 746 list_for_each(pos, list) 747 { 748 struct sf_dir_buf *b; 749 750 b = list_entry(pos, struct sf_dir_buf, head); 751 if (!b) 752 return NULL; 753 else 754 { 755 if (b->cbUsed == 0) 756 return b; 757 } 758 } 759 760 return NULL; 708 struct list_head *list, *pos; 709 710 list = &sf_d->info_list; 711 list_for_each(pos, list) { 712 struct sf_dir_buf *b; 713 714 b = list_entry(pos, struct sf_dir_buf, head); 715 if (!b) 716 return NULL; 717 else { 718 if (b->cbUsed == 0) 719 return b; 720 } 721 } 722 723 return NULL; 761 724 } 762 725 763 726 int sf_dir_read_all(struct sf_glob_info *sf_g, struct sf_inode_info *sf_i, 764 struct sf_dir_info *sf_d, SHFLHANDLE handle) 765 { 766 int err; 767 SHFLSTRING *mask; 768 struct sf_dir_buf *b; 769 770 TRACE(); 771 err = sf_make_path(__func__, sf_i, "*", 1, &mask); 772 if (err) 773 goto fail0; 774 775 for (;;) 776 { 777 int rc; 778 void *buf; 779 uint32_t cbSize; 780 uint32_t cEntries; 781 782 b = sf_get_empty_dir_buf(sf_d); 783 if (!b) 784 { 785 b = sf_dir_buf_alloc(); 786 if (!b) 787 { 788 err = -ENOMEM; 789 LogRelFunc(("could not alloc directory buffer\n")); 790 goto fail1; 791 } 792 list_add(&b->head, &sf_d->info_list); 793 } 794 795 buf = b->buf; 796 cbSize = b->cbFree; 797 798 rc = VbglR0SfDirInfo(&client_handle, &sf_g->map, handle, mask, 799 0, 0, &cbSize, buf, &cEntries); 800 switch (rc) 801 { 802 case VINF_SUCCESS: 803 RT_FALL_THRU(); 804 case VERR_NO_MORE_FILES: 805 break; 806 case VERR_NO_TRANSLATION: 807 LogFunc(("host could not translate entry\n")); 808 /* XXX */ 809 break; 810 default: 811 err = -RTErrConvertToErrno(rc); 812 LogFunc(("VbglR0SfDirInfo failed rc=%Rrc\n", rc)); 813 goto fail1; 814 } 815 816 b->cEntries += cEntries; 817 b->cbFree -= cbSize; 818 b->cbUsed += cbSize; 819 820 if (RT_FAILURE(rc)) 821 break; 822 } 823 err = 0; 824 825 fail1: 826 kfree(mask); 827 828 fail0: 829 return err; 830 } 831 832 int sf_get_volume_info(struct super_block *sb, STRUCT_STATFS *stat) 833 { 834 struct sf_glob_info *sf_g; 835 SHFLVOLINFO SHFLVolumeInfo; 836 uint32_t cbBuffer; 837 int rc; 838 839 sf_g = GET_GLOB_INFO(sb); 840 cbBuffer = sizeof(SHFLVolumeInfo); 841 rc = VbglR0SfFsInfo(&client_handle, &sf_g->map, 0, SHFL_INFO_GET | SHFL_INFO_VOLUME, 842 &cbBuffer, (PSHFLDIRINFO)&SHFLVolumeInfo); 843 if (RT_FAILURE(rc)) 844 return -RTErrConvertToErrno(rc); 845 846 stat->f_type = NFS_SUPER_MAGIC; /* XXX vboxsf type? 
*/ 847 stat->f_bsize = SHFLVolumeInfo.ulBytesPerAllocationUnit; 848 stat->f_blocks = SHFLVolumeInfo.ullTotalAllocationBytes 849 / SHFLVolumeInfo.ulBytesPerAllocationUnit; 850 stat->f_bfree = SHFLVolumeInfo.ullAvailableAllocationBytes 851 / SHFLVolumeInfo.ulBytesPerAllocationUnit; 852 stat->f_bavail = SHFLVolumeInfo.ullAvailableAllocationBytes 853 / SHFLVolumeInfo.ulBytesPerAllocationUnit; 854 stat->f_files = 1000; 855 stat->f_ffree = 1000; /* don't return 0 here since the guest may think 856 * that it is not possible to create any more files */ 857 stat->f_fsid.val[0] = 0; 858 stat->f_fsid.val[1] = 0; 859 stat->f_namelen = 255; 860 return 0; 861 } 862 863 struct dentry_operations sf_dentry_ops = 864 { 865 .d_revalidate = sf_dentry_revalidate 727 struct sf_dir_info *sf_d, SHFLHANDLE handle) 728 { 729 int err; 730 SHFLSTRING *mask; 731 struct sf_dir_buf *b; 732 733 TRACE(); 734 err = sf_make_path(__func__, sf_i, "*", 1, &mask); 735 if (err) 736 goto fail0; 737 738 for (;;) { 739 int rc; 740 void *buf; 741 uint32_t cbSize; 742 uint32_t cEntries; 743 744 b = sf_get_empty_dir_buf(sf_d); 745 if (!b) { 746 b = sf_dir_buf_alloc(); 747 if (!b) { 748 err = -ENOMEM; 749 LogRelFunc(("could not alloc directory buffer\n")); 750 goto fail1; 751 } 752 list_add(&b->head, &sf_d->info_list); 753 } 754 755 buf = b->buf; 756 cbSize = b->cbFree; 757 758 rc = VbglR0SfDirInfo(&client_handle, &sf_g->map, handle, mask, 759 0, 0, &cbSize, buf, &cEntries); 760 switch (rc) { 761 case VINF_SUCCESS: 762 RT_FALL_THRU(); 763 case VERR_NO_MORE_FILES: 764 break; 765 case VERR_NO_TRANSLATION: 766 LogFunc(("host could not translate entry\n")); 767 /* XXX */ 768 break; 769 default: 770 err = -RTErrConvertToErrno(rc); 771 LogFunc(("VbglR0SfDirInfo failed rc=%Rrc\n", rc)); 772 goto fail1; 773 } 774 775 b->cEntries += cEntries; 776 b->cbFree -= cbSize; 777 b->cbUsed += cbSize; 778 779 if (RT_FAILURE(rc)) 780 break; 781 } 782 err = 0; 783 784 fail1: 785 kfree(mask); 786 787 fail0: 788 return err; 789 } 790 791 int sf_get_volume_info(struct super_block *sb, STRUCT_STATFS * stat) 792 { 793 struct sf_glob_info *sf_g; 794 SHFLVOLINFO SHFLVolumeInfo; 795 uint32_t cbBuffer; 796 int rc; 797 798 sf_g = GET_GLOB_INFO(sb); 799 cbBuffer = sizeof(SHFLVolumeInfo); 800 rc = VbglR0SfFsInfo(&client_handle, &sf_g->map, 0, 801 SHFL_INFO_GET | SHFL_INFO_VOLUME, &cbBuffer, 802 (PSHFLDIRINFO) & SHFLVolumeInfo); 803 if (RT_FAILURE(rc)) 804 return -RTErrConvertToErrno(rc); 805 806 stat->f_type = NFS_SUPER_MAGIC; /* XXX vboxsf type? 
*/ 807 stat->f_bsize = SHFLVolumeInfo.ulBytesPerAllocationUnit; 808 stat->f_blocks = SHFLVolumeInfo.ullTotalAllocationBytes 809 / SHFLVolumeInfo.ulBytesPerAllocationUnit; 810 stat->f_bfree = SHFLVolumeInfo.ullAvailableAllocationBytes 811 / SHFLVolumeInfo.ulBytesPerAllocationUnit; 812 stat->f_bavail = SHFLVolumeInfo.ullAvailableAllocationBytes 813 / SHFLVolumeInfo.ulBytesPerAllocationUnit; 814 stat->f_files = 1000; 815 stat->f_ffree = 1000; /* don't return 0 here since the guest may think 816 * that it is not possible to create any more files */ 817 stat->f_fsid.val[0] = 0; 818 stat->f_fsid.val[1] = 0; 819 stat->f_namelen = 255; 820 return 0; 821 } 822 823 struct dentry_operations sf_dentry_ops = { 824 .d_revalidate = sf_dentry_revalidate 866 825 }; 867 826 868 827 int sf_init_backing_dev(struct sf_glob_info *sf_g) 869 828 { 870 829 int rc = 0; 871 830 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) && LINUX_VERSION_CODE <= KERNEL_VERSION(3, 19, 0) 872 873 874 875 876 877 sf_g->bdi.ra_pages = 0;/* No readahead */878 # 879 sf_g->bdi.capabilities = BDI_CAP_MAP_DIRECT/* MAP_SHARED */880 | BDI_CAP_MAP_COPY/* MAP_PRIVATE */881 | BDI_CAP_READ_MAP/* can be mapped for reading */882 | BDI_CAP_WRITE_MAP/* can be mapped for writing */883 | BDI_CAP_EXEC_MAP;/* can be mapped for execution */884 # endif/* >= 2.6.12 */885 # 886 887 # 888 889 890 891 # endif/* >= 2.6.26 */892 # endif/* >= 2.6.24 */893 #endif 894 831 /* Each new shared folder map gets a new uint64_t identifier, 832 * allocated in sequence. We ASSUME the sequence will not wrap. */ 833 static uint64_t s_u64Sequence = 0; 834 uint64_t u64CurrentSequence = ASMAtomicIncU64(&s_u64Sequence); 835 836 sf_g->bdi.ra_pages = 0; /* No readahead */ 837 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 12) 838 sf_g->bdi.capabilities = BDI_CAP_MAP_DIRECT /* MAP_SHARED */ 839 | BDI_CAP_MAP_COPY /* MAP_PRIVATE */ 840 | BDI_CAP_READ_MAP /* can be mapped for reading */ 841 | BDI_CAP_WRITE_MAP /* can be mapped for writing */ 842 | BDI_CAP_EXEC_MAP; /* can be mapped for execution */ 843 #endif /* >= 2.6.12 */ 844 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) 845 rc = bdi_init(&sf_g->bdi); 846 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26) 847 if (!rc) 848 rc = bdi_register(&sf_g->bdi, NULL, "vboxsf-%llu", 849 (unsigned long long)u64CurrentSequence); 850 #endif /* >= 2.6.26 */ 851 #endif /* >= 2.6.24 */ 852 #endif /* >= 2.6.0 && <= 3.19.0 */ 853 return rc; 895 854 } 896 855 … … 898 857 { 899 858 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) && LINUX_VERSION_CODE <= KERNEL_VERSION(3, 19, 0) 900 bdi_destroy(&sf_g->bdi);/* includes bdi_unregister() */901 #endif 902 } 859 bdi_destroy(&sf_g->bdi); /* includes bdi_unregister() */ 860 #endif 861 } -
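sf_make_path() in utils.c above concatenates the parent path stored in the inode info with a directory entry name into a freshly allocated SHFLSTRING, keeping u16Length (excluding the terminator) and u16Size (including it) consistent. Below is an illustrative userspace sketch of that buffer layout, with malloc() standing in for kmalloc() and EXAMPLE_SHFLSTRING as a simplified stand-in for the real SHFLSTRING from shflsvc.h; the root-directory ("/" parent) special case is omitted.

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct EXAMPLE_SHFLSTRING {
        uint16_t u16Size;       /* bytes allocated for utf8[], including '\0' */
        uint16_t u16Length;     /* string length in bytes, excluding '\0' */
        char     utf8[1];
} EXAMPLE_SHFLSTRING;

static EXAMPLE_SHFLSTRING *example_make_path(const char *parent, const char *child)
{
        size_t p_len = strlen(parent);
        size_t d_len = strlen(child);
        size_t path_len = p_len + d_len + 2;    /* slash plus terminating zero */
        EXAMPLE_SHFLSTRING *tmp;

        if (path_len > 0xffff)                  /* the u16 fields cap the total size */
                return NULL;

        tmp = malloc(offsetof(EXAMPLE_SHFLSTRING, utf8) + path_len);
        if (!tmp)
                return NULL;

        tmp->u16Length = (uint16_t)(path_len - 1);
        tmp->u16Size   = (uint16_t)path_len;
        memcpy(tmp->utf8, parent, p_len);
        tmp->utf8[p_len] = '/';
        memcpy(tmp->utf8 + p_len + 1, child, d_len + 1); /* copies the '\0' too */
        return tmp;
}

Joining "/foo" and "bar", for instance, yields the nine-byte buffer "/foo/bar" plus terminator with u16Length = 8 and u16Size = 9, matching the path_len - 1 / path_len assignments in sf_make_path().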
trunk/src/VBox/Additions/linux/sharedfolders/vbsfmount.h
r69500 r70786 26 26 #define VBSF_MOUNT_SIGNATURE_BYTE_2 '\375' 27 27 28 struct vbsf_mount_info_new 29 { 30 /* 31 * The old version of the mount_info struct started with a 32 * char name[MAX_HOST_NAME] field, where name cannot be '\0'. 33 * So the new version of the mount_info struct starts with a 34 * nullchar field which is always 0 so that we can detect and 35 * reject the old structure being passed. 36 */ 37 char nullchar; 38 char signature[3]; /* signature */ 39 int length; /* length of the whole structure */ 40 char name[MAX_HOST_NAME]; /* share name */ 41 char nls_name[MAX_NLS_NAME];/* name of an I/O charset */ 42 int uid; /* user ID for all entries, default 0=root */ 43 int gid; /* group ID for all entries, default 0=root */ 44 int ttl; /* time to live */ 45 int dmode; /* mode for directories if != 0xffffffff */ 46 int fmode; /* mode for regular files if != 0xffffffff */ 47 int dmask; /* umask applied to directories */ 48 int fmask; /* umask applied to regular files */ 28 struct vbsf_mount_info_new { 29 /* 30 * The old version of the mount_info struct started with a 31 * char name[MAX_HOST_NAME] field, where name cannot be '\0'. 32 * So the new version of the mount_info struct starts with a 33 * nullchar field which is always 0 so that we can detect and 34 * reject the old structure being passed. 35 */ 36 char nullchar; 37 char signature[3]; /* signature */ 38 int length; /* length of the whole structure */ 39 char name[MAX_HOST_NAME]; /* share name */ 40 char nls_name[MAX_NLS_NAME]; /* name of an I/O charset */ 41 int uid; /* user ID for all entries, default 0=root */ 42 int gid; /* group ID for all entries, default 0=root */ 43 int ttl; /* time to live */ 44 int dmode; /* mode for directories if != 0xffffffff */ 45 int fmode; /* mode for regular files if != 0xffffffff */ 46 int dmask; /* umask applied to directories */ 47 int fmask; /* umask applied to regular files */ 49 48 }; 50 49 51 struct vbsf_mount_opts 52 { 53 int uid; 54 int gid; 55 int ttl; 56 int dmode; 57 int fmode; 58 int dmask; 59 int fmask; 60 int ronly; 61 int sloppy; 62 int noexec; 63 int nodev; 64 int nosuid; 65 int remount; 66 char nls_name[MAX_NLS_NAME]; 67 char *convertcp; 50 struct vbsf_mount_opts { 51 int uid; 52 int gid; 53 int ttl; 54 int dmode; 55 int fmode; 56 int dmask; 57 int fmask; 58 int ronly; 59 int sloppy; 60 int noexec; 61 int nodev; 62 int nosuid; 63 int remount; 64 char nls_name[MAX_NLS_NAME]; 65 char *convertcp; 68 66 }; 69 67 70 68 /** Completes the mount operation by adding the new mount point to mtab if required. */ 71 69 int vbsfmount_complete(const char *host_name, const char *mount_point, 72 70 unsigned long flags, struct vbsf_mount_opts *opts); 73 71 74 #endif 72 #endif /* vbsfmount.h */ -
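The nullchar/signature/length header of struct vbsf_mount_info_new above is what lets the kernel module tell the new mount structure apart from the old one, which began directly with name[MAX_HOST_NAME]. The sketch below is hypothetical and not part of the changeset or of mount.vboxsf; it only illustrates how a mount helper could fill in that header so that sf_glob_alloc() in vfsmod.c below accepts it. The VBSF_MOUNT_SIGNATURE_BYTE_* macros and the structure fields are the ones defined in this header.

#include <string.h>

static void example_init_mount_info(struct vbsf_mount_info_new *info,
                                    const char *share_name)
{
        memset(info, 0, sizeof(*info));
        info->nullchar     = '\0';  /* the old layout started with a non-zero name byte */
        info->signature[0] = VBSF_MOUNT_SIGNATURE_BYTE_0;
        info->signature[1] = VBSF_MOUNT_SIGNATURE_BYTE_1;
        info->signature[2] = VBSF_MOUNT_SIGNATURE_BYTE_2;
        info->length       = sizeof(*info);     /* tells the module the new fields exist */
        strncpy(info->name, share_name, sizeof(info->name) - 1);
        info->dmode = ~0;                       /* 0xffffffff means "not specified" */
        info->fmode = ~0;
}

sf_glob_alloc() below then uses the length field to decide whether the dmode/fmode/dmask/fmask members were supplied, falling back to ~0 for dmode and fmode otherwise.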
trunk/src/VBox/Additions/linux/sharedfolders/vfsmod.c
r69500 r70786

/* allocate global info, try to map host share */
static int sf_glob_alloc(struct vbsf_mount_info_new *info,
                         struct sf_glob_info **sf_gp)
{
        int err, rc;
        SHFLSTRING *str_name;
        size_t name_len, str_len;
        struct sf_glob_info *sf_g;

        TRACE();
        sf_g = kmalloc(sizeof(*sf_g), GFP_KERNEL);
        if (!sf_g) {
                err = -ENOMEM;
                LogRelFunc(("could not allocate memory for global info\n"));
                goto fail0;
        }

        RT_ZERO(*sf_g);

        if (info->nullchar != '\0'
            || info->signature[0] != VBSF_MOUNT_SIGNATURE_BYTE_0
            || info->signature[1] != VBSF_MOUNT_SIGNATURE_BYTE_1
            || info->signature[2] != VBSF_MOUNT_SIGNATURE_BYTE_2) {
                err = -EINVAL;
                goto fail1;
        }

        info->name[sizeof(info->name) - 1] = 0;
        info->nls_name[sizeof(info->nls_name) - 1] = 0;

        name_len = strlen(info->name);
        str_len = offsetof(SHFLSTRING, String.utf8) + name_len + 1;
        str_name = kmalloc(str_len, GFP_KERNEL);
        if (!str_name) {
                err = -ENOMEM;
                LogRelFunc(("could not allocate memory for host name\n"));
                goto fail1;
        }

        str_name->u16Length = name_len;
        str_name->u16Size = name_len + 1;
        memcpy(str_name->String.utf8, info->name, name_len + 1);

#define _IS_UTF8(_str) \
        (strcmp(_str, "utf8") == 0)
#define _IS_EMPTY(_str) \
        (strcmp(_str, "") == 0)

        /* Check if NLS charset is valid and not points to UTF8 table */
        if (info->nls_name[0]) {
                if (_IS_UTF8(info->nls_name))
                        sf_g->nls = NULL;
                else {
                        sf_g->nls = load_nls(info->nls_name);
                        if (!sf_g->nls) {
                                err = -EINVAL;
                                LogFunc(("failed to load nls %s\n",
                                         info->nls_name));
                                kfree(str_name);
                                goto fail1;
                        }
                }
        } else {
#ifdef CONFIG_NLS_DEFAULT
                /* If no NLS charset specified, try to load the default
                 * one if it's not points to UTF8. */
                if (!_IS_UTF8(CONFIG_NLS_DEFAULT)
                    && !_IS_EMPTY(CONFIG_NLS_DEFAULT))
                        sf_g->nls = load_nls_default();
                else
                        sf_g->nls = NULL;
#else
                sf_g->nls = NULL;
#endif

#undef _IS_UTF8
#undef _IS_EMPTY
        }

        rc = VbglR0SfMapFolder(&client_handle, str_name, &sf_g->map);
        kfree(str_name);

        if (RT_FAILURE(rc)) {
                err = -EPROTO;
                LogFunc(("VbglR0SfMapFolder failed rc=%d\n", rc));
                goto fail2;
        }

        sf_g->ttl = info->ttl;
        sf_g->uid = info->uid;
        sf_g->gid = info->gid;

        if ((unsigned)info->length >= sizeof(struct vbsf_mount_info_new)) {
                /* new fields */
                sf_g->dmode = info->dmode;
                sf_g->fmode = info->fmode;
                sf_g->dmask = info->dmask;
                sf_g->fmask = info->fmask;
        } else {
                sf_g->dmode = ~0;
                sf_g->fmode = ~0;
        }

        *sf_gp = sf_g;
        return 0;

fail2:
        if (sf_g->nls)
                unload_nls(sf_g->nls);

fail1:
        kfree(sf_g);

fail0:
        return err;
}

/* unmap the share and free global info [sf_g] */
static void sf_glob_free(struct sf_glob_info *sf_g)
{
        int rc;

        TRACE();
        rc = VbglR0SfUnmapFolder(&client_handle, &sf_g->map);
        if (RT_FAILURE(rc))
                LogFunc(("VbglR0SfUnmapFolder failed rc=%d\n", rc));

        if (sf_g->nls)
                unload_nls(sf_g->nls);

        kfree(sf_g);
}

…

static int sf_read_super_aux(struct super_block *sb, void *data, int flags)
{
        int err;
        struct dentry *droot;
        struct inode *iroot;
        struct sf_inode_info *sf_i;
        struct sf_glob_info *sf_g;
        SHFLFSOBJINFO fsinfo;
        struct vbsf_mount_info_new *info;
        bool fInodePut = true;

        TRACE();
        if (!data) {
                LogFunc(("no mount info specified\n"));
                return -EINVAL;
        }

        info = data;

        if (flags & MS_REMOUNT) {
                LogFunc(("remounting is not supported\n"));
                return -ENOSYS;
        }

        err = sf_glob_alloc(info, &sf_g);
        if (err)
                goto fail0;

        sf_i = kmalloc(sizeof(*sf_i), GFP_KERNEL);
        if (!sf_i) {
                err = -ENOMEM;
                LogRelFunc(("could not allocate memory for root inode info\n"));
                goto fail1;
        }

        sf_i->handle = SHFL_HANDLE_NIL;
        sf_i->path = kmalloc(sizeof(SHFLSTRING) + 1, GFP_KERNEL);
        if (!sf_i->path) {
                err = -ENOMEM;
                LogRelFunc(("could not allocate memory for root inode path\n"));
                goto fail2;
        }

        sf_i->path->u16Length = 1;
        sf_i->path->u16Size = 2;
        sf_i->path->String.utf8[0] = '/';
        sf_i->path->String.utf8[1] = 0;
        sf_i->force_reread = 0;

        err = sf_stat(__func__, sf_g, sf_i->path, &fsinfo, 0);
        if (err) {
                LogFunc(("could not stat root of share\n"));
                goto fail3;
        }

        sb->s_magic = 0xface;
        sb->s_blocksize = 1024;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 3)
        /* Required for seek/sendfile.
         *
         * Must by less than or equal to INT64_MAX despite the fact that the
         * declaration of this variable is unsigned long long. See determination
         * of 'loff_t max' in fs/read_write.c / do_sendfile(). I don't know the
         * correct limit but MAX_LFS_FILESIZE (8TB-1 on 32-bit boxes) takes the
         * page cache into account and is the suggested limit. */
#if defined MAX_LFS_FILESIZE
        sb->s_maxbytes = MAX_LFS_FILESIZE;
#else
        sb->s_maxbytes = 0x7fffffffffffffffULL;
#endif
#endif
        sb->s_op = &sf_super_ops;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
        iroot = iget_locked(sb, 0);
#else
        iroot = iget(sb, 0);
#endif
        if (!iroot) {
                err = -ENOMEM;  /* XXX */
                LogFunc(("could not get root inode\n"));
                goto fail3;
        }

        if (sf_init_backing_dev(sf_g)) {
                err = -EINVAL;
                LogFunc(("could not init bdi\n"));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
                unlock_new_inode(iroot);
#endif
                goto fail4;
        }

        sf_init_inode(sf_g, iroot, &fsinfo);
        SET_INODE_INFO(iroot, sf_i);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 25)
        unlock_new_inode(iroot);
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
        droot = d_make_root(iroot);
#else
        droot = d_alloc_root(iroot);
#endif
        if (!droot) {
                err = -ENOMEM;  /* XXX */
                LogFunc(("d_alloc_root failed\n"));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
                fInodePut = false;
#endif
                goto fail5;
        }

        sb->s_root = droot;
        SET_GLOB_INFO(sb, sf_g);
        return 0;

fail5:
        sf_done_backing_dev(sf_g);

fail4:
        if (fInodePut)
                iput(iroot);

fail3:
        kfree(sf_i->path);

fail2:
        kfree(sf_i);

fail1:
        sf_glob_free(sf_g);

fail0:
        return err;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
static struct super_block *sf_read_super_24(struct super_block *sb, void *data,
                                            int flags)
{
        int err;

        TRACE();
        err = sf_read_super_aux(sb, data, flags);
        if (err)
                return NULL;

        return sb;
}
#endif

…

static void sf_clear_inode(struct inode *inode)
{
        struct sf_inode_info *sf_i;

        TRACE();
        sf_i = GET_INODE_INFO(inode);
        if (!sf_i)
                return;

        BUG_ON(!sf_i->path);
        kfree(sf_i->path);
        kfree(sf_i);
        SET_INODE_INFO(inode, NULL);
}
#else
static void sf_evict_inode(struct inode *inode)
{
        struct sf_inode_info *sf_i;

        TRACE();
        truncate_inode_pages(&inode->i_data, 0);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
        clear_inode(inode);
#else
        end_writeback(inode);
#endif

        sf_i = GET_INODE_INFO(inode);
        if (!sf_i)
                return;

        BUG_ON(!sf_i->path);
        kfree(sf_i->path);
        kfree(sf_i);
        SET_INODE_INFO(inode, NULL);
}
#endif

…

static void sf_put_super(struct super_block *sb)
{
        struct sf_glob_info *sf_g;

        sf_g = GET_GLOB_INFO(sb);
        BUG_ON(!sf_g);
        sf_done_backing_dev(sf_g);
        sf_glob_free(sf_g);
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
static int sf_statfs(struct super_block *sb, STRUCT_STATFS * stat)
{
        return sf_get_volume_info(sb, stat);
}
#else
static int sf_statfs(struct dentry *dentry, STRUCT_STATFS * stat)
{
        struct super_block *sb = dentry->d_inode->i_sb;
        return sf_get_volume_info(sb, stat);
}
#endif

static int sf_remount_fs(struct super_block *sb, int *flags, char *data)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 23)
        struct sf_glob_info *sf_g;
        struct sf_inode_info *sf_i;
        struct inode *iroot;
        SHFLFSOBJINFO fsinfo;
        int err;

        sf_g = GET_GLOB_INFO(sb);
        BUG_ON(!sf_g);
        if (data && data[0] != 0) {
                struct vbsf_mount_info_new *info =
                    (struct vbsf_mount_info_new *)data;
                if (info->signature[0] == VBSF_MOUNT_SIGNATURE_BYTE_0
                    && info->signature[1] == VBSF_MOUNT_SIGNATURE_BYTE_1
                    && info->signature[2] == VBSF_MOUNT_SIGNATURE_BYTE_2) {
                        sf_g->uid = info->uid;
                        sf_g->gid = info->gid;
                        sf_g->ttl = info->ttl;
                        sf_g->dmode = info->dmode;
                        sf_g->fmode = info->fmode;
                        sf_g->dmask = info->dmask;
                        sf_g->fmask = info->fmask;
                }
        }

        iroot = ilookup(sb, 0);
        if (!iroot)
                return -ENOSYS;

        sf_i = GET_INODE_INFO(iroot);
        err = sf_stat(__func__, sf_g, sf_i->path, &fsinfo, 0);
        BUG_ON(err != 0);
        sf_init_inode(sf_g, iroot, &fsinfo);
        /*unlock_new_inode(iroot); */
        return 0;
#else
        return -ENOSYS;
#endif
}

static struct super_operations sf_super_ops = {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
        .clear_inode = sf_clear_inode,
#else
        .evict_inode = sf_evict_inode,
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
        .read_inode = sf_read_inode,
#endif
        .put_super = sf_put_super,
        .statfs = sf_statfs,
        .remount_fs = sf_remount_fs
};

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
static DECLARE_FSTYPE(vboxsf_fs_type, "vboxsf", sf_read_super_24, 0);
#else
static int sf_read_super_26(struct super_block *sb, void *data, int flags)
{
        int err;

        TRACE();
        err = sf_read_super_aux(sb, data, flags);
        if (err)
                printk(KERN_DEBUG "sf_read_super_aux err=%d\n", err);

        return err;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)
static struct super_block *sf_get_sb(struct file_system_type *fs_type,
                                     int flags, const char *dev_name,
                                     void *data)
{
        TRACE();
        return get_sb_nodev(fs_type, flags, data, sf_read_super_26);
}
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
static int sf_get_sb(struct file_system_type *fs_type, int flags,
                     const char *dev_name, void *data, struct vfsmount *mnt)
{
        TRACE();
        return get_sb_nodev(fs_type, flags, data, sf_read_super_26, mnt);
}
#else
static struct dentry *sf_mount(struct file_system_type *fs_type, int flags,
                               const char *dev_name, void *data)
{
        TRACE();
        return mount_nodev(fs_type, flags, data, sf_read_super_26);
}
#endif

static struct file_system_type vboxsf_fs_type = {
        .owner = THIS_MODULE,
        .name = "vboxsf",
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 39)
        .get_sb = sf_get_sb,
#else
        .mount = sf_mount,
#endif
        .kill_sb = kill_anon_super
};
#endif

…

static int follow_symlinks = 0;
module_param(follow_symlinks, int, 0);
MODULE_PARM_DESC(follow_symlinks,
                 "Let host resolve symlinks rather than showing them");
#endif

…

static int __init init(void)
{
        int rcVBox;
        int rcRet = 0;
        int err;

        TRACE();

        if (sizeof(struct vbsf_mount_info_new) > PAGE_SIZE) {
                printk(KERN_ERR
                       "Mount information structure is too large %lu\n"
                       "Must be less than or equal to %lu\n",
                       (unsigned long)sizeof(struct vbsf_mount_info_new),
                       (unsigned long)PAGE_SIZE);
                return -EINVAL;
        }

        err = register_filesystem(&vboxsf_fs_type);
        if (err) {
                LogFunc(("register_filesystem err=%d\n", err));
                return err;
        }

        rcVBox = VbglR0HGCMInit();
        if (RT_FAILURE(rcVBox)) {
                LogRelFunc(("VbglR0HGCMInit failed, rc=%d\n", rcVBox));
                rcRet = -EPROTO;
                goto fail0;
        }

        rcVBox = VbglR0SfConnect(&client_handle);
        if (RT_FAILURE(rcVBox)) {
                LogRelFunc(("VbglR0SfConnect failed, rc=%d\n", rcVBox));
                rcRet = -EPROTO;
                goto fail1;
        }

        rcVBox = VbglR0SfSetUtf8(&client_handle);
        if (RT_FAILURE(rcVBox)) {
                LogRelFunc(("VbglR0SfSetUtf8 failed, rc=%d\n", rcVBox));
                rcRet = -EPROTO;
                goto fail2;
        }
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
        if (!follow_symlinks) {
                rcVBox = VbglR0SfSetSymlinks(&client_handle);
                if (RT_FAILURE(rcVBox)) {
                        printk(KERN_WARNING
                               "vboxsf: Host unable to show symlinks, rc=%d\n",
                               rcVBox);
                }
        }
#endif

        printk(KERN_DEBUG
               "vboxsf: Successfully loaded version " VBOX_VERSION_STRING
               " (interface " RT_XSTR(VMMDEV_VERSION) ")\n");

        return 0;

fail2:
        VbglR0SfDisconnect(&client_handle);

fail1:
        VbglR0HGCMTerminate();

fail0:
        unregister_filesystem(&vboxsf_fs_type);
        return rcRet;
}

static void __exit fini(void)
{
        TRACE();

        VbglR0SfDisconnect(&client_handle);
        VbglR0HGCMTerminate();
        unregister_filesystem(&vboxsf_fs_type);
}
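A side note on the PAGE_SIZE guard in init() above: mount(2) copies at most one page of its "data" argument into the kernel, so the module refuses to load if vbsf_mount_info_new ever outgrows a page. The sketch below shows how a user-space helper could assert the same invariant early; check_mount_info_size() is an invented name, not part of the sources.

        #include <assert.h>
        #include <stddef.h>
        #include <unistd.h>
        #include "vbsfmount.h"

        void check_mount_info_size(void)
        {
                long page_size = sysconf(_SC_PAGESIZE);

                /* Mirrors the sizeof(struct vbsf_mount_info_new) > PAGE_SIZE
                 * check in the module's init(): if this fires, the structure
                 * no longer fits into the single page that mount(2) copies
                 * from user space. */
                assert(page_size > 0 &&
                       sizeof(struct vbsf_mount_info_new) <= (size_t)page_size);
        }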
trunk/src/VBox/Additions/linux/sharedfolders/vfsmod.h
r69500 r70786

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
#include <linux/backing-dev.h>
#endif

…

/* per-shared folder information */
struct sf_glob_info {
        VBGLSFMAP map;
        struct nls_table *nls;
        int ttl;
        int uid;
        int gid;
        int dmode;
        int fmode;
        int dmask;
        int fmask;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
        struct backing_dev_info bdi;
#endif
};

/* per-inode information */
struct sf_inode_info {
        /* which file */
        SHFLSTRING *path;
        /* some information was changed, update data on next revalidate */
        int force_restat;
        /* directory content changed, update the whole directory on next sf_getdent */
        int force_reread;
        /* file structure, only valid between open() and release() */
        struct file *file;
        /* handle valid if a file was created with sf_create_aux until it will
         * be opened with sf_reg_open() */
        SHFLHANDLE handle;
};

struct sf_dir_info {
        struct list_head info_list;
};

struct sf_dir_buf {
        size_t cEntries;
        size_t cbFree;
        size_t cbUsed;
        void *buf;
        struct list_head head;
};

struct sf_reg_info {
        SHFLHANDLE handle;
};

…

/* forward declarations */
extern struct inode_operations sf_dir_iops;
extern struct inode_operations sf_lnk_iops;
extern struct inode_operations sf_reg_iops;
extern struct file_operations sf_dir_fops;
extern struct file_operations sf_reg_fops;
extern struct dentry_operations sf_dentry_ops;
extern struct address_space_operations sf_reg_aops;

extern void sf_init_inode(struct sf_glob_info *sf_g, struct inode *inode,
                          PSHFLFSOBJINFO info);
extern int sf_stat(const char *caller, struct sf_glob_info *sf_g,
                   SHFLSTRING * path, PSHFLFSOBJINFO result, int ok_to_fail);
extern int sf_inode_revalidate(struct dentry *dentry);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
extern int sf_getattr(const struct path *path, struct kstat *kstat,
                      u32 request_mask, unsigned int query_flags);
#else
extern int sf_getattr(struct vfsmount *mnt, struct dentry *dentry,
                      struct kstat *kstat);
#endif
extern int sf_setattr(struct dentry *dentry, struct iattr *iattr);
#endif
extern int sf_path_from_dentry(const char *caller, struct sf_glob_info *sf_g,
                               struct sf_inode_info *sf_i,
                               struct dentry *dentry, SHFLSTRING ** result);
extern int sf_nlscpy(struct sf_glob_info *sf_g, char *name,
                     size_t name_bound_len, const unsigned char *utf8_name,
                     size_t utf8_len);
extern void sf_dir_info_free(struct sf_dir_info *p);
extern void sf_dir_info_empty(struct sf_dir_info *p);
extern struct sf_dir_info *sf_dir_info_alloc(void);
extern int sf_dir_read_all(struct sf_glob_info *sf_g,
                           struct sf_inode_info *sf_i, struct sf_dir_info *sf_d,
                           SHFLHANDLE handle);
extern int sf_init_backing_dev(struct sf_glob_info *sf_g);
extern void sf_done_backing_dev(struct sf_glob_info *sf_g);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
#define STRUCT_STATFS struct statfs
#else
#define STRUCT_STATFS struct kstatfs
#endif
int sf_get_volume_info(struct super_block *sb, STRUCT_STATFS * stat);

#ifdef __cplusplus
#define CMC_API __attribute__ ((cdecl, regparm (0)))
#else
#define CMC_API __attribute__ ((regparm (0)))
#endif

…

   pointers of arbitrary type */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
#define GET_GLOB_INFO(sb)       ((struct sf_glob_info *) (sb)->u.generic_sbp)
#define SET_GLOB_INFO(sb, sf_g) (sb)->u.generic_sbp = sf_g
#else
#define GET_GLOB_INFO(sb)       ((struct sf_glob_info *) (sb)->s_fs_info)
#define SET_GLOB_INFO(sb, sf_g) (sb)->s_fs_info = sf_g
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) || defined(KERNEL_FC6)
/* FC6 kernel 2.6.18, vanilla kernel 2.6.19+ */
#define GET_INODE_INFO(i)       ((struct sf_inode_info *) (i)->i_private)
#define SET_INODE_INFO(i, sf_i) (i)->i_private = sf_i
#else
/* vanilla kernel up to 2.6.18 */
#define GET_INODE_INFO(i)       ((struct sf_inode_info *) (i)->u.generic_ip)
#define SET_INODE_INFO(i, sf_i) (i)->u.generic_ip = sf_i
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
#define GET_F_DENTRY(f)         (f->f_path.dentry)
#else
#define GET_F_DENTRY(f)         (f->f_dentry)
#endif

#endif
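For orientation, a hedged sketch of how the accessor macros declared above are typically combined. sf_dump_path() is a made-up helper, not part of the module; it only walks from a struct file to the per-mount and per-inode data via GET_F_DENTRY, GET_GLOB_INFO and GET_INODE_INFO.

        #include "vfsmod.h"

        static void sf_dump_path(struct file *file)
        {
                struct dentry *dentry = GET_F_DENTRY(file);
                struct inode *inode = dentry->d_inode;
                struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
                struct sf_inode_info *sf_i = GET_INODE_INFO(inode);

                if (!sf_g || !sf_i)
                        return;

                /* sf_i->path holds the host-side path as a SHFLSTRING, while
                 * sf_g carries the per-mount settings (ttl, uid/gid, mode
                 * masks) and the mapping used for the VbglR0Sf* calls. */
                LogFunc(("ttl=%d path=%s\n", sf_g->ttl, sf_i->path->String.utf8));
        }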