1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* CacheFiles path walking and related routines
3 *
4 * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
6 */
7
8 #include <linux/fs.h>
9 #include <linux/namei.h>
10 #include "internal.h"
11
12 /*
13 * Mark the backing file as being a cache file if it's not already in use. The
14 * mark tells the culling request command that it's not allowed to cull the
15 * file or directory. The caller must hold the inode lock.
16 */
__cachefiles_mark_inode_in_use(struct cachefiles_object * object,struct inode * inode)17 static bool __cachefiles_mark_inode_in_use(struct cachefiles_object *object,
18 struct inode *inode)
19 {
20 bool can_use = false;
21
22 if (!(inode->i_flags & S_KERNEL_FILE)) {
23 inode->i_flags |= S_KERNEL_FILE;
24 trace_cachefiles_mark_active(object, inode);
25 can_use = true;
26 } else {
27 trace_cachefiles_mark_failed(object, inode);
28 }
29
30 return can_use;
31 }
32
cachefiles_mark_inode_in_use(struct cachefiles_object * object,struct inode * inode)33 static bool cachefiles_mark_inode_in_use(struct cachefiles_object *object,
34 struct inode *inode)
35 {
36 bool can_use;
37
38 inode_lock(inode);
39 can_use = __cachefiles_mark_inode_in_use(object, inode);
40 inode_unlock(inode);
41 return can_use;
42 }
43
/*
 * Unmark a backing inode, clearing S_KERNEL_FILE so that the culling request
 * command may cull it again.  The caller must hold the inode lock.
 */
static void __cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
					     struct inode *inode)
{
	inode->i_flags &= ~S_KERNEL_FILE;
	trace_cachefiles_mark_inactive(object, inode);
}
53
/*
 * Unmark a backing inode, taking the inode lock around the flag clearance.
 */
static void cachefiles_do_unmark_inode_in_use(struct cachefiles_object *object,
					      struct inode *inode)
{
	inode_lock(inode);
	__cachefiles_unmark_inode_in_use(object, inode);
	inode_unlock(inode);
}
61
/*
 * Unmark a backing inode and tell cachefilesd that there's something that can
 * be culled.
 */
void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
				    struct file *file)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct inode *inode = file_inode(file);

	cachefiles_do_unmark_inode_in_use(object, inode);

	/* A still-unlinked tmpfile never occupied culling-visible space, so
	 * only account objects that were linked into the cache tree. */
	if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
		atomic_long_add(inode->i_blocks, &cache->b_released);
		/* NOTE(review): atomic_inc_return() is non-zero here short of
		 * counter wrap, so the daemon is poked on every release -
		 * confirm that unconditional poking is the intent. */
		if (atomic_inc_return(&cache->f_released))
			cachefiles_state_changed(cache);
	}
}
80
/*
 * Get a subdirectory of the given directory, creating it if it doesn't
 * exist.  On success the subdir's inode is marked S_KERNEL_FILE to pin it
 * against culling/rmdir, *_is_new (if given) is set to true when the
 * subdir had to be created, and the referenced dentry is returned;
 * otherwise an ERR_PTR is returned.
 */
struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
					struct dentry *dir,
					const char *dirname,
					bool *_is_new)
{
	struct dentry *subdir;
	struct path path;
	int ret;

	_enter(",,%s", dirname);

	/* search the current directory for the element name */

retry:
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		subdir = start_creating(&nop_mnt_idmap, dir, &QSTR(dirname));
	else
		subdir = ERR_PTR(ret);
	trace_cachefiles_lookup(NULL, dir, subdir);
	if (IS_ERR(subdir)) {
		trace_cachefiles_vfs_error(NULL, d_backing_inode(dir),
					   PTR_ERR(subdir),
					   cachefiles_trace_lookup_error);
		if (PTR_ERR(subdir) == -ENOMEM)
			goto nomem_d_alloc;
		goto lookup_error;
	}

	_debug("subdir -> %pd %s",
	       subdir, d_backing_inode(subdir) ? "positive" : "negative");

	/* we need to create the subdir if it doesn't exist yet */
	if (d_is_negative(subdir)) {
		/* Check the cache has room for another object first. */
		ret = cachefiles_has_space(cache, 1, 0,
					   cachefiles_has_space_for_create);
		if (ret < 0)
			goto mkdir_error;

		_debug("attempt mkdir");

		path.mnt = cache->mnt;
		path.dentry = dir;
		ret = security_path_mkdir(&path, subdir, 0700);
		if (ret < 0)
			goto mkdir_error;
		ret = cachefiles_inject_write_error();
		if (ret == 0) {
			/* vfs_mkdir() consumes the dentry we pass and may
			 * hand back a different one. */
			subdir = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), subdir, 0700, NULL);
		} else {
			end_creating(subdir);
			subdir = ERR_PTR(ret);
		}
		if (IS_ERR(subdir)) {
			trace_cachefiles_vfs_error(NULL, d_inode(dir), ret,
						   cachefiles_trace_mkdir_error);
			goto mkdir_error;
		}
		trace_cachefiles_mkdir(dir, subdir);

		/* The new directory may have been unhashed or killed by a
		 * racing thread - start over if so. */
		if (unlikely(d_unhashed(subdir) || d_is_negative(subdir))) {
			end_creating(subdir);
			goto retry;
		}
		ASSERT(d_backing_inode(subdir));

		_debug("mkdir -> %pd{ino=%lu}",
		       subdir, d_backing_inode(subdir)->i_ino);
		if (_is_new)
			*_is_new = true;
	}

	/* Tell rmdir() it's not allowed to delete the subdir */
	inode_lock(d_inode(subdir));
	end_creating_keep(subdir);

	if (!__cachefiles_mark_inode_in_use(NULL, d_inode(subdir))) {
		pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
			  subdir, d_inode(subdir)->i_ino);
		goto mark_error;
	}

	inode_unlock(d_inode(subdir));

	/* we need to make sure the subdir is a directory */
	ASSERT(d_backing_inode(subdir));

	if (!d_can_lookup(subdir)) {
		pr_err("%s is not a directory\n", dirname);
		ret = -EIO;
		goto check_error;
	}

	/* The backing filesystem must support all the ops we'll need. */
	ret = -EPERM;
	if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) ||
	    !d_backing_inode(subdir)->i_op->lookup ||
	    !d_backing_inode(subdir)->i_op->mkdir ||
	    !d_backing_inode(subdir)->i_op->rename ||
	    !d_backing_inode(subdir)->i_op->rmdir ||
	    !d_backing_inode(subdir)->i_op->unlink)
		goto check_error;

	_leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
	return subdir;

check_error:
	/* This unmarks the inode and drops our reference. */
	cachefiles_put_directory(subdir);
	_leave(" = %d [check]", ret);
	return ERR_PTR(ret);

mark_error:
	inode_unlock(d_inode(subdir));
	dput(subdir);
	return ERR_PTR(-EBUSY);

mkdir_error:
	end_creating(subdir);
	pr_err("mkdir %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

lookup_error:
	ret = PTR_ERR(subdir);
	pr_err("Lookup %s failed with error %d\n", dirname, ret);
	return ERR_PTR(ret);

nomem_d_alloc:
	/* NOTE(review): this path unlocks the parent but lookup_error does
	 * not - confirm which matches the start_creating() failure contract;
	 * they look inconsistent from here. */
	inode_unlock(d_inode(dir));
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}
214
215 /*
216 * Put a subdirectory.
217 */
cachefiles_put_directory(struct dentry * dir)218 void cachefiles_put_directory(struct dentry *dir)
219 {
220 if (dir) {
221 cachefiles_do_unmark_inode_in_use(NULL, d_inode(dir));
222 dput(dir);
223 }
224 }
225
/*
 * Remove a regular file from the cache.  The caller must hold the victim
 * pinned and the parent directory appropriately locked.  Returns 0 or a
 * negative error code.
 */
static int cachefiles_unlink(struct cachefiles_cache *cache,
			     struct cachefiles_object *object,
			     struct dentry *dir, struct dentry *dentry,
			     enum fscache_why_object_killed why)
{
	struct path path = {
		.mnt = cache->mnt,
		.dentry = dir,
	};
	int ret;

	trace_cachefiles_unlink(object, d_inode(dentry)->i_ino, why);
	/* Give the security module a chance to veto the removal. */
	ret = security_path_unlink(&path, dentry);
	if (ret < 0) {
		cachefiles_io_error(cache, "Unlink security error");
		return ret;
	}

	/* Error injection may substitute a fault for the real unlink. */
	ret = cachefiles_inject_remove_error();
	if (ret == 0) {
		ret = vfs_unlink(&nop_mnt_idmap, d_backing_inode(dir), dentry, NULL);
		if (ret == -EIO)
			cachefiles_io_error(cache, "Unlink failed");
	}
	if (ret != 0)
		trace_cachefiles_vfs_error(object, d_backing_inode(dir), ret,
					   cachefiles_trace_unlink_error);
	return ret;
}
258
/*
 * Delete an object representation from the cache
 * - File backed objects are unlinked
 * - Directory backed objects are stuffed into the graveyard for userspace to
 *   delete
 * On entry dir must be locked.  It will be unlocked on exit.
 * On entry there must be at least 2 refs on rep, one will be dropped on exit.
 */
int cachefiles_bury_object(struct cachefiles_cache *cache,
			   struct cachefiles_object *object,
			   struct dentry *dir,
			   struct dentry *rep,
			   enum fscache_why_object_killed why)
{
	struct dentry *grave;
	struct renamedata rd = {};
	struct path path, path_to_graveyard;
	char nbuffer[8 + 8 + 1];	/* two 8-hex-digit words + NUL */
	int ret;

	_enter(",'%pd','%pd'", dir, rep);

	/* The victim must still be a child of the directory we hold. */
	if (rep->d_parent != dir) {
		end_removing(rep);
		_leave(" = -ESTALE");
		return -ESTALE;
	}

	/* non-directories can just be unlinked */
	if (!d_is_dir(rep)) {
		ret = cachefiles_unlink(cache, object, dir, rep, why);
		end_removing(rep);

		_leave(" = %d", ret);
		return ret;
	}

	/* directories have to be moved to the graveyard */
	_debug("move stale object to graveyard");
	end_removing(rep);

try_again:
	/* first step is to make up a grave dentry in the graveyard,
	 * named from the wallclock time plus a serial counter so that
	 * collisions are unlikely */
	sprintf(nbuffer, "%08x%08x",
		(uint32_t) ktime_get_real_seconds(),
		(uint32_t) atomic_inc_return(&cache->gravecounter));

	rd.mnt_idmap = &nop_mnt_idmap;
	rd.old_parent = dir;
	rd.new_parent = cache->graveyard;
	rd.flags = 0;
	ret = start_renaming_dentry(&rd, 0, rep, &QSTR(nbuffer));
	if (ret) {
		/* Some errors aren't fatal */
		if (ret == -EXDEV)
			/* double-lock failed */
			return ret;
		if (d_unhashed(rep) || rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
			/* the entry was probably culled when we dropped the parent dir
			 * lock */
			_leave(" = 0 [culled?]");
			return 0;
		}
		if (ret == -EINVAL || ret == -ENOTEMPTY) {
			cachefiles_io_error(cache, "May not make directory loop");
			return -EIO;
		}
		if (ret == -ENOMEM) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		cachefiles_io_error(cache, "Lookup error %d", ret);
		return -EIO;
	}

	/* Refuse to touch anything a mount sits on. */
	if (d_mountpoint(rep)) {
		end_renaming(&rd);
		cachefiles_io_error(cache, "Mountpoint in cache");
		return -EIO;
	}

	grave = rd.new_dentry;
	/* If the grave name is already occupied, make up another one. */
	if (d_is_positive(grave)) {
		end_renaming(&rd);
		grave = NULL;
		cond_resched();
		goto try_again;
	}

	if (d_mountpoint(grave)) {
		end_renaming(&rd);
		cachefiles_io_error(cache, "Mountpoint in graveyard");
		return -EIO;
	}

	/* attempt the rename */
	path.mnt = cache->mnt;
	path.dentry = dir;
	path_to_graveyard.mnt = cache->mnt;
	path_to_graveyard.dentry = cache->graveyard;
	ret = security_path_rename(&path, rep, &path_to_graveyard, grave, 0);
	if (ret < 0) {
		cachefiles_io_error(cache, "Rename security error %d", ret);
	} else {
		trace_cachefiles_rename(object, d_inode(rep)->i_ino, why);
		ret = cachefiles_inject_read_error();
		if (ret == 0)
			ret = vfs_rename(&rd);
		if (ret != 0)
			trace_cachefiles_vfs_error(object, d_inode(dir), ret,
						   cachefiles_trace_rename_error);
		if (ret != 0 && ret != -ENOMEM)
			cachefiles_io_error(cache,
					    "Rename failed with error %d", ret);
	}

	/* Whatever happened, the directory is no longer pinned by us; note
	 * that rename failures are reported but still return 0 here. */
	__cachefiles_unmark_inode_in_use(object, d_inode(rep));
	end_renaming(&rd);
	_leave(" = 0");
	return 0;
}
381
/*
 * Delete a cache file by unlinking it from its fanout directory.
 */
int cachefiles_delete_object(struct cachefiles_object *object,
			     enum fscache_why_object_killed why)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry = object->file->f_path.dentry;
	struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
	int ret;

	_enter(",OBJ%x{%pD}", object->debug_id, object->file);

	dentry = start_removing_dentry(fan, dentry);
	if (IS_ERR(dentry))
		ret = PTR_ERR(dentry);
	else
		ret = cachefiles_unlink(volume->cache, object, fan, dentry, why);
	/* NOTE(review): end_removing() is reached with an ERR_PTR when
	 * start_removing_dentry() failed - confirm it tolerates that. */
	end_removing(dentry);
	return ret;
}
403
404 /*
405 * Create a temporary file and leave it unattached and un-xattr'd until the
406 * time comes to discard the object from memory.
407 */
cachefiles_create_tmpfile(struct cachefiles_object * object)408 struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
409 {
410 struct cachefiles_volume *volume = object->volume;
411 struct cachefiles_cache *cache = volume->cache;
412 const struct cred *saved_cred;
413 struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
414 struct file *file;
415 const struct path parentpath = { .mnt = cache->mnt, .dentry = fan };
416 uint64_t ni_size;
417 long ret;
418
419
420 cachefiles_begin_secure(cache, &saved_cred);
421
422 ret = cachefiles_inject_write_error();
423 if (ret == 0) {
424 file = kernel_tmpfile_open(&nop_mnt_idmap, &parentpath,
425 S_IFREG | 0600,
426 O_RDWR | O_LARGEFILE | O_DIRECT,
427 cache->cache_cred);
428 ret = PTR_ERR_OR_ZERO(file);
429 }
430 if (ret) {
431 trace_cachefiles_vfs_error(object, d_inode(fan), ret,
432 cachefiles_trace_tmpfile_error);
433 if (ret == -EIO)
434 cachefiles_io_error_obj(object, "Failed to create tmpfile");
435 goto err;
436 }
437
438 trace_cachefiles_tmpfile(object, file_inode(file));
439
440 /* This is a newly created file with no other possible user */
441 if (!cachefiles_mark_inode_in_use(object, file_inode(file)))
442 WARN_ON(1);
443
444 ret = cachefiles_ondemand_init_object(object);
445 if (ret < 0)
446 goto err_unuse;
447
448 ni_size = object->cookie->object_size;
449 ni_size = round_up(ni_size, CACHEFILES_DIO_BLOCK_SIZE);
450
451 if (ni_size > 0) {
452 trace_cachefiles_trunc(object, file_inode(file), 0, ni_size,
453 cachefiles_trunc_expand_tmpfile);
454 ret = cachefiles_inject_write_error();
455 if (ret == 0)
456 ret = vfs_truncate(&file->f_path, ni_size);
457 if (ret < 0) {
458 trace_cachefiles_vfs_error(
459 object, file_inode(file), ret,
460 cachefiles_trace_trunc_error);
461 goto err_unuse;
462 }
463 }
464
465 ret = -EINVAL;
466 if (unlikely(!file->f_op->read_iter) ||
467 unlikely(!file->f_op->write_iter)) {
468 fput(file);
469 pr_notice("Cache does not support read_iter and write_iter\n");
470 goto err_unuse;
471 }
472 out:
473 cachefiles_end_secure(cache, saved_cred);
474 return file;
475
476 err_unuse:
477 cachefiles_do_unmark_inode_in_use(object, file_inode(file));
478 fput(file);
479 err:
480 file = ERR_PTR(ret);
481 goto out;
482 }
483
484 /*
485 * Create a new file.
486 */
cachefiles_create_file(struct cachefiles_object * object)487 static bool cachefiles_create_file(struct cachefiles_object *object)
488 {
489 struct file *file;
490 int ret;
491
492 ret = cachefiles_has_space(object->volume->cache, 1, 0,
493 cachefiles_has_space_for_create);
494 if (ret < 0)
495 return false;
496
497 file = cachefiles_create_tmpfile(object);
498 if (IS_ERR(file))
499 return false;
500
501 set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &object->cookie->flags);
502 set_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
503 _debug("create -> %pD{ino=%lu}", file, file_inode(file)->i_ino);
504 object->file = file;
505 return true;
506 }
507
/*
 * Open an existing file, checking its attributes and replacing it if it is
 * stale.  Returns true if object->file was set (possibly to a freshly
 * created replacement), false on failure.
 */
static bool cachefiles_open_file(struct cachefiles_object *object,
				 struct dentry *dentry)
{
	struct cachefiles_cache *cache = object->volume->cache;
	struct file *file;
	struct path path;
	int ret;

	_enter("%pd", dentry);

	/* Pin the inode first so nothing else can claim or cull it. */
	if (!cachefiles_mark_inode_in_use(object, d_inode(dentry))) {
		pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
			  dentry, d_inode(dentry)->i_ino);
		return false;
	}

	/* We need to open a file interface onto a data file now as we can't do
	 * it on demand because writeback called from do_exit() sees
	 * current->fs == NULL - which breaks d_path() called from ext4 open.
	 */
	path.mnt = cache->mnt;
	path.dentry = dentry;
	file = kernel_file_open(&path, O_RDWR | O_LARGEFILE | O_DIRECT, cache->cache_cred);
	if (IS_ERR(file)) {
		trace_cachefiles_vfs_error(object, d_backing_inode(dentry),
					   PTR_ERR(file),
					   cachefiles_trace_open_error);
		goto error;
	}

	if (unlikely(!file->f_op->read_iter) ||
	    unlikely(!file->f_op->write_iter)) {
		pr_notice("Cache does not support read_iter and write_iter\n");
		goto error_fput;
	}
	_debug("file -> %pd positive", dentry);

	ret = cachefiles_ondemand_init_object(object);
	if (ret < 0)
		goto error_fput;

	/* Check the stored coherency data against the netfs's copy. */
	ret = cachefiles_check_auxdata(object, file);
	if (ret < 0)
		goto check_failed;

	clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &object->cookie->flags);

	object->file = file;

	/* Always update the atime on an object we've just looked up (this is
	 * used to keep track of culling, and atimes are only updated by read,
	 * write and readdir but not lookup or open).
	 */
	touch_atime(&file->f_path);
	return true;

check_failed:
	/* Coherency failure: drop the file and, if merely stale, build a
	 * fresh one in its place. */
	fscache_cookie_lookup_negative(object->cookie);
	cachefiles_unmark_inode_in_use(object, file);
	fput(file);
	if (ret == -ESTALE)
		return cachefiles_create_file(object);
	return false;

error_fput:
	fput(file);
error:
	cachefiles_do_unmark_inode_in_use(object, d_inode(dentry));
	return false;
}
582
/*
 * walk from the parent object to the child object through the backing
 * filesystem, creating directories as we go
 */
bool cachefiles_look_up_object(struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
	int ret;

	_enter("OBJ%x,%s,", object->debug_id, object->d_name);

	/* Look up path "cache/vol/fanout/file". */
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		dentry = lookup_one_positive_unlocked(&nop_mnt_idmap,
						      &QSTR(object->d_name), fan);
	else
		dentry = ERR_PTR(ret);
	trace_cachefiles_lookup(object, fan, dentry);
	if (IS_ERR(dentry)) {
		/* No backing file yet: fall through to creating one. */
		if (dentry == ERR_PTR(-ENOENT))
			goto new_file;
		if (dentry == ERR_PTR(-EIO))
			cachefiles_io_error_obj(object, "Lookup failed");
		return false;
	}

	/* Anything that isn't a regular file is bogus: bury it and start
	 * afresh with a new file. */
	if (!d_is_reg(dentry)) {
		pr_err("%pd is not a file\n", dentry);
		struct dentry *de = start_removing_dentry(fan, dentry);
		if (IS_ERR(de))
			ret = PTR_ERR(de);
		else
			ret = cachefiles_bury_object(volume->cache, object,
						     fan, de,
						     FSCACHE_OBJECT_IS_WEIRD);
		/* Drop our lookup reference in either case. */
		dput(dentry);
		if (ret < 0)
			return false;
		goto new_file;
	}

	ret = cachefiles_open_file(object, dentry);
	dput(dentry);
	if (!ret)
		return false;

	_leave(" = t [%lu]", file_inode(object->file)->i_ino);
	return true;

new_file:
	fscache_cookie_lookup_negative(object->cookie);
	return cachefiles_create_file(object);
}
638
/*
 * Attempt to link a temporary file into its rightful place in the cache.
 * Returns true if the tmpfile got linked in.
 */
bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache,
			       struct cachefiles_object *object)
{
	struct cachefiles_volume *volume = object->volume;
	struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
	bool success = false;
	int ret;

	_enter(",%pD", object->file);

	ret = cachefiles_inject_read_error();
	if (ret == 0)
		dentry = start_creating(&nop_mnt_idmap, fan, &QSTR(object->d_name));
	else
		dentry = ERR_PTR(ret);
	if (IS_ERR(dentry)) {
		trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
					   cachefiles_trace_lookup_error);
		_debug("lookup fail %ld", PTR_ERR(dentry));
		goto out;
	}

	/*
	 * This loop will only execute more than once if some other thread
	 * races to create the object we are trying to create.
	 */
	while (!d_is_negative(dentry)) {
		/* A stale file occupies the name: unlink it and look again. */
		ret = cachefiles_unlink(volume->cache, object, fan, dentry,
					FSCACHE_OBJECT_IS_STALE);
		if (ret < 0)
			goto out_end;

		end_creating(dentry);

		ret = cachefiles_inject_read_error();
		if (ret == 0)
			dentry = start_creating(&nop_mnt_idmap, fan,
						&QSTR(object->d_name));
		else
			dentry = ERR_PTR(ret);
		if (IS_ERR(dentry)) {
			trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
						   cachefiles_trace_lookup_error);
			_debug("lookup fail %ld", PTR_ERR(dentry));
			/* dentry is an ERR_PTR here, so skip end_creating(). */
			goto out;
		}
	}

	/* Link the tmpfile's inode in under the (now negative) dentry. */
	ret = cachefiles_inject_read_error();
	if (ret == 0)
		ret = vfs_link(object->file->f_path.dentry, &nop_mnt_idmap,
			       d_inode(fan), dentry, NULL);
	if (ret < 0) {
		trace_cachefiles_vfs_error(object, d_inode(fan), ret,
					   cachefiles_trace_link_error);
		_debug("link fail %d", ret);
	} else {
		trace_cachefiles_link(object, file_inode(object->file));
		spin_lock(&object->lock);
		/* TODO: Do we want to switch the file pointer to the new dentry? */
		clear_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
		spin_unlock(&object->lock);
		success = true;
	}

out_end:
	end_creating(dentry);
out:
	_leave(" = %u", success);
	return success;
}
713
714 /*
715 * Look up an inode to be checked or culled. Return -EBUSY if the inode is
716 * marked in use.
717 */
cachefiles_lookup_for_cull(struct cachefiles_cache * cache,struct dentry * dir,char * filename)718 static struct dentry *cachefiles_lookup_for_cull(struct cachefiles_cache *cache,
719 struct dentry *dir,
720 char *filename)
721 {
722 struct dentry *victim;
723 int ret = -ENOENT;
724
725 victim = start_removing(&nop_mnt_idmap, dir, &QSTR(filename));
726
727 if (IS_ERR(victim))
728 goto lookup_error;
729 if (d_inode(victim)->i_flags & S_KERNEL_FILE)
730 goto lookup_busy;
731 return victim;
732
733 lookup_busy:
734 ret = -EBUSY;
735 end_removing(victim);
736 return ERR_PTR(ret);
737
738 lookup_error:
739 ret = PTR_ERR(victim);
740 if (ret == -ENOENT)
741 return ERR_PTR(-ESTALE); /* Probably got retired by the netfs */
742
743 if (ret == -EIO) {
744 cachefiles_io_error(cache, "Lookup failed");
745 } else if (ret != -ENOMEM) {
746 pr_err("Internal error: %d\n", ret);
747 ret = -EIO;
748 }
749
750 return ERR_PTR(ret);
751 }
752
/*
 * Cull an object if it's not in use
 * - called only by cache manager daemon
 */
int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
		    char *filename)
{
	struct dentry *victim;
	struct inode *inode;
	int ret;

	_enter(",%pd/,%s", dir, filename);

	victim = cachefiles_lookup_for_cull(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	/* check to see if someone is using this object */
	inode = d_inode(victim);
	inode_lock(inode);
	if (inode->i_flags & S_KERNEL_FILE) {
		ret = -EBUSY;
	} else {
		/* Stop the cache from picking it back up */
		inode->i_flags |= S_KERNEL_FILE;
		ret = 0;
	}
	inode_unlock(inode);
	if (ret < 0)
		goto error_unlock;

	/*
	 * cachefiles_bury_object() expects 2 references to 'victim',
	 * and drops one.
	 */
	dget(victim);
	/* bury_object() also unlocks dir on exit, so the plain "error"
	 * label below must not unlock again. */
	ret = cachefiles_bury_object(cache, NULL, dir, victim,
				     FSCACHE_OBJECT_WAS_CULLED);
	dput(victim);
	if (ret < 0)
		goto error;

	fscache_count_culled();
	_leave(" = 0");
	return 0;

error_unlock:
	end_removing(victim);
error:
	if (ret == -ENOENT)
		return -ESTALE; /* Probably got retired by the netfs */

	if (ret != -ENOMEM) {
		pr_err("Internal error: %d\n", ret);
		ret = -EIO;
	}

	_leave(" = %d", ret);
	return ret;
}
813
/*
 * Find out if an object is in use or not
 * - called only by cache manager daemon
 * - returns -EBUSY or 0 to indicate whether an object is in use or not
 */
int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
			    char *filename)
{
	struct dentry *victim;
	int ret = 0;

	/* The lookup itself returns -EBUSY when the inode carries
	 * S_KERNEL_FILE, which is exactly the "in use" answer wanted. */
	victim = cachefiles_lookup_for_cull(cache, dir, filename);
	if (IS_ERR(victim))
		return PTR_ERR(victim);

	/* NOTE(review): this open-codes end_removing(victim) by unlocking the
	 * parent and dropping the ref separately - confirm the two forms are
	 * equivalent under the start_removing() contract. */
	inode_unlock(d_inode(dir));
	dput(victim);
	return ret;
}
833