// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Christoph Hellwig.
 */
#include <linux/blkdev.h>
#include <linux/kmod.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/sched.h>
#include <linux/sunrpc/addr.h>

#include "pnfs.h"
#include "netns.h"
#include "trace.h"

#define NFSDDBG_FACILITY                NFSDDBG_PNFS

struct nfs4_layout {
	struct list_head		lo_perstate;
	struct nfs4_layout_stateid	*lo_state;
	struct nfsd4_layout_seg		lo_seg;
};

static struct kmem_cache *nfs4_layout_cache;
static struct kmem_cache *nfs4_layout_stateid_cache;

static const struct nfsd4_callback_ops nfsd4_cb_layout_ops;
static const struct lease_manager_operations nfsd4_layouts_lm_ops;

const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = {
#ifdef CONFIG_NFSD_FLEXFILELAYOUT
	[LAYOUT_FLEX_FILES]	= &ff_layout_ops,
#endif
#ifdef CONFIG_NFSD_BLOCKLAYOUT
	[LAYOUT_BLOCK_VOLUME]	= &bl_layout_ops,
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
	[LAYOUT_SCSI]		= &scsi_layout_ops,
#endif
};

/* pNFS device ID to export fsid mapping */
#define DEVID_HASH_BITS	8
#define DEVID_HASH_SIZE	(1 << DEVID_HASH_BITS)
#define DEVID_HASH_MASK	(DEVID_HASH_SIZE - 1)
static u64 nfsd_devid_seq = 1;
static struct list_head nfsd_devid_hash[DEVID_HASH_SIZE];
static DEFINE_SPINLOCK(nfsd_devid_lock);

static inline u32 devid_hashfn(u64 idx)
{
	return jhash_2words(idx, idx >> 32, 0) & DEVID_HASH_MASK;
}

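/*
 * Allocate a device ID to fsid mapping for the export backing fhp.  If a
 * racing thread installed a map first, or another export already maps the
 * same fsid, reuse the existing entry; on allocation failure ex_devid_map
 * stays NULL and the caller reports the error.
 */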
static void
nfsd4_alloc_devid_map(const struct svc_fh *fhp)
{
	const struct knfsd_fh *fh = &fhp->fh_handle;
	size_t fsid_len = key_len(fh->fh_fsid_type);
	struct nfsd4_deviceid_map *map, *old;
	int i;

	map = kzalloc(sizeof(*map) + fsid_len, GFP_KERNEL);
	if (!map)
		return;

	map->fsid_type = fh->fh_fsid_type;
	memcpy(&map->fsid, fh->fh_fsid, fsid_len);

	spin_lock(&nfsd_devid_lock);
	if (fhp->fh_export->ex_devid_map)
		goto out_unlock;

	for (i = 0; i < DEVID_HASH_SIZE; i++) {
		list_for_each_entry(old, &nfsd_devid_hash[i], hash) {
			if (old->fsid_type != fh->fh_fsid_type)
				continue;
			if (memcmp(old->fsid, fh->fh_fsid,
					key_len(old->fsid_type)))
				continue;

			fhp->fh_export->ex_devid_map = old;
			goto out_unlock;
		}
	}

	map->idx = nfsd_devid_seq++;
	list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]);
	fhp->fh_export->ex_devid_map = map;
	map = NULL;

out_unlock:
	spin_unlock(&nfsd_devid_lock);
	kfree(map);
}

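/*
 * Look up a device ID mapping by fsid index.  Lookup is lockless under
 * RCU; map entries are only ever added (under nfsd_devid_lock) and are
 * not freed until module exit.
 */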
struct nfsd4_deviceid_map *
nfsd4_find_devid_map(int idx)
{
	struct nfsd4_deviceid_map *map, *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash)
		if (map->idx == idx)
			ret = map;
	rcu_read_unlock();

	return ret;
}

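/*
 * Fill in the deviceid returned to the client: fsid_idx names the
 * exported filesystem via the devid map (allocated on first use), and
 * the generation is passed in by the caller, presumably so that layout
 * drivers can invalidate stale device information.
 */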
int
nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
		u32 device_generation)
{
	if (!fhp->fh_export->ex_devid_map) {
		nfsd4_alloc_devid_map(fhp);
		if (!fhp->fh_export->ex_devid_map)
			return -ENOMEM;
	}

	id->fsid_idx = fhp->fh_export->ex_devid_map->idx;
	id->generation = device_generation;
	id->pad = 0;
	return 0;
}

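/*
 * Work out which layout types this export can support.  Flex-files has
 * no extra requirements; the block layout needs the map_blocks,
 * commit_blocks and get_uuid export operations; the SCSI layout also
 * requires a block device with persistent-reservation support and a
 * unique ID.
 */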
void nfsd4_setup_layout_type(struct svc_export *exp)
{
#if defined(CONFIG_NFSD_BLOCKLAYOUT) || defined(CONFIG_NFSD_SCSILAYOUT)
	struct super_block *sb = exp->ex_path.mnt->mnt_sb;
#endif

	if (!(exp->ex_flags & NFSEXP_PNFS))
		return;

#ifdef CONFIG_NFSD_FLEXFILELAYOUT
	exp->ex_layout_types |= 1 << LAYOUT_FLEX_FILES;
#endif
#ifdef CONFIG_NFSD_BLOCKLAYOUT
	if (sb->s_export_op->get_uuid &&
	    sb->s_export_op->map_blocks &&
	    sb->s_export_op->commit_blocks)
		exp->ex_layout_types |= 1 << LAYOUT_BLOCK_VOLUME;
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
	if (sb->s_export_op->map_blocks &&
	    sb->s_export_op->commit_blocks &&
	    sb->s_bdev &&
	    sb->s_bdev->bd_disk->fops->pr_ops &&
	    sb->s_bdev->bd_disk->fops->get_unique_id)
		exp->ex_layout_types |= 1 << LAYOUT_SCSI;
#endif
}

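/*
 * Detach the backing file from a layout stateid: unless the layout
 * driver disables recalls, remove the FL_LAYOUT lease installed by
 * nfsd4_layout_setlease(), then drop the file reference.
 */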
void nfsd4_close_layout(struct nfs4_layout_stateid *ls)
{
	struct nfsd_file *fl;

	spin_lock(&ls->ls_stid.sc_file->fi_lock);
	fl = ls->ls_file;
	ls->ls_file = NULL;
	spin_unlock(&ls->ls_stid.sc_file->fi_lock);

	if (fl) {
		if (!nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
			kernel_setlease(fl->nf_file, F_UNLCK, NULL,
					(void **)&ls);
		nfsd_file_put(fl);
	}
}

static void
nfsd4_free_layout_stateid(struct nfs4_stid *stid)
{
	struct nfs4_layout_stateid *ls = layoutstateid(stid);
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	struct nfs4_file *fp = ls->ls_stid.sc_file;

	trace_nfsd_layoutstate_free(&ls->ls_stid.sc_stateid);

	spin_lock(&clp->cl_lock);
	list_del_init(&ls->ls_perclnt);
	spin_unlock(&clp->cl_lock);

	spin_lock(&fp->fi_lock);
	list_del_init(&ls->ls_perfile);
	spin_unlock(&fp->fi_lock);

	nfsd4_close_layout(ls);

	if (ls->ls_recalled)
		atomic_dec(&ls->ls_stid.sc_file->fi_lo_recalls);

	kmem_cache_free(nfs4_layout_stateid_cache, ls);
}

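/*
 * Take a read lease with FL_LAYOUT set on the file backing this layout
 * stateid.  A conflicting access breaks the lease, and the lm_break
 * callback (nfsd4_layout_lm_break) is what kicks off a layout recall.
 */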
static int
nfsd4_layout_setlease(struct nfs4_layout_stateid *ls)
{
	struct file_lease *fl;
	int status;

	if (nfsd4_layout_ops[ls->ls_layout_type]->disable_recalls)
		return 0;

	fl = locks_alloc_lease();
	if (!fl)
		return -ENOMEM;
	locks_init_lease(fl);
	fl->fl_lmops = &nfsd4_layouts_lm_ops;
	fl->c.flc_flags = FL_LAYOUT;
	fl->c.flc_type = F_RDLCK;
	fl->c.flc_owner = ls;
	fl->c.flc_pid = current->tgid;
	fl->c.flc_file = ls->ls_file->nf_file;

	status = kernel_setlease(fl->c.flc_file, fl->c.flc_type, &fl, NULL);
	if (status) {
		locks_free_lease(fl);
		return status;
	}
	BUG_ON(fl != NULL);
	return 0;
}

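/*
 * Create a layout stateid derived from an open, lock, or delegation
 * stateid.  The new stateid holds a reference on the nfs4_file, pins a
 * backing nfsd_file for the recall lease and fencing, and is linked on
 * the per-client and per-file lists.
 */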
static struct nfs4_layout_stateid *
nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
		struct nfs4_stid *parent, u32 layout_type)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_file *fp = parent->sc_file;
	struct nfs4_layout_stateid *ls;
	struct nfs4_stid *stp;

	stp = nfs4_alloc_stid(cstate->clp, nfs4_layout_stateid_cache,
					nfsd4_free_layout_stateid);
	if (!stp)
		return NULL;

	get_nfs4_file(fp);
	stp->sc_file = fp;

	ls = layoutstateid(stp);
	INIT_LIST_HEAD(&ls->ls_perclnt);
	INIT_LIST_HEAD(&ls->ls_perfile);
	spin_lock_init(&ls->ls_lock);
	INIT_LIST_HEAD(&ls->ls_layouts);
	mutex_init(&ls->ls_mutex);
	ls->ls_layout_type = layout_type;
	nfsd4_init_cb(&ls->ls_recall, clp, &nfsd4_cb_layout_ops,
			NFSPROC4_CLNT_CB_LAYOUT);

	if (parent->sc_type == SC_TYPE_DELEG)
		ls->ls_file = nfsd_file_get(fp->fi_deleg_file);
	else
		ls->ls_file = find_any_file(fp);
	BUG_ON(!ls->ls_file);

	if (nfsd4_layout_setlease(ls)) {
		nfsd_file_put(ls->ls_file);
		put_nfs4_file(fp);
		kmem_cache_free(nfs4_layout_stateid_cache, ls);
		return NULL;
	}

	spin_lock(&clp->cl_lock);
	stp->sc_type = SC_TYPE_LAYOUT;
	list_add(&ls->ls_perclnt, &clp->cl_lo_states);
	spin_unlock(&clp->cl_lock);

	spin_lock(&fp->fi_lock);
	list_add(&ls->ls_perfile, &fp->fi_lo_states);
	spin_unlock(&fp->fi_lock);

	trace_nfsd_layoutstate_alloc(&ls->ls_stid.sc_stateid);
	return ls;
}

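/*
 * Find the layout stateid for a layout operation, creating one from the
 * given open/lock/delegation stateid when "create" is set (the LAYOUTGET
 * case).  On success *lsp is returned with ls_mutex held and a reference
 * taken; the caller must unlock and put it.
 */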
__be32
nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, stateid_t *stateid,
		bool create, u32 layout_type, struct nfs4_layout_stateid **lsp)
{
	struct nfs4_layout_stateid *ls;
	struct nfs4_stid *stid;
	unsigned short typemask = SC_TYPE_LAYOUT;
	__be32 status;

	if (create)
		typemask |= (SC_TYPE_OPEN | SC_TYPE_LOCK | SC_TYPE_DELEG);

	status = nfsd4_lookup_stateid(cstate, stateid, typemask, 0, &stid,
			net_generic(SVC_NET(rqstp), nfsd_net_id));
	if (status)
		goto out;

	if (!fh_match(&cstate->current_fh.fh_handle,
		      &stid->sc_file->fi_fhandle)) {
		status = nfserr_bad_stateid;
		goto out_put_stid;
	}

	if (stid->sc_type != SC_TYPE_LAYOUT) {
		ls = nfsd4_alloc_layout_stateid(cstate, stid, layout_type);
		nfs4_put_stid(stid);

		status = nfserr_jukebox;
		if (!ls)
			goto out;
		mutex_lock(&ls->ls_mutex);
	} else {
		ls = container_of(stid, struct nfs4_layout_stateid, ls_stid);

		status = nfserr_bad_stateid;
		mutex_lock(&ls->ls_mutex);
		if (nfsd4_stateid_generation_after(stateid, &stid->sc_stateid))
			goto out_unlock_stid;
		if (layout_type != ls->ls_layout_type)
			goto out_unlock_stid;
	}

	*lsp = ls;
	return 0;

out_unlock_stid:
	mutex_unlock(&ls->ls_mutex);
out_put_stid:
	nfs4_put_stid(stid);
out:
	return status;
}

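/*
 * Start a CB_LAYOUTRECALL for this stateid unless one is already running
 * or there is nothing to recall.  The reference taken here is dropped by
 * nfsd4_cb_layout_release() when the callback completes.
 */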
static void
nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
{
	spin_lock(&ls->ls_lock);
	if (ls->ls_recalled)
		goto out_unlock;

	if (list_empty(&ls->ls_layouts))
		goto out_unlock;

	ls->ls_recalled = true;
	atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
	trace_nfsd_layout_recall(&ls->ls_stid.sc_stateid);

	if (!test_and_set_bit(NFSD4_CALLBACK_RUNNING, &ls->ls_recall.cb_flags)) {
		refcount_inc(&ls->ls_stid.sc_count);
		nfsd4_run_cb(&ls->ls_recall);
	}
out_unlock:
	spin_unlock(&ls->ls_lock);
}

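/* End offset of a segment, saturating to NFS4_MAX_UINT64 on overflow. */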
static inline u64
layout_end(struct nfsd4_layout_seg *seg)
{
	u64 end = seg->offset + seg->length;
	return end >= seg->offset ? end : NFS4_MAX_UINT64;
}

static void
layout_update_len(struct nfsd4_layout_seg *lo, u64 end)
{
	if (end == NFS4_MAX_UINT64)
		lo->length = NFS4_MAX_UINT64;
	else
		lo->length = end - lo->offset;
}

static bool
layouts_overlapping(struct nfs4_layout *lo, struct nfsd4_layout_seg *s)
{
	if (s->iomode != IOMODE_ANY && s->iomode != lo->lo_seg.iomode)
		return false;
	if (layout_end(&lo->lo_seg) <= s->offset)
		return false;
	if (layout_end(s) <= lo->lo_seg.offset)
		return false;
	return true;
}

static bool
layouts_try_merge(struct nfsd4_layout_seg *lo, struct nfsd4_layout_seg *new)
{
	if (lo->iomode != new->iomode)
		return false;
	if (layout_end(new) < lo->offset)
		return false;
	if (layout_end(lo) < new->offset)
		return false;

	lo->offset = min(lo->offset, new->offset);
	layout_update_len(lo, max(layout_end(lo), layout_end(new)));
	return true;
}

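/*
 * Recall every other layout stateid on this file and return
 * NFS4ERR_RECALLCONFLICT so the requester retries after the recalls
 * complete.  Caller must hold fi_lock.
 */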
static __be32
nfsd4_recall_conflict(struct nfs4_layout_stateid *ls)
{
	struct nfs4_file *fp = ls->ls_stid.sc_file;
	struct nfs4_layout_stateid *l, *n;
	__be32 nfserr = nfs_ok;

	assert_spin_locked(&fp->fi_lock);

	list_for_each_entry_safe(l, n, &fp->fi_lo_states, ls_perfile) {
		if (l != ls) {
			nfsd4_recall_file_layout(l);
			nfserr = nfserr_recallconflict;
		}
	}

	return nfserr;
}

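/*
 * Record a granted layout segment on the stateid, merging with an
 * existing segment where possible.  The segment allocation must happen
 * with fi_lock and ls_lock dropped, so the conflict check and the merge
 * scan are repeated once the locks are retaken.
 */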
__be32
nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls)
{
	struct nfsd4_layout_seg *seg = &lgp->lg_seg;
	struct nfs4_file *fp = ls->ls_stid.sc_file;
	struct nfs4_layout *lp, *new = NULL;
	__be32 nfserr;

	spin_lock(&fp->fi_lock);
	nfserr = nfsd4_recall_conflict(ls);
	if (nfserr)
		goto out;
	spin_lock(&ls->ls_lock);
	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
		if (layouts_try_merge(&lp->lo_seg, seg))
			goto done;
	}
	spin_unlock(&ls->ls_lock);
	spin_unlock(&fp->fi_lock);

	new = kmem_cache_alloc(nfs4_layout_cache, GFP_KERNEL);
	if (!new)
		return nfserr_jukebox;
	memcpy(&new->lo_seg, seg, sizeof(new->lo_seg));
	new->lo_state = ls;

	spin_lock(&fp->fi_lock);
	nfserr = nfsd4_recall_conflict(ls);
	if (nfserr)
		goto out;
	spin_lock(&ls->ls_lock);
	list_for_each_entry(lp, &ls->ls_layouts, lo_perstate) {
		if (layouts_try_merge(&lp->lo_seg, seg))
			goto done;
	}

	refcount_inc(&ls->ls_stid.sc_count);
	list_add_tail(&new->lo_perstate, &ls->ls_layouts);
	new = NULL;
done:
	nfs4_inc_and_copy_stateid(&lgp->lg_sid, &ls->ls_stid);
	spin_unlock(&ls->ls_lock);
out:
	spin_unlock(&fp->fi_lock);
	if (new)
		kmem_cache_free(nfs4_layout_cache, new);
	return nfserr;
}

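/*
 * Free the segments on a reaplist, dropping the layout stateid reference
 * held by each.
 */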
static void
nfsd4_free_layouts(struct list_head *reaplist)
{
	while (!list_empty(reaplist)) {
		struct nfs4_layout *lp = list_first_entry(reaplist,
				struct nfs4_layout, lo_perstate);

		list_del(&lp->lo_perstate);
		nfs4_put_stid(&lp->lo_state->ls_stid);
		kmem_cache_free(nfs4_layout_cache, lp);
	}
}

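/*
 * Apply a returned range to a single segment: free it when fully
 * covered, trim it at the front or back for a partial overlap, and leave
 * it untouched when the return range would split it, since splitting is
 * not supported.
 */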
static void
nfsd4_return_file_layout(struct nfs4_layout *lp, struct nfsd4_layout_seg *seg,
		struct list_head *reaplist)
{
	struct nfsd4_layout_seg *lo = &lp->lo_seg;
	u64 end = layout_end(lo);

	if (seg->offset <= lo->offset) {
		if (layout_end(seg) >= end) {
			list_move_tail(&lp->lo_perstate, reaplist);
			return;
		}
		lo->offset = layout_end(seg);
	} else {
		/* retain the whole layout segment on a split. */
		if (layout_end(seg) < end) {
			dprintk("%s: split not supported\n", __func__);
			return;
		}
		end = seg->offset;
	}

	layout_update_len(lo, end);
}

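/*
 * LAYOUTRETURN for a single file: reap or trim every segment overlapping
 * the requested range, bump the stateid if anything matched, and close
 * the stateid once no segments remain.
 */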
__be32
nfsd4_return_file_layouts(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		struct nfsd4_layoutreturn *lrp)
{
	struct nfs4_layout_stateid *ls;
	struct nfs4_layout *lp, *n;
	LIST_HEAD(reaplist);
	__be32 nfserr;
	int found = 0;

	nfserr = nfsd4_preprocess_layout_stateid(rqstp, cstate, &lrp->lr_sid,
						false, lrp->lr_layout_type,
						&ls);
	if (nfserr) {
		trace_nfsd_layout_return_lookup_fail(&lrp->lr_sid);
		return nfserr;
	}

	spin_lock(&ls->ls_lock);
	list_for_each_entry_safe(lp, n, &ls->ls_layouts, lo_perstate) {
		if (layouts_overlapping(lp, &lrp->lr_seg)) {
			nfsd4_return_file_layout(lp, &lrp->lr_seg, &reaplist);
			found++;
		}
	}
	if (!list_empty(&ls->ls_layouts)) {
		if (found)
			nfs4_inc_and_copy_stateid(&lrp->lr_sid, &ls->ls_stid);
		lrp->lrs_present = true;
	} else {
		trace_nfsd_layoutstate_unhash(&ls->ls_stid.sc_stateid);
		ls->ls_stid.sc_status |= SC_STATUS_CLOSED;
		lrp->lrs_present = false;
	}
	spin_unlock(&ls->ls_lock);

	mutex_unlock(&ls->ls_mutex);
	nfs4_put_stid(&ls->ls_stid);
	nfsd4_free_layouts(&reaplist);
	return nfs_ok;
}

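/*
 * LAYOUTRETURN of type FSID or ALL: walk the client's layout stateids
 * and reap every segment that matches the requested fsid and iomode.
 */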
__be32
nfsd4_return_client_layouts(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		struct nfsd4_layoutreturn *lrp)
{
	struct nfs4_layout_stateid *ls, *n;
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_layout *lp, *t;
	LIST_HEAD(reaplist);

	lrp->lrs_present = false;

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt) {
		if (ls->ls_layout_type != lrp->lr_layout_type)
			continue;

		if (lrp->lr_return_type == RETURN_FSID &&
		    !fh_fsid_match(&ls->ls_stid.sc_file->fi_fhandle,
				   &cstate->current_fh.fh_handle))
			continue;

		spin_lock(&ls->ls_lock);
		list_for_each_entry_safe(lp, t, &ls->ls_layouts, lo_perstate) {
			if (lrp->lr_seg.iomode == IOMODE_ANY ||
			    lrp->lr_seg.iomode == lp->lo_seg.iomode)
				list_move_tail(&lp->lo_perstate, &reaplist);
		}
		spin_unlock(&ls->ls_lock);
	}
	spin_unlock(&clp->cl_lock);

	nfsd4_free_layouts(&reaplist);
	return 0;
}

static void
nfsd4_return_all_layouts(struct nfs4_layout_stateid *ls,
		struct list_head *reaplist)
{
	spin_lock(&ls->ls_lock);
	list_splice_init(&ls->ls_layouts, reaplist);
	spin_unlock(&ls->ls_lock);
}

void
nfsd4_return_all_client_layouts(struct nfs4_client *clp)
{
	struct nfs4_layout_stateid *ls, *n;
	LIST_HEAD(reaplist);

	spin_lock(&clp->cl_lock);
	list_for_each_entry_safe(ls, n, &clp->cl_lo_states, ls_perclnt)
		nfsd4_return_all_layouts(ls, &reaplist);
	spin_unlock(&clp->cl_lock);

	nfsd4_free_layouts(&reaplist);
}

void
nfsd4_return_all_file_layouts(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_layout_stateid *ls, *n;
	LIST_HEAD(reaplist);

	spin_lock(&fp->fi_lock);
	list_for_each_entry_safe(ls, n, &fp->fi_lo_states, ls_perfile) {
		if (ls->ls_stid.sc_client == clp)
			nfsd4_return_all_layouts(ls, &reaplist);
	}
	spin_unlock(&fp->fi_lock);

	nfsd4_free_layouts(&reaplist);
}

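/*
 * Fence a client that failed to respond to a layout recall by running
 * the /sbin/nfsd-recall-failed usermode helper with the client address
 * and the superblock ID of the exported filesystem as arguments.
 */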
static void
nfsd4_cb_layout_fail(struct nfs4_layout_stateid *ls, struct nfsd_file *file)
{
	struct nfs4_client *clp = ls->ls_stid.sc_client;
	char addr_str[INET6_ADDRSTRLEN];
	static char const nfsd_recall_failed[] = "/sbin/nfsd-recall-failed";
	static char *envp[] = {
		"HOME=/",
		"TERM=linux",
		"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
		NULL
	};
	char *argv[8];
	int error;

	rpc_ntop((struct sockaddr *)&clp->cl_addr, addr_str, sizeof(addr_str));

	printk(KERN_WARNING
		"nfsd: client %s failed to respond to layout recall. "
		"Fencing...\n", addr_str);

	argv[0] = (char *)nfsd_recall_failed;
	argv[1] = addr_str;
	argv[2] = file->nf_file->f_path.mnt->mnt_sb->s_id;
	argv[3] = NULL;

	error = call_usermodehelper(nfsd_recall_failed, argv, envp,
				    UMH_WAIT_PROC);
	if (error) {
		printk(KERN_ERR "nfsd: fence failed for client %s: %d!\n",
			addr_str, error);
	}
}

static void
nfsd4_cb_layout_prepare(struct nfsd4_callback *cb)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);

	mutex_lock(&ls->ls_mutex);
	nfs4_inc_and_copy_stateid(&ls->ls_recall_sid, &ls->ls_stid);
	mutex_unlock(&ls->ls_mutex);
}

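/*
 * Completion handler for CB_LAYOUTRECALL.  A return value of 1 ends the
 * callback; 0 after rpc_delay() requeues it, which is used to poll a
 * client that answered NFS4ERR_DELAY until it returns the layout or two
 * lease periods expire, at which point the client is fenced.
 */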
static int
nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	struct nfsd_net *nn;
	ktime_t now, cutoff;
	const struct nfsd4_layout_ops *ops;
	struct nfsd_file *fl;

	trace_nfsd_cb_layout_done(&ls->ls_stid.sc_stateid, task);
	switch (task->tk_status) {
	case 0:
	case -NFS4ERR_DELAY:
		/*
		 * Anything left? If not, then call it done. Note that we don't
		 * take the spinlock since this is an optimization and nothing
		 * should get added until the cb counter goes to zero.
		 */
		if (list_empty(&ls->ls_layouts))
			return 1;

		/* Poll the client until it's done with the layout */
		now = ktime_get();
		nn = net_generic(ls->ls_stid.sc_client->net, nfsd_net_id);

		/* Client gets 2 lease periods to return it */
		cutoff = ktime_add_ns(task->tk_start,
					 (u64)nn->nfsd4_lease * NSEC_PER_SEC * 2);

		if (ktime_before(now, cutoff)) {
			rpc_delay(task, HZ/100); /* 10 milliseconds */
			return 0;
		}
		fallthrough;
	default:
		/*
		 * Unknown error or non-responding client; we'll need to fence.
		 */
		trace_nfsd_layout_recall_fail(&ls->ls_stid.sc_stateid);
		rcu_read_lock();
		fl = nfsd_file_get(ls->ls_file);
		rcu_read_unlock();
		if (fl) {
			ops = nfsd4_layout_ops[ls->ls_layout_type];
			if (ops->fence_client)
				ops->fence_client(ls, fl);
			else
				nfsd4_cb_layout_fail(ls, fl);
			nfsd_file_put(fl);
		}
		return 1;
	case -NFS4ERR_NOMATCHING_LAYOUT:
		trace_nfsd_layout_recall_done(&ls->ls_stid.sc_stateid);
		task->tk_status = 0;
		return 1;
	}
}

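/*
 * Release handler for CB_LAYOUTRECALL: reap any segments still attached
 * to the stateid and drop the reference taken when the recall started.
 */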
static void
nfsd4_cb_layout_release(struct nfsd4_callback *cb)
{
	struct nfs4_layout_stateid *ls =
		container_of(cb, struct nfs4_layout_stateid, ls_recall);
	LIST_HEAD(reaplist);

	trace_nfsd_layout_recall_release(&ls->ls_stid.sc_stateid);

	nfsd4_return_all_layouts(ls, &reaplist);
	nfsd4_free_layouts(&reaplist);
	nfs4_put_stid(&ls->ls_stid);
}

static const struct nfsd4_callback_ops nfsd4_cb_layout_ops = {
	.prepare	= nfsd4_cb_layout_prepare,
	.done		= nfsd4_cb_layout_done,
	.release	= nfsd4_cb_layout_release,
	.opcode		= OP_CB_LAYOUTRECALL,
};

static bool
nfsd4_layout_lm_break(struct file_lease *fl)
{
	/*
	 * We don't want the locks code to time out the lease for us;
	 * we'll remove it ourselves if a layout isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;
	nfsd4_recall_file_layout(fl->c.flc_owner);
	return false;
}

static int
nfsd4_layout_lm_change(struct file_lease *onlist, int arg,
		struct list_head *dispose)
{
	BUG_ON(!(arg & F_UNLCK));
	return lease_modify(onlist, arg, dispose);
}

static const struct lease_manager_operations nfsd4_layouts_lm_ops = {
	.lm_break	= nfsd4_layout_lm_break,
	.lm_change	= nfsd4_layout_lm_change,
};

int
nfsd4_init_pnfs(void)
{
	int i;

	for (i = 0; i < DEVID_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nfsd_devid_hash[i]);

	nfs4_layout_cache = KMEM_CACHE(nfs4_layout, 0);
	if (!nfs4_layout_cache)
		return -ENOMEM;

	nfs4_layout_stateid_cache = KMEM_CACHE(nfs4_layout_stateid, 0);
	if (!nfs4_layout_stateid_cache) {
		kmem_cache_destroy(nfs4_layout_cache);
		return -ENOMEM;
	}
	return 0;
}

void
nfsd4_exit_pnfs(void)
{
	int i;

	kmem_cache_destroy(nfs4_layout_cache);
	kmem_cache_destroy(nfs4_layout_stateid_cache);

	for (i = 0; i < DEVID_HASH_SIZE; i++) {
		struct nfsd4_deviceid_map *map, *n;

		list_for_each_entry_safe(map, n, &nfsd_devid_hash[i], hash)
			kfree(map);
	}
}