// SPDX-License-Identifier: GPL-2.0-only
/*
 * Module for pnfs flexfile layout driver.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tao Peng <bergwolf@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/file.h>
#include <linux/sched/mm.h>

#include <linux/sunrpc/metrics.h>

#include "flexfilelayout.h"
#include "../nfs4session.h"
#include "../nfs4idmap.h"
#include "../internal.h"
#include "../delegation.h"
#include "../nfs4trace.h"
#include "../iostat.h"
#include "../nfs.h"
#include "../nfs42.h"

#define NFSDBG_FACILITY         NFSDBG_PNFS_LD

#define FF_LAYOUT_POLL_RETRY_MAX     (15*HZ)
#define FF_LAYOUTRETURN_MAXERR 20

enum nfs4_ff_op_type {
	NFS4_FF_OP_LAYOUTSTATS,
	NFS4_FF_OP_LAYOUTRETURN,
};

static unsigned short io_maxretrans;

static const struct pnfs_commit_ops ff_layout_commit_ops;
static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr);
static int
ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
			       struct nfs42_layoutstat_devinfo *devinfo,
			       int dev_limit, enum nfs4_ff_op_type type);
static void ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
			      const struct nfs42_layoutstat_devinfo *devinfo,
			      struct nfs4_ff_layout_mirror *mirror);

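/*
 * Allocate and initialise the flexfile layout header, including the
 * per-layout DS commit info, the list of data-server errors to be
 * reported on LAYOUTRETURN, and the list of mirrors shared by this
 * layout's segments.
 */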
static struct pnfs_layout_hdr *
ff_layout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct nfs4_flexfile_layout *ffl;

	ffl = kzalloc(sizeof(*ffl), gfp_flags);
	if (ffl) {
		pnfs_init_ds_commit_info(&ffl->commit_info);
		INIT_LIST_HEAD(&ffl->error_list);
		INIT_LIST_HEAD(&ffl->mirrors);
		ffl->last_report_time = ktime_get();
		ffl->commit_info.ops = &ff_layout_commit_ops;
		return &ffl->generic_hdr;
	} else
		return NULL;
}

static void
ff_layout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_ds_err *err, *n;

	list_for_each_entry_safe(err, n, &ffl->error_list, list) {
		list_del(&err->list);
		kfree(err);
	}
	kfree_rcu(ffl, generic_hdr.plh_rcu);
}

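/*
 * XDR decode helpers for the flex files layout body (RFC 8435). Each
 * helper consumes its field from the xdr_stream and returns 0, or a
 * negative errno if the buffer is short or the field is malformed.
 */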
static int decode_pnfs_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_STATEID_SIZE);
	if (unlikely(p == NULL))
		return -ENOBUFS;
	stateid->type = NFS4_PNFS_DS_STATEID_TYPE;
	memcpy(stateid->data, p, NFS4_STATEID_SIZE);
	dprintk("%s: stateid id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
	return 0;
}

static int decode_deviceid(struct xdr_stream *xdr, struct nfs4_deviceid *devid)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, NFS4_DEVICEID4_SIZE);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(devid, p, NFS4_DEVICEID4_SIZE);
	nfs4_print_deviceid(devid);
	return 0;
}

static int decode_nfs_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
{
	__be32 *p;

	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	fh->size = be32_to_cpup(p++);
	if (fh->size > NFS_MAXFHSIZE) {
		printk(KERN_ERR "NFS flexfiles: Too big fh received %d\n",
		       fh->size);
		return -EOVERFLOW;
	}
	/* fh.data */
	p = xdr_inline_decode(xdr, fh->size);
	if (unlikely(!p))
		return -ENOBUFS;
	memcpy(&fh->data, p, fh->size);
	dprintk("%s: fh len %d\n", __func__, fh->size);

	return 0;
}

/*
 * Currently only stringified uids and gids are accepted.
 * I.e., kerberos is not supported to the DSes, so no principals.
 *
 * That means that one common function will suffice, but when
 * principals are added, this should be split to accommodate
 * calls to both nfs_map_name_to_uid() and nfs_map_group_to_gid().
 */
static int
decode_name(struct xdr_stream *xdr, u32 *id)
{
	__be32 *p;
	int len;

	/* opaque_length(4)*/
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		return -ENOBUFS;
	len = be32_to_cpup(p++);
	if (len < 0)
		return -EINVAL;

	dprintk("%s: len %u\n", __func__, len);

	/* opaque body */
	p = xdr_inline_decode(xdr, len);
	if (unlikely(!p))
		return -ENOBUFS;

	if (!nfs_map_string_to_numeric((char *)p, len, id))
		return -EINVAL;

	return 0;
}

static struct nfsd_file *
ff_local_open_fh(struct pnfs_layout_segment *lseg, u32 ds_idx,
		 struct nfs_client *clp, const struct cred *cred,
		 struct nfs_fh *fh, fmode_t mode)
{
#if IS_ENABLED(CONFIG_NFS_LOCALIO)
	struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);

	return nfs_local_open_fh(clp, cred, fh, &mirror->nfl, mode);
#else
	return NULL;
#endif
}

static bool ff_mirror_match_fh(const struct nfs4_ff_layout_mirror *m1,
		const struct nfs4_ff_layout_mirror *m2)
{
	int i, j;

	if (m1->fh_versions_cnt != m2->fh_versions_cnt)
		return false;
	for (i = 0; i < m1->fh_versions_cnt; i++) {
		bool found_fh = false;
		for (j = 0; j < m2->fh_versions_cnt; j++) {
			if (nfs_compare_fh(&m1->fh_versions[i],
					&m2->fh_versions[j]) == 0) {
				found_fh = true;
				break;
			}
		}
		if (!found_fh)
			return false;
	}
	return true;
}

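/*
 * Deduplicate mirrors across layout segments: if a mirror with the
 * same deviceid and filehandle set is already on the layout's mirror
 * list, take a reference on it and return it instead of adding the
 * caller's copy.
 */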
static struct nfs4_ff_layout_mirror *
ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
		struct nfs4_ff_layout_mirror *mirror)
{
	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
	struct nfs4_ff_layout_mirror *pos;
	struct inode *inode = lo->plh_inode;

	spin_lock(&inode->i_lock);
	list_for_each_entry(pos, &ff_layout->mirrors, mirrors) {
		if (memcmp(&mirror->devid, &pos->devid, sizeof(pos->devid)) != 0)
			continue;
		if (!ff_mirror_match_fh(mirror, pos))
			continue;
		if (refcount_inc_not_zero(&pos->ref)) {
			spin_unlock(&inode->i_lock);
			return pos;
		}
	}
	list_add(&mirror->mirrors, &ff_layout->mirrors);
	mirror->layout = lo;
	spin_unlock(&inode->i_lock);
	return mirror;
}

static void
ff_layout_remove_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	struct inode *inode;
	if (mirror->layout == NULL)
		return;
	inode = mirror->layout->plh_inode;
	spin_lock(&inode->i_lock);
	list_del(&mirror->mirrors);
	spin_unlock(&inode->i_lock);
	mirror->layout = NULL;
}

static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
{
	struct nfs4_ff_layout_mirror *mirror;

	mirror = kzalloc(sizeof(*mirror), gfp_flags);
	if (mirror != NULL) {
		spin_lock_init(&mirror->lock);
		refcount_set(&mirror->ref, 1);
		INIT_LIST_HEAD(&mirror->mirrors);
		nfs_localio_file_init(&mirror->nfl);
	}
	return mirror;
}

static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	const struct cred *cred;

	ff_layout_remove_mirror(mirror);
	kfree(mirror->fh_versions);
	nfs_close_local_fh(&mirror->nfl);
	cred = rcu_access_pointer(mirror->ro_cred);
	put_cred(cred);
	cred = rcu_access_pointer(mirror->rw_cred);
	put_cred(cred);
	nfs4_ff_layout_put_deviceid(mirror->mirror_ds);
	kfree(mirror);
}

static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
{
	if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
		ff_layout_free_mirror(mirror);
}

static void ff_layout_free_mirror_array(struct nfs4_ff_layout_segment *fls)
{
	u32 i;

	for (i = 0; i < fls->mirror_array_cnt; i++)
		ff_layout_put_mirror(fls->mirror_array[i]);
}

static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls)
{
	if (fls) {
		ff_layout_free_mirror_array(fls);
		kfree(fls);
	}
}

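/*
 * Two layout segments can only be coalesced if they use the same set
 * of mirrors, in the same order.
 */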
static bool
ff_lseg_match_mirrors(struct pnfs_layout_segment *l1,
		struct pnfs_layout_segment *l2)
{
	const struct nfs4_ff_layout_segment *fl1 = FF_LAYOUT_LSEG(l1);
	const struct nfs4_ff_layout_segment *fl2 = FF_LAYOUT_LSEG(l2);
	u32 i;

	if (fl1->mirror_array_cnt != fl2->mirror_array_cnt)
		return false;
	for (i = 0; i < fl1->mirror_array_cnt; i++) {
		if (fl1->mirror_array[i] != fl2->mirror_array[i])
			return false;
	}
	return true;
}

static bool
ff_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	u64 end1, end2;

	if (l1->iomode != l2->iomode)
		return l1->iomode != IOMODE_READ;
	end1 = pnfs_calc_offset_end(l1->offset, l1->length);
	end2 = pnfs_calc_offset_end(l2->offset, l2->length);
	if (end1 < l2->offset)
		return false;
	if (end2 < l1->offset)
		return true;
	return l2->offset <= l1->offset;
}

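/*
 * Attempt to fold the byte range of 'old' into 'new': merging is only
 * possible if the iomodes match, the ranges overlap, and both segments
 * use the same set of mirrors. On success 'new' absorbs 'old', which
 * the caller then frees.
 */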
static bool
ff_lseg_merge(struct pnfs_layout_segment *new,
		struct pnfs_layout_segment *old)
{
	u64 new_end, old_end;

	if (test_bit(NFS_LSEG_LAYOUTRETURN, &old->pls_flags))
		return false;
	if (new->pls_range.iomode != old->pls_range.iomode)
		return false;
	old_end = pnfs_calc_offset_end(old->pls_range.offset,
			old->pls_range.length);
	if (old_end < new->pls_range.offset)
		return false;
	new_end = pnfs_calc_offset_end(new->pls_range.offset,
			new->pls_range.length);
	if (new_end < old->pls_range.offset)
		return false;
	if (!ff_lseg_match_mirrors(new, old))
		return false;

	/* Mergeable: copy info from 'old' to 'new' */
	if (new_end < old_end)
		new_end = old_end;
	if (new->pls_range.offset < old->pls_range.offset)
		new->pls_range.offset = old->pls_range.offset;
	new->pls_range.length = pnfs_calc_offset_length(new->pls_range.offset,
			new_end);
	if (test_bit(NFS_LSEG_ROC, &old->pls_flags))
		set_bit(NFS_LSEG_ROC, &new->pls_flags);
	return true;
}

static void
ff_layout_add_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{
	pnfs_generic_layout_insert_lseg(lo, lseg,
			ff_lseg_range_is_after,
			ff_lseg_merge,
			free_me);
}

static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls)
{
	int i, j;

	for (i = 0; i < fls->mirror_array_cnt - 1; i++) {
		for (j = i + 1; j < fls->mirror_array_cnt; j++)
			if (fls->mirror_array[i]->efficiency <
			    fls->mirror_array[j]->efficiency)
				swap(fls->mirror_array[i],
				     fls->mirror_array[j]);
	}
}

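/*
 * Decode an ff_layout4 body (RFC 8435) into a new layout segment: the
 * stripe unit and mirror count, then for each mirror the data-server
 * count, deviceid, efficiency, stateid, filehandle list and synthetic
 * user/group, and finally the optional flags and stats collection
 * hint. Mirrors are deduplicated against the layout header and sorted
 * by efficiency before the segment is returned.
 */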
static struct pnfs_layout_segment *
ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh,
		     struct nfs4_layoutget_res *lgr,
		     gfp_t gfp_flags)
{
	struct pnfs_layout_segment *ret;
	struct nfs4_ff_layout_segment *fls = NULL;
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	u64 stripe_unit;
	u32 mirror_array_cnt;
	__be32 *p;
	int i, rc;

	dprintk("--> %s\n", __func__);
	scratch = alloc_page(gfp_flags);
	if (!scratch)
		return ERR_PTR(-ENOMEM);

	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages,
			      lgr->layoutp->len);
	xdr_set_scratch_page(&stream, scratch);

	/* stripe unit and mirror_array_cnt */
	rc = -EIO;
	p = xdr_inline_decode(&stream, 8 + 4);
	if (!p)
		goto out_err_free;

	p = xdr_decode_hyper(p, &stripe_unit);
	mirror_array_cnt = be32_to_cpup(p++);
	dprintk("%s: stripe_unit=%llu mirror_array_cnt=%u\n", __func__,
		stripe_unit, mirror_array_cnt);

	if (mirror_array_cnt > NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT ||
	    mirror_array_cnt == 0)
		goto out_err_free;

	rc = -ENOMEM;
	fls = kzalloc(struct_size(fls, mirror_array, mirror_array_cnt),
			gfp_flags);
	if (!fls)
		goto out_err_free;

	fls->mirror_array_cnt = mirror_array_cnt;
	fls->stripe_unit = stripe_unit;

	for (i = 0; i < fls->mirror_array_cnt; i++) {
		struct nfs4_ff_layout_mirror *mirror;
		struct cred *kcred;
		const struct cred __rcu *cred;
		kuid_t uid;
		kgid_t gid;
		u32 ds_count, fh_count, id;
		int j;

		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		ds_count = be32_to_cpup(p);

		/* FIXME: allow for striping? */
		if (ds_count != 1)
			goto out_err_free;

		fls->mirror_array[i] = ff_layout_alloc_mirror(gfp_flags);
		if (fls->mirror_array[i] == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		fls->mirror_array[i]->ds_count = ds_count;

		/* deviceid */
		rc = decode_deviceid(&stream, &fls->mirror_array[i]->devid);
		if (rc)
			goto out_err_free;

		/* efficiency */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fls->mirror_array[i]->efficiency = be32_to_cpup(p);

		/* stateid */
		rc = decode_pnfs_stateid(&stream, &fls->mirror_array[i]->stateid);
		if (rc)
			goto out_err_free;

		/* fh */
		rc = -EIO;
		p = xdr_inline_decode(&stream, 4);
		if (!p)
			goto out_err_free;
		fh_count = be32_to_cpup(p);

		fls->mirror_array[i]->fh_versions =
			kcalloc(fh_count, sizeof(struct nfs_fh),
				gfp_flags);
		if (fls->mirror_array[i]->fh_versions == NULL) {
			rc = -ENOMEM;
			goto out_err_free;
		}

		for (j = 0; j < fh_count; j++) {
			rc = decode_nfs_fh(&stream,
					   &fls->mirror_array[i]->fh_versions[j]);
			if (rc)
				goto out_err_free;
		}

		fls->mirror_array[i]->fh_versions_cnt = fh_count;

		/* user */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		uid = make_kuid(&init_user_ns, id);

		/* group */
		rc = decode_name(&stream, &id);
		if (rc)
			goto out_err_free;

		gid = make_kgid(&init_user_ns, id);

		if (gfp_flags & __GFP_FS)
			kcred = prepare_kernel_cred(&init_task);
		else {
			unsigned int nofs_flags = memalloc_nofs_save();
			kcred = prepare_kernel_cred(&init_task);
			memalloc_nofs_restore(nofs_flags);
		}
		rc = -ENOMEM;
		if (!kcred)
			goto out_err_free;
		kcred->fsuid = uid;
		kcred->fsgid = gid;
		cred = RCU_INITIALIZER(kcred);

		if (lgr->range.iomode == IOMODE_READ)
			rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
		else
			rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);

		mirror = ff_layout_add_mirror(lh, fls->mirror_array[i]);
		if (mirror != fls->mirror_array[i]) {
			/* swap cred ptrs so free_mirror will clean up old */
			if (lgr->range.iomode == IOMODE_READ) {
				cred = xchg(&mirror->ro_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->ro_cred, cred);
			} else {
				cred = xchg(&mirror->rw_cred, cred);
				rcu_assign_pointer(fls->mirror_array[i]->rw_cred, cred);
			}
			ff_layout_free_mirror(fls->mirror_array[i]);
			fls->mirror_array[i] = mirror;
		}

		dprintk("%s: iomode %s uid %u gid %u\n", __func__,
			lgr->range.iomode == IOMODE_READ ? "READ" : "RW",
			from_kuid(&init_user_ns, uid),
			from_kgid(&init_user_ns, gid));
	}

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	fls->flags = be32_to_cpup(p);

	p = xdr_inline_decode(&stream, 4);
	if (!p)
		goto out_sort_mirrors;
	for (i = 0; i < fls->mirror_array_cnt; i++)
		fls->mirror_array[i]->report_interval = be32_to_cpup(p);

out_sort_mirrors:
	ff_layout_sort_mirrors(fls);
	ret = &fls->generic_hdr;
	dprintk("<-- %s (success)\n", __func__);
out_free_page:
	__free_page(scratch);
	return ret;
out_err_free:
	_ff_layout_free_lseg(fls);
	ret = ERR_PTR(rc);
	dprintk("<-- %s (%d)\n", __func__, rc);
	goto out_free_page;
}

static void
ff_layout_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);

	dprintk("--> %s\n", __func__);

	if (lseg->pls_range.iomode == IOMODE_RW) {
		struct nfs4_flexfile_layout *ffl;
		struct inode *inode;

		ffl = FF_LAYOUT_FROM_HDR(lseg->pls_layout);
		inode = ffl->generic_hdr.plh_inode;
		spin_lock(&inode->i_lock);
		pnfs_generic_ds_cinfo_release_lseg(&ffl->commit_info, lseg);
		spin_unlock(&inode->i_lock);
	}
	_ff_layout_free_lseg(fls);
}

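/*
 * Layoutstats busy timer: counts the number of in-flight ops against a
 * mirror and the wall-clock time the mirror has been busy, feeding the
 * per-mirror busy-time metric reported via LAYOUTSTATS.
 */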
static void
nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	/* first IO request? */
	if (atomic_inc_return(&timer->n_ops) == 1) {
		timer->start_time = now;
	}
}

static ktime_t
nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer, ktime_t now)
{
	ktime_t start;

	if (atomic_dec_return(&timer->n_ops) < 0)
		WARN_ON_ONCE(1);

	start = timer->start_time;
	timer->start_time = now;
	return ktime_sub(now, start);
}

static bool
nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror,
			    struct nfs4_ff_layoutstat *layoutstat,
			    ktime_t now)
{
	s64 report_interval = FF_LAYOUTSTATS_REPORT_INTERVAL;
	struct nfs4_flexfile_layout *ffl = FF_LAYOUT_FROM_HDR(mirror->layout);

	nfs4_ff_start_busy_timer(&layoutstat->busy_timer, now);
	if (!mirror->start_time)
		mirror->start_time = now;
	if (mirror->report_interval != 0)
		report_interval = (s64)mirror->report_interval * 1000LL;
	else if (layoutstats_timer != 0)
		report_interval = (s64)layoutstats_timer * 1000LL;
	if (ktime_to_ms(ktime_sub(now, ffl->last_report_time)) >=
			report_interval) {
		ffl->last_report_time = now;
		return true;
	}

	return false;
}

static void
nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;

	iostat->ops_requested++;
	iostat->bytes_requested += requested;
}

static void
nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat,
		__u64 requested,
		__u64 completed,
		ktime_t time_completed,
		ktime_t time_started)
{
	struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat;
	ktime_t completion_time = ktime_sub(time_completed, time_started);
	ktime_t timer;

	iostat->ops_completed++;
	iostat->bytes_completed += completed;
	iostat->bytes_not_delivered += requested - completed;

	timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer, time_completed);
	iostat->total_busy_time =
			ktime_add(iostat->total_busy_time, timer);
	iostat->aggregate_completion_time =
			ktime_add(iostat->aggregate_completion_time,
					completion_time);
}

static void
nfs4_ff_layout_stat_io_start_read(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
}

static void
nfs4_ff_layout_stat_io_end_read(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed)
{
	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat,
			requested, completed,
			ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}

static void
nfs4_ff_layout_stat_io_start_write(struct inode *inode,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested, ktime_t now)
{
	bool report;

	spin_lock(&mirror->lock);
	report = nfs4_ff_layoutstat_start_io(mirror, &mirror->write_stat, now);
	nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);

	if (report)
		pnfs_report_layoutstat(inode, nfs_io_gfp_mask());
}

static void
nfs4_ff_layout_stat_io_end_write(struct rpc_task *task,
		struct nfs4_ff_layout_mirror *mirror,
		__u64 requested,
		__u64 completed,
		enum nfs3_stable_how committed)
{
	if (committed == NFS_UNSTABLE)
		requested = completed = 0;

	spin_lock(&mirror->lock);
	nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat,
			requested, completed, ktime_get(), task->tk_start);
	set_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags);
	spin_unlock(&mirror->lock);
}

static void
ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	if (devid)
		nfs4_mark_deviceid_unavailable(devid);
}

static void
ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	if (devid)
		nfs4_mark_deviceid_available(devid);
}

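/*
 * Walk the mirror array from start_idx looking for a data server we
 * can read from. Mirrors are sorted by efficiency, so the first hit
 * is the best available choice; with check_device set, mirrors whose
 * device has been marked unavailable are skipped.
 */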
static struct nfs4_pnfs_ds *
ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
			     u32 start_idx, u32 *best_idx,
			     bool check_device)
{
	struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	u32 idx;

	/* mirrors are initially sorted by efficiency */
	for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
		mirror = FF_LAYOUT_COMP(lseg, idx);
		ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
		if (!ds)
			continue;

		if (check_device &&
		    nfs4_test_deviceid_unavailable(&mirror->mirror_ds->id_node))
			continue;

		*best_idx = idx;
		return ds;
	}

	return NULL;
}

static struct nfs4_pnfs_ds *
ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg,
				 u32 start_idx, u32 *best_idx)
{
	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false);
}

static struct nfs4_pnfs_ds *
ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg,
				   u32 start_idx, u32 *best_idx)
{
	return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true);
}

static struct nfs4_pnfs_ds *
ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg,
				  u32 start_idx, u32 *best_idx)
{
	struct nfs4_pnfs_ds *ds;

	ds = ff_layout_choose_valid_ds_for_read(lseg, start_idx, best_idx);
	if (ds)
		return ds;
	return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx);
}

static struct nfs4_pnfs_ds *
ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio,
			  u32 *best_idx)
{
	struct pnfs_layout_segment *lseg = pgio->pg_lseg;
	struct nfs4_pnfs_ds *ds;

	ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx,
					       best_idx);
	if (ds || !pgio->pg_mirror_idx)
		return ds;
	return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx);
}

static void
ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio,
		      struct nfs_page *req,
		      bool strict_iomode)
{
	pnfs_put_lseg(pgio->pg_lseg);
	pgio->pg_lseg =
		pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
				   req_offset(req), req->wb_bytes, IOMODE_READ,
				   strict_iomode, nfs_io_gfp_mask());
	if (IS_ERR(pgio->pg_lseg)) {
		pgio->pg_error = PTR_ERR(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
	}
}

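/*
 * Prepare a pageio descriptor for reading: obtain a layout segment,
 * pick a data server, and point the (single) pageio mirror at it.
 * Falls back to the MDS if no DS is usable and the layout permits it,
 * otherwise retries after a short sleep.
 */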
static void
ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs_pgio_mirror *pgm;
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs4_pnfs_ds *ds;
	u32 ds_idx;

	if (NFS_SERVER(pgio->pg_inode)->flags &
			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
		pgio->pg_maxretrans = io_maxretrans;
retry:
	pnfs_generic_pg_check_layout(pgio, req);
	/* Use full layout for now */
	if (!pgio->pg_lseg) {
		ff_layout_pg_get_read(pgio, req, false);
		if (!pgio->pg_lseg)
			goto out_nolseg;
	}
	if (ff_layout_avoid_read_on_rw(pgio->pg_lseg)) {
		ff_layout_pg_get_read(pgio, req, true);
		if (!pgio->pg_lseg)
			goto out_nolseg;
	}
	/* Reset wb_nio, since getting layout segment was successful */
	req->wb_nio = 0;

	ds = ff_layout_get_ds_for_read(pgio, &ds_idx);
	if (!ds) {
		if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
			goto out_mds;
		pnfs_generic_pg_cleanup(pgio);
		/* Sleep for 1 second before retrying */
		ssleep(1);
		goto retry;
	}

	mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx);
	pgm = &pgio->pg_mirrors[0];
	pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;

	pgio->pg_mirror_idx = ds_idx;
	return;
out_nolseg:
	if (pgio->pg_error < 0) {
		if (pgio->pg_error != -EAGAIN)
			return;
		/* Retry getting layout segment if lower layer returned -EAGAIN */
		if (pgio->pg_maxretrans && req->wb_nio++ > pgio->pg_maxretrans) {
			if (NFS_SERVER(pgio->pg_inode)->flags & NFS_MOUNT_SOFTERR)
				pgio->pg_error = -ETIMEDOUT;
			else
				pgio->pg_error = -EIO;
			return;
		}
		pgio->pg_error = 0;
		/* Sleep for 1 second before retrying */
		ssleep(1);
		goto retry;
	}
out_mds:
	trace_pnfs_mds_fallback_pg_init_read(pgio->pg_inode,
			0, NFS4_MAX_UINT64, IOMODE_READ,
			NFS_I(pgio->pg_inode)->layout,
			pgio->pg_lseg);
	pgio->pg_maxretrans = 0;
	nfs_pageio_reset_read_mds(pgio);
}

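/*
 * Prepare a pageio descriptor for writing. Unlike the read path, a
 * write must go to every mirror, so each pageio mirror is mapped 1:1
 * to a layout mirror and all data servers must be available before we
 * proceed.
 */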
static void
ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	struct nfs4_ff_layout_mirror *mirror;
	struct nfs_pgio_mirror *pgm;
	struct nfs4_pnfs_ds *ds;
	u32 i;

retry:
	pnfs_generic_pg_check_layout(pgio, req);
	if (!pgio->pg_lseg) {
		pgio->pg_lseg =
			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
					   req_offset(req), req->wb_bytes,
					   IOMODE_RW, false, nfs_io_gfp_mask());
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;

	/* Use a direct mapping of ds_idx to pgio mirror_idx */
	if (pgio->pg_mirror_count != FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg))
		goto out_eagain;

	for (i = 0; i < pgio->pg_mirror_count; i++) {
		mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
		ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
		if (!ds) {
			if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
				goto out_mds;
			pnfs_generic_pg_cleanup(pgio);
			/* Sleep for 1 second before retrying */
			ssleep(1);
			goto retry;
		}
		pgm = &pgio->pg_mirrors[i];
		pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
	}

	if (NFS_SERVER(pgio->pg_inode)->flags &
			(NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
		pgio->pg_maxretrans = io_maxretrans;
	return;
out_eagain:
	pnfs_generic_pg_cleanup(pgio);
	pgio->pg_error = -EAGAIN;
	return;
out_mds:
	trace_pnfs_mds_fallback_pg_init_write(pgio->pg_inode,
			0, NFS4_MAX_UINT64, IOMODE_RW,
			NFS_I(pgio->pg_inode)->layout,
			pgio->pg_lseg);
	pgio->pg_maxretrans = 0;
	nfs_pageio_reset_write_mds(pgio);
	pgio->pg_error = -EAGAIN;
}

static unsigned int
ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
				    struct nfs_page *req)
{
	if (!pgio->pg_lseg) {
		pgio->pg_lseg =
			pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req),
					   req_offset(req), req->wb_bytes,
					   IOMODE_RW, false, nfs_io_gfp_mask());
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			goto out;
		}
	}
	if (pgio->pg_lseg)
		return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);

	trace_pnfs_mds_fallback_pg_get_mirror_count(pgio->pg_inode,
			0, NFS4_MAX_UINT64, IOMODE_RW,
			NFS_I(pgio->pg_inode)->layout,
			pgio->pg_lseg);
	/* no lseg means that pnfs is not in use, so no mirroring here */
	nfs_pageio_reset_write_mds(pgio);
out:
	return 1;
}

static u32
ff_layout_pg_set_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
{
	u32 old = desc->pg_mirror_idx;

	desc->pg_mirror_idx = idx;
	return old;
}

static struct nfs_pgio_mirror *
ff_layout_pg_get_mirror_write(struct nfs_pageio_descriptor *desc, u32 idx)
{
	return &desc->pg_mirrors[idx];
}

static const struct nfs_pageio_ops ff_layout_pg_read_ops = {
	.pg_init = ff_layout_pg_init_read,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops ff_layout_pg_write_ops = {
	.pg_init = ff_layout_pg_init_write,
	.pg_test = pnfs_generic_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_get_mirror_count = ff_layout_pg_get_mirror_count_write,
	.pg_cleanup = pnfs_generic_pg_cleanup,
	.pg_get_mirror = ff_layout_pg_get_mirror_write,
	.pg_set_mirror = ff_layout_pg_set_mirror_write,
};

static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);

	if (retry_pnfs) {
		dprintk("%s Reset task %5u for i/o through pNFS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		hdr->completion_ops->reschedule_io(hdr);
		return;
	}

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		trace_pnfs_mds_fallback_write_done(hdr->inode,
				hdr->args.offset, hdr->args.count,
				IOMODE_RW, NFS_I(hdr->inode)->layout,
				hdr->lseg);
		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
	}
}

static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr)
{
	u32 idx = hdr->pgio_mirror_idx + 1;
	u32 new_idx = 0;

	if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx, &new_idx))
		ff_layout_send_layouterror(hdr->lseg);
	else
		pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);
	pnfs_read_resend_pnfs(hdr, new_idx);
}

static void ff_layout_reset_read(struct nfs_pgio_header *hdr)
{
	struct rpc_task *task = &hdr->task;

	pnfs_layoutcommit_inode(hdr->inode, false);
	pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		trace_pnfs_mds_fallback_read_done(hdr->inode,
				hdr->args.offset, hdr->args.count,
				IOMODE_READ, NFS_I(hdr->inode)->layout,
				hdr->lseg);
		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
	}
}

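/*
 * Map an NFSv4 error from a data server to a recovery action: reset
 * the session, delay and retry, invalidate the layout, or redirect the
 * I/O to the remaining mirrors (RESET_TO_PNFS) or to the MDS
 * (RESET_TO_MDS). -EAGAIN tells the caller the RPC will be retried
 * as-is.
 */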
static int ff_layout_async_handle_error_v4(struct rpc_task *task,
					   struct nfs4_state *state,
					   struct nfs_client *clp,
					   struct pnfs_layout_segment *lseg,
					   u32 idx)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct inode *inode = lo->plh_inode;
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);
	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;

	switch (task->tk_status) {
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR %d, Reset session. Exchangeid "
			"flags 0x%x\n", __func__, task->tk_status,
			clp->cl_exchange_flags);
		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
		break;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		rpc_delay(task, FF_LAYOUT_POLL_RETRY_MAX);
		break;
	case -NFS4ERR_RETRY_UNCACHED_REP:
		break;
	/* Invalidate Layout errors */
	case -NFS4ERR_PNFS_NO_LAYOUT:
	case -ESTALE:           /* mapped NFS4ERR_STALE */
	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
	case -NFS4ERR_FHEXPIRED:
	case -NFS4ERR_WRONG_TYPE:
		dprintk("%s Invalid layout error %d\n", __func__,
			task->tk_status);
		/*
		 * Destroy layout so new i/o will get a new layout.
		 * Layout will not be destroyed until all current lseg
		 * references are put. Mark layout as invalid to resend failed
		 * i/o and all i/o waiting on the slot table to the MDS until
		 * layout is destroyed and a new valid layout is obtained.
		 */
		pnfs_destroy_layout(NFS_I(inode));
		rpc_wake_up(&tbl->slot_tbl_waitq);
		goto reset;
	/* RPC connection errors */
	case -ENETDOWN:
	case -ENETUNREACH:
		if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
			return -NFS4ERR_FATAL_IOERROR;
		fallthrough;
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -EIO:
	case -ETIMEDOUT:
	case -EPIPE:
	case -EPROTO:
	case -ENODEV:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
				&devid->deviceid);
		rpc_wake_up(&tbl->slot_tbl_waitq);
		fallthrough;
	default:
		if (ff_layout_avoid_mds_available_ds(lseg))
			return -NFS4ERR_RESET_TO_PNFS;
reset:
		dprintk("%s Retry through MDS. Error %d\n", __func__,
			task->tk_status);
		return -NFS4ERR_RESET_TO_MDS;
	}
	task->tk_status = 0;
	return -EAGAIN;
}

/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
					   struct nfs_client *clp,
					   struct pnfs_layout_segment *lseg,
					   u32 idx)
{
	struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx);

	switch (task->tk_status) {
	/* File access problems. Don't mark the device as unavailable */
	case -EACCES:
	case -ESTALE:
	case -EISDIR:
	case -EBADHANDLE:
	case -ELOOP:
	case -ENOSPC:
		break;
	case -EJUKEBOX:
		nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
		goto out_retry;
	case -ENETDOWN:
	case -ENETUNREACH:
		if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
			return -NFS4ERR_FATAL_IOERROR;
		fallthrough;
	default:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_delete_deviceid(devid->ld, devid->nfs_client,
				&devid->deviceid);
	}
	/* FIXME: Need to prevent infinite looping here. */
	return -NFS4ERR_RESET_TO_PNFS;
out_retry:
	task->tk_status = 0;
	rpc_restart_call_prepare(task);
	rpc_delay(task, NFS_JUKEBOX_RETRY_TIME);
	return -EAGAIN;
}

static int ff_layout_async_handle_error(struct rpc_task *task,
					struct nfs4_state *state,
					struct nfs_client *clp,
					struct pnfs_layout_segment *lseg,
					u32 idx)
{
	int vers = clp->cl_nfs_mod->rpc_vers->number;

	if (task->tk_status >= 0) {
		ff_layout_mark_ds_reachable(lseg, idx);
		return 0;
	}

	/* Handle the case of an invalid layout segment */
	if (!pnfs_is_valid_lseg(lseg))
		return -NFS4ERR_RESET_TO_PNFS;

	switch (vers) {
	case 3:
		return ff_layout_async_handle_error_v3(task, clp, lseg, idx);
	case 4:
		return ff_layout_async_handle_error_v4(task, state, clp,
						       lseg, idx);
	default:
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}
}

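/*
 * Record a data-server error against the mirror so it can be reported
 * back to the MDS (via LAYOUTRETURN). Local transport errors are first
 * mapped to an NFSv4 status (NFS4ERR_NXIO or NFS4ERR_ACCESS), and
 * fatal errors mark the device unreachable and/or schedule a layout
 * return.
 */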
static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
					u32 idx, u64 offset, u64 length,
					u32 *op_status, int opnum, int error)
{
	struct nfs4_ff_layout_mirror *mirror;
	u32 status = *op_status;
	int err;

	if (status == 0) {
		switch (error) {
		case -ETIMEDOUT:
		case -EPFNOSUPPORT:
		case -EPROTONOSUPPORT:
		case -EOPNOTSUPP:
		case -EINVAL:
		case -ECONNREFUSED:
		case -ECONNRESET:
		case -EHOSTDOWN:
		case -EHOSTUNREACH:
		case -ENETDOWN:
		case -ENETUNREACH:
		case -EADDRINUSE:
		case -ENOBUFS:
		case -EPIPE:
		case -EPERM:
		case -EPROTO:
		case -ENODEV:
			*op_status = status = NFS4ERR_NXIO;
			break;
		case -EACCES:
			*op_status = status = NFS4ERR_ACCESS;
			break;
		default:
			return;
		}
	}

	mirror = FF_LAYOUT_COMP(lseg, idx);
	err = ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout),
				       mirror, offset, length, status, opnum,
				       nfs_io_gfp_mask());

	switch (status) {
	case NFS4ERR_DELAY:
	case NFS4ERR_GRACE:
		break;
	case NFS4ERR_NXIO:
		ff_layout_mark_ds_unreachable(lseg, idx);
		/*
		 * Don't return the layout if this is a read and we still
		 * have layouts to try
		 */
		if (opnum == OP_READ)
			break;
		fallthrough;
	default:
		pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode,
						  lseg);
	}

	dprintk("%s: err %d op %d status %u\n", __func__, err, opnum, status);
}

/* NFS_PROTO call done callback routines */
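/*
 * Read completion: track any DS error, then, as directed by
 * ff_layout_async_handle_error(), either flag the request for resend
 * through pNFS or the MDS (acted on in ff_layout_read_release()) or
 * restart the RPC.
 */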
static int ff_layout_read_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	int err;

	if (task->tk_status < 0) {
		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
					    hdr->args.offset, hdr->args.count,
					    &hdr->res.op_status, OP_READ,
					    task->tk_status);
		trace_ff_layout_read_error(hdr, task->tk_status);
	}

	err = ff_layout_async_handle_error(task, hdr->args.context->state,
					   hdr->ds_clp, hdr->lseg,
					   hdr->pgio_mirror_idx);

	trace_nfs4_pnfs_read(hdr, err);
	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
	switch (err) {
	case -NFS4ERR_RESET_TO_PNFS:
		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
		return task->tk_status;
	case -NFS4ERR_RESET_TO_MDS:
		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
		return task->tk_status;
	case -EAGAIN:
		goto out_eagain;
	case -NFS4ERR_FATAL_IOERROR:
		task->tk_status = -EIO;
		return 0;
	}

	return 0;
out_eagain:
	rpc_restart_call_prepare(task);
	return -EAGAIN;
}

static bool
ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg)
{
	return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT);
}

/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * rfc5661 is not clear about which credential should be used.
 *
 * Flexlayout client should treat DS replied FILE_SYNC as DATA_SYNC, so
 * to follow http://www.rfc-editor.org/errata_search.php?rfc=5661&eid=2751
 * we always send layoutcommit after DS writes.
 */
static void
ff_layout_set_layoutcommit(struct inode *inode,
		struct pnfs_layout_segment *lseg,
		loff_t end_offset)
{
	if (!ff_layout_need_layoutcommit(lseg))
		return;

	pnfs_set_layoutcommit(inode, lseg, end_offset);
	dprintk("%s inode %lu pls_end_pos %llu\n", __func__, inode->i_ino,
		(unsigned long long) NFS_I(inode)->layout->plh_lwb);
}

static void ff_layout_read_record_layoutstats_start(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_start_read(hdr->inode,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			task->tk_start);
}

static void ff_layout_read_record_layoutstats_done(struct rpc_task *task,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
		return;
	nfs4_ff_layout_stat_io_end_read(task,
			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
			hdr->args.count,
			hdr->res.count);
	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
}

static int ff_layout_read_prepare_common(struct rpc_task *task,
					 struct nfs_pgio_header *hdr)
{
	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return -EIO;
	}

	if (!pnfs_is_valid_lseg(hdr->lseg)) {
		rpc_exit(task, -EAGAIN);
		return -EAGAIN;
	}

	ff_layout_read_record_layoutstats_start(task, hdr);
	return 0;
}

/*
 * Call ops for the async read/write cases
 * In the case of dense layouts, the offset needs to be reset to its
 * original value.
 */
static void ff_layout_read_prepare_v3(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (ff_layout_read_prepare_common(task, hdr))
		return;

	rpc_call_start(task);
}

static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (nfs4_setup_sequence(hdr->ds_clp,
				&hdr->args.seq_args,
				&hdr->res.seq_res,
				task))
		return;

	ff_layout_read_prepare_common(task, hdr);
}

static void ff_layout_read_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs4_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, hdr);
}

static void ff_layout_read_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_read_record_layoutstats_done(task, hdr);
	rpc_count_iostats_metrics(task,
	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_READ]);
}

static void ff_layout_read_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
		ff_layout_resend_pnfs_read(hdr);
	else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
		ff_layout_reset_read(hdr);
	pnfs_generic_rw_release(data);
}

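/*
 * Write completion: as for reads, but a successful reply may also
 * need to schedule a LAYOUTCOMMIT. The end offset is only recorded
 * for FILE_SYNC/DATA_SYNC replies; unstable writes wait for COMMIT.
 */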
ff_layout_write_done_cb(struct rpc_task * task,struct nfs_pgio_header * hdr)1494 static int ff_layout_write_done_cb(struct rpc_task *task,
1495 				struct nfs_pgio_header *hdr)
1496 {
1497 	loff_t end_offs = 0;
1498 	int err;
1499 
1500 	if (task->tk_status < 0) {
1501 		ff_layout_io_track_ds_error(hdr->lseg, hdr->pgio_mirror_idx,
1502 					    hdr->args.offset, hdr->args.count,
1503 					    &hdr->res.op_status, OP_WRITE,
1504 					    task->tk_status);
1505 		trace_ff_layout_write_error(hdr, task->tk_status);
1506 	}
1507 
1508 	err = ff_layout_async_handle_error(task, hdr->args.context->state,
1509 					   hdr->ds_clp, hdr->lseg,
1510 					   hdr->pgio_mirror_idx);
1511 
1512 	trace_nfs4_pnfs_write(hdr, err);
1513 	clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1514 	clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1515 	switch (err) {
1516 	case -NFS4ERR_RESET_TO_PNFS:
1517 		set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
1518 		return task->tk_status;
1519 	case -NFS4ERR_RESET_TO_MDS:
1520 		set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
1521 		return task->tk_status;
1522 	case -EAGAIN:
1523 		return -EAGAIN;
1524 	case -NFS4ERR_FATAL_IOERROR:
1525 		task->tk_status = -EIO;
1526 		return 0;
1527 	}
1528 
1529 	if (hdr->res.verf->committed == NFS_FILE_SYNC ||
1530 	    hdr->res.verf->committed == NFS_DATA_SYNC)
1531 		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;
1532 
1533 	/* Note: if the write is unstable, don't set end_offs until commit */
1534 	ff_layout_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
1535 
1536 	/* zero out fattr since we don't care DS attr at all */
1537 	hdr->fattr.valid = 0;
1538 	if (task->tk_status >= 0)
1539 		nfs_writeback_update_inode(hdr);
1540 
1541 	return 0;
1542 }
1543 
ff_layout_commit_done_cb(struct rpc_task * task,struct nfs_commit_data * data)1544 static int ff_layout_commit_done_cb(struct rpc_task *task,
1545 				     struct nfs_commit_data *data)
1546 {
1547 	int err;
1548 
1549 	if (task->tk_status < 0) {
1550 		ff_layout_io_track_ds_error(data->lseg, data->ds_commit_index,
1551 					    data->args.offset, data->args.count,
1552 					    &data->res.op_status, OP_COMMIT,
1553 					    task->tk_status);
1554 		trace_ff_layout_commit_error(data, task->tk_status);
1555 	}
1556 
1557 	err = ff_layout_async_handle_error(task, NULL, data->ds_clp,
1558 					   data->lseg, data->ds_commit_index);
1559 
1560 	trace_nfs4_pnfs_commit_ds(data, err);
1561 	switch (err) {
1562 	case -NFS4ERR_RESET_TO_PNFS:
1563 		pnfs_generic_prepare_to_resend_writes(data);
1564 		return -EAGAIN;
1565 	case -NFS4ERR_RESET_TO_MDS:
1566 		pnfs_generic_prepare_to_resend_writes(data);
1567 		return -EAGAIN;
1568 	case -EAGAIN:
1569 		rpc_restart_call_prepare(task);
1570 		return -EAGAIN;
1571 	case -NFS4ERR_FATAL_IOERROR:
1572 		task->tk_status = -EIO;
1573 		return 0;
1574 	}
1575 
1576 	ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
1577 
1578 	return 0;
1579 }
1580 
ff_layout_write_record_layoutstats_start(struct rpc_task * task,struct nfs_pgio_header * hdr)1581 static void ff_layout_write_record_layoutstats_start(struct rpc_task *task,
1582 		struct nfs_pgio_header *hdr)
1583 {
1584 	if (test_and_set_bit(NFS_IOHDR_STAT, &hdr->flags))
1585 		return;
1586 	nfs4_ff_layout_stat_io_start_write(hdr->inode,
1587 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1588 			hdr->args.count,
1589 			task->tk_start);
1590 }
1591 
ff_layout_write_record_layoutstats_done(struct rpc_task * task,struct nfs_pgio_header * hdr)1592 static void ff_layout_write_record_layoutstats_done(struct rpc_task *task,
1593 		struct nfs_pgio_header *hdr)
1594 {
1595 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &hdr->flags))
1596 		return;
1597 	nfs4_ff_layout_stat_io_end_write(task,
1598 			FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx),
1599 			hdr->args.count, hdr->res.count,
1600 			hdr->res.verf->committed);
1601 	set_bit(NFS_LSEG_LAYOUTRETURN, &hdr->lseg->pls_flags);
1602 }
1603 
ff_layout_write_prepare_common(struct rpc_task * task,struct nfs_pgio_header * hdr)1604 static int ff_layout_write_prepare_common(struct rpc_task *task,
1605 					  struct nfs_pgio_header *hdr)
1606 {
1607 	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
1608 		rpc_exit(task, -EIO);
1609 		return -EIO;
1610 	}
1611 
1612 	if (!pnfs_is_valid_lseg(hdr->lseg)) {
1613 		rpc_exit(task, -EAGAIN);
1614 		return -EAGAIN;
1615 	}
1616 
1617 	ff_layout_write_record_layoutstats_start(task, hdr);
1618 	return 0;
1619 }
1620 
ff_layout_write_prepare_v3(struct rpc_task * task,void * data)1621 static void ff_layout_write_prepare_v3(struct rpc_task *task, void *data)
1622 {
1623 	struct nfs_pgio_header *hdr = data;
1624 
1625 	if (ff_layout_write_prepare_common(task, hdr))
1626 		return;
1627 
1628 	rpc_call_start(task);
1629 }
1630 
ff_layout_write_prepare_v4(struct rpc_task * task,void * data)1631 static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data)
1632 {
1633 	struct nfs_pgio_header *hdr = data;
1634 
1635 	if (nfs4_setup_sequence(hdr->ds_clp,
1636 				&hdr->args.seq_args,
1637 				&hdr->res.seq_res,
1638 				task))
1639 		return;
1640 
1641 	ff_layout_write_prepare_common(task, hdr);
1642 }
1643 
ff_layout_write_call_done(struct rpc_task * task,void * data)1644 static void ff_layout_write_call_done(struct rpc_task *task, void *data)
1645 {
1646 	struct nfs_pgio_header *hdr = data;
1647 
1648 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
1649 	    task->tk_status == 0) {
1650 		nfs4_sequence_done(task, &hdr->res.seq_res);
1651 		return;
1652 	}
1653 
1654 	/* Note this may cause RPC to be resent */
1655 	hdr->mds_ops->rpc_call_done(task, hdr);
1656 }
1657 
ff_layout_write_count_stats(struct rpc_task * task,void * data)1658 static void ff_layout_write_count_stats(struct rpc_task *task, void *data)
1659 {
1660 	struct nfs_pgio_header *hdr = data;
1661 
1662 	ff_layout_write_record_layoutstats_done(task, hdr);
1663 	rpc_count_iostats_metrics(task,
1664 	    &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]);
1665 }
1666 
ff_layout_write_release(void * data)1667 static void ff_layout_write_release(void *data)
1668 {
1669 	struct nfs_pgio_header *hdr = data;
1670 
1671 	ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
1672 	if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) {
1673 		ff_layout_send_layouterror(hdr->lseg);
1674 		ff_layout_reset_write(hdr, true);
1675 	} else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
1676 		ff_layout_reset_write(hdr, false);
1677 	pnfs_generic_rw_release(data);
1678 }
1679 
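/*
 * Start of COMMIT accounting: a commit carries no new data, so the
 * requested byte count is recorded as zero.
 */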
1680 static void ff_layout_commit_record_layoutstats_start(struct rpc_task *task,
1681 		struct nfs_commit_data *cdata)
1682 {
1683 	if (test_and_set_bit(NFS_IOHDR_STAT, &cdata->flags))
1684 		return;
1685 	nfs4_ff_layout_stat_io_start_write(cdata->inode,
1686 			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1687 			0, task->tk_start);
1688 }
1689 
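/*
 * End of COMMIT accounting: on success, credit the size of all
 * committed requests as stable (NFS_FILE_SYNC) writes.
 */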
1690 static void ff_layout_commit_record_layoutstats_done(struct rpc_task *task,
1691 		struct nfs_commit_data *cdata)
1692 {
1693 	struct nfs_page *req;
1694 	__u64 count = 0;
1695 
1696 	if (!test_and_clear_bit(NFS_IOHDR_STAT, &cdata->flags))
1697 		return;
1698 
1699 	if (task->tk_status == 0) {
1700 		list_for_each_entry(req, &cdata->pages, wb_list)
1701 			count += req->wb_bytes;
1702 	}
1703 	nfs4_ff_layout_stat_io_end_write(task,
1704 			FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index),
1705 			count, count, NFS_FILE_SYNC);
1706 	set_bit(NFS_LSEG_LAYOUTRETURN, &cdata->lseg->pls_flags);
1707 }
1708 
1709 static int ff_layout_commit_prepare_common(struct rpc_task *task,
1710 					   struct nfs_commit_data *cdata)
1711 {
1712 	if (!pnfs_is_valid_lseg(cdata->lseg)) {
1713 		rpc_exit(task, -EAGAIN);
1714 		return -EAGAIN;
1715 	}
1716 
1717 	ff_layout_commit_record_layoutstats_start(task, cdata);
1718 	return 0;
1719 }
1720 
1721 static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data)
1722 {
1723 	if (ff_layout_commit_prepare_common(task, data))
1724 		return;
1725 
1726 	rpc_call_start(task);
1727 }
1728 
1729 static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data)
1730 {
1731 	struct nfs_commit_data *cdata = data;
1732 
1733 	if (nfs4_setup_sequence(cdata->ds_clp,
1734 				&cdata->args.seq_args,
1735 				&cdata->res.seq_res,
1736 				task))
1737 		return;
1738 	ff_layout_commit_prepare_common(task, data);
1739 }
1740 
1741 static void ff_layout_commit_done(struct rpc_task *task, void *data)
1742 {
1743 	pnfs_generic_write_commit_done(task, data);
1744 }
1745 
1746 static void ff_layout_commit_count_stats(struct rpc_task *task, void *data)
1747 {
1748 	struct nfs_commit_data *cdata = data;
1749 
1750 	ff_layout_commit_record_layoutstats_done(task, cdata);
1751 	rpc_count_iostats_metrics(task,
1752 	    &NFS_CLIENT(cdata->inode)->cl_metrics[NFSPROC4_CLNT_COMMIT]);
1753 }
1754 
1755 static void ff_layout_commit_release(void *data)
1756 {
1757 	struct nfs_commit_data *cdata = data;
1758 
1759 	ff_layout_commit_record_layoutstats_done(&cdata->task, cdata);
1760 	pnfs_generic_commit_release(data);
1761 }
1762 
1763 static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
1764 	.rpc_call_prepare = ff_layout_read_prepare_v3,
1765 	.rpc_call_done = ff_layout_read_call_done,
1766 	.rpc_count_stats = ff_layout_read_count_stats,
1767 	.rpc_release = ff_layout_read_release,
1768 };
1769 
1770 static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
1771 	.rpc_call_prepare = ff_layout_read_prepare_v4,
1772 	.rpc_call_done = ff_layout_read_call_done,
1773 	.rpc_count_stats = ff_layout_read_count_stats,
1774 	.rpc_release = ff_layout_read_release,
1775 };
1776 
1777 static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
1778 	.rpc_call_prepare = ff_layout_write_prepare_v3,
1779 	.rpc_call_done = ff_layout_write_call_done,
1780 	.rpc_count_stats = ff_layout_write_count_stats,
1781 	.rpc_release = ff_layout_write_release,
1782 };
1783 
1784 static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
1785 	.rpc_call_prepare = ff_layout_write_prepare_v4,
1786 	.rpc_call_done = ff_layout_write_call_done,
1787 	.rpc_count_stats = ff_layout_write_count_stats,
1788 	.rpc_release = ff_layout_write_release,
1789 };
1790 
1791 static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = {
1792 	.rpc_call_prepare = ff_layout_commit_prepare_v3,
1793 	.rpc_call_done = ff_layout_commit_done,
1794 	.rpc_count_stats = ff_layout_commit_count_stats,
1795 	.rpc_release = ff_layout_commit_release,
1796 };
1797 
1798 static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = {
1799 	.rpc_call_prepare = ff_layout_commit_prepare_v4,
1800 	.rpc_call_done = ff_layout_commit_done,
1801 	.rpc_count_stats = ff_layout_commit_count_stats,
1802 	.rpc_release = ff_layout_commit_release,
1803 };
1804 
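/*
 * Hand a READ off to the data server picked by the layout: select the
 * mirror for this pgio, set up the DS client and credential, then send
 * an asynchronous RPC.  For a local DS, stats accounting starts here.
 * On setup failure, retry pNFS if another usable DS remains, otherwise
 * fall back to the MDS.
 */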
1805 static enum pnfs_try_status
1806 ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
1807 {
1808 	struct pnfs_layout_segment *lseg = hdr->lseg;
1809 	struct nfs4_pnfs_ds *ds;
1810 	struct rpc_clnt *ds_clnt;
1811 	struct nfsd_file *localio;
1812 	struct nfs4_ff_layout_mirror *mirror;
1813 	const struct cred *ds_cred;
1814 	loff_t offset = hdr->args.offset;
1815 	u32 idx = hdr->pgio_mirror_idx;
1816 	int vers;
1817 	struct nfs_fh *fh;
1818 
1819 	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
1820 		__func__, hdr->inode->i_ino,
1821 		hdr->args.pgbase, (size_t)hdr->args.count, offset);
1822 
1823 	mirror = FF_LAYOUT_COMP(lseg, idx);
1824 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
1825 	if (!ds)
1826 		goto out_failed;
1827 
1828 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1829 						   hdr->inode);
1830 	if (IS_ERR(ds_clnt))
1831 		goto out_failed;
1832 
1833 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1834 	if (!ds_cred)
1835 		goto out_failed;
1836 
1837 	vers = nfs4_ff_layout_ds_version(mirror);
1838 
1839 	dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
1840 		ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
1841 
1842 	hdr->pgio_done_cb = ff_layout_read_done_cb;
1843 	refcount_inc(&ds->ds_clp->cl_count);
1844 	hdr->ds_clp = ds->ds_clp;
1845 	fh = nfs4_ff_layout_select_ds_fh(mirror);
1846 	if (fh)
1847 		hdr->args.fh = fh;
1848 
1849 	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1850 
1851 	/*
1852 	 * Note that if we ever decide to split across DSes,
1853 	 * then we may need to handle dense-like offsets.
1854 	 */
1855 	hdr->args.offset = offset;
1856 	hdr->mds_offset = offset;
1857 
1858 	/* Start IO accounting for local read */
1859 	localio = ff_local_open_fh(lseg, idx, ds->ds_clp, ds_cred, fh, FMODE_READ);
1860 	if (localio) {
1861 		hdr->task.tk_start = ktime_get();
1862 		ff_layout_read_record_layoutstats_start(&hdr->task, hdr);
1863 	}
1864 
1865 	/* Perform an asynchronous read to ds */
1866 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1867 			  vers == 3 ? &ff_layout_read_call_ops_v3 :
1868 				      &ff_layout_read_call_ops_v4,
1869 			  0, RPC_TASK_SOFTCONN, localio);
1870 	put_cred(ds_cred);
1871 	return PNFS_ATTEMPTED;
1872 
1873 out_failed:
1874 	if (ff_layout_avoid_mds_available_ds(lseg))
1875 		return PNFS_TRY_AGAIN;
1876 	trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
1877 			hdr->args.offset, hdr->args.count,
1878 			IOMODE_READ, NFS_I(hdr->inode)->layout, lseg);
1879 	return PNFS_NOT_ATTEMPTED;
1880 }
1881 
1882 /* Perform async writes. */
1883 static enum pnfs_try_status
1884 ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
1885 {
1886 	struct pnfs_layout_segment *lseg = hdr->lseg;
1887 	struct nfs4_pnfs_ds *ds;
1888 	struct rpc_clnt *ds_clnt;
1889 	struct nfsd_file *localio;
1890 	struct nfs4_ff_layout_mirror *mirror;
1891 	const struct cred *ds_cred;
1892 	loff_t offset = hdr->args.offset;
1893 	int vers;
1894 	struct nfs_fh *fh;
1895 	u32 idx = hdr->pgio_mirror_idx;
1896 
1897 	mirror = FF_LAYOUT_COMP(lseg, idx);
1898 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1899 	if (!ds)
1900 		goto out_failed;
1901 
1902 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1903 						   hdr->inode);
1904 	if (IS_ERR(ds_clnt))
1905 		goto out_failed;
1906 
1907 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, hdr->cred);
1908 	if (!ds_cred)
1909 		goto out_failed;
1910 
1911 	vers = nfs4_ff_layout_ds_version(mirror);
1912 
1913 	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
1914 		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
1915 		offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
1916 		vers);
1917 
1918 	hdr->pgio_done_cb = ff_layout_write_done_cb;
1919 	refcount_inc(&ds->ds_clp->cl_count);
1920 	hdr->ds_clp = ds->ds_clp;
1921 	hdr->ds_commit_idx = idx;
1922 	fh = nfs4_ff_layout_select_ds_fh(mirror);
1923 	if (fh)
1924 		hdr->args.fh = fh;
1925 
1926 	nfs4_ff_layout_select_ds_stateid(mirror, &hdr->args.stateid);
1927 
1928 	/*
1929 	 * Note that if we ever decide to split across DSes,
1930 	 * then we may need to handle dense-like offsets.
1931 	 */
1932 	hdr->args.offset = offset;
1933 
1934 	/* Start IO accounting for local write */
1935 	localio = ff_local_open_fh(lseg, idx, ds->ds_clp, ds_cred, fh,
1936 				   FMODE_READ|FMODE_WRITE);
1937 	if (localio) {
1938 		hdr->task.tk_start = ktime_get();
1939 		ff_layout_write_record_layoutstats_start(&hdr->task, hdr);
1940 	}
1941 
1942 	/* Perform an asynchronous write */
1943 	nfs_initiate_pgio(ds_clnt, hdr, ds_cred, ds->ds_clp->rpc_ops,
1944 			  vers == 3 ? &ff_layout_write_call_ops_v3 :
1945 				      &ff_layout_write_call_ops_v4,
1946 			  sync, RPC_TASK_SOFTCONN, localio);
1947 	put_cred(ds_cred);
1948 	return PNFS_ATTEMPTED;
1949 
1950 out_failed:
1951 	if (ff_layout_avoid_mds_available_ds(lseg))
1952 		return PNFS_TRY_AGAIN;
1953 	trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
1954 			hdr->args.offset, hdr->args.count,
1955 			IOMODE_RW, NFS_I(hdr->inode)->layout, lseg);
1956 	return PNFS_NOT_ATTEMPTED;
1957 }
1958 
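/*
 * For flexfiles the commit bucket index maps 1:1 onto the mirror
 * index, so no translation is required.
 */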
1959 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1960 {
1961 	return i;
1962 }
1963 
1964 static struct nfs_fh *
1965 select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
1966 {
1967 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
1968 
1969 	/* FIXME: this assumes there is only one NFS version available
1970 	 * for the DS.
1971 	 */
1972 	return &flseg->mirror_array[i]->fh_versions[0];
1973 }
1974 
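/*
 * Send a COMMIT to the data server that took the unstable writes.  If
 * the DS cannot be set up, prepare the requests for resend through the
 * MDS and return -EAGAIN.
 */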
1975 static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
1976 {
1977 	struct pnfs_layout_segment *lseg = data->lseg;
1978 	struct nfs4_pnfs_ds *ds;
1979 	struct rpc_clnt *ds_clnt;
1980 	struct nfsd_file *localio;
1981 	struct nfs4_ff_layout_mirror *mirror;
1982 	const struct cred *ds_cred;
1983 	u32 idx;
1984 	int vers, ret;
1985 	struct nfs_fh *fh;
1986 
1987 	if (!lseg || !(pnfs_is_valid_lseg(lseg) ||
1988 	    test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)))
1989 		goto out_err;
1990 
1991 	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
1992 	mirror = FF_LAYOUT_COMP(lseg, idx);
1993 	ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
1994 	if (!ds)
1995 		goto out_err;
1996 
1997 	ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
1998 						   data->inode);
1999 	if (IS_ERR(ds_clnt))
2000 		goto out_err;
2001 
2002 	ds_cred = ff_layout_get_ds_cred(mirror, &lseg->pls_range, data->cred);
2003 	if (!ds_cred)
2004 		goto out_err;
2005 
2006 	vers = nfs4_ff_layout_ds_version(mirror);
2007 
2008 	dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
2009 		data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
2010 		vers);
2011 	data->commit_done_cb = ff_layout_commit_done_cb;
2012 	data->cred = ds_cred;
2013 	refcount_inc(&ds->ds_clp->cl_count);
2014 	data->ds_clp = ds->ds_clp;
2015 	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
2016 	if (fh)
2017 		data->args.fh = fh;
2018 
2019 	/* Start IO accounting for local commit */
2020 	localio = ff_local_open_fh(lseg, idx, ds->ds_clp, ds_cred, fh,
2021 				   FMODE_READ|FMODE_WRITE);
2022 	if (localio) {
2023 		data->task.tk_start = ktime_get();
2024 		ff_layout_commit_record_layoutstats_start(&data->task, data);
2025 	}
2026 
2027 	ret = nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops,
2028 				   vers == 3 ? &ff_layout_commit_call_ops_v3 :
2029 					       &ff_layout_commit_call_ops_v4,
2030 				   how, RPC_TASK_SOFTCONN, localio);
2031 	put_cred(ds_cred);
2032 	return ret;
2033 out_err:
2034 	pnfs_generic_prepare_to_resend_writes(data);
2035 	pnfs_generic_commit_release(data);
2036 	return -EAGAIN;
2037 }
2038 
2039 static int
2040 ff_layout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
2041 			   int how, struct nfs_commit_info *cinfo)
2042 {
2043 	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
2044 					    ff_layout_initiate_commit);
2045 }
2046 
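/*
 * Matchers for rpc_cancel_tasks(): pick out in-flight read, write and
 * commit RPCs that belong to the layout segment being cancelled.
 */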
2047 static bool ff_layout_match_rw(const struct rpc_task *task,
2048 			       const struct nfs_pgio_header *hdr,
2049 			       const struct pnfs_layout_segment *lseg)
2050 {
2051 	return hdr->lseg == lseg;
2052 }
2053 
2054 static bool ff_layout_match_commit(const struct rpc_task *task,
2055 				   const struct nfs_commit_data *cdata,
2056 				   const struct pnfs_layout_segment *lseg)
2057 {
2058 	return cdata->lseg == lseg;
2059 }
2060 
2061 static bool ff_layout_match_io(const struct rpc_task *task, const void *data)
2062 {
2063 	const struct rpc_call_ops *ops = task->tk_ops;
2064 
2065 	if (ops == &ff_layout_read_call_ops_v3 ||
2066 	    ops == &ff_layout_read_call_ops_v4 ||
2067 	    ops == &ff_layout_write_call_ops_v3 ||
2068 	    ops == &ff_layout_write_call_ops_v4)
2069 		return ff_layout_match_rw(task, task->tk_calldata, data);
2070 	if (ops == &ff_layout_commit_call_ops_v3 ||
2071 	    ops == &ff_layout_commit_call_ops_v4)
2072 		return ff_layout_match_commit(task, task->tk_calldata, data);
2073 	return false;
2074 }
2075 
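/*
 * Cancel all in-flight I/O for this layout segment: kill matching
 * read, write and commit RPCs on every DS with -EAGAIN, and disconnect
 * any rpc_clnt that actually had tasks cancelled.
 */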
2076 static void ff_layout_cancel_io(struct pnfs_layout_segment *lseg)
2077 {
2078 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2079 	struct nfs4_ff_layout_mirror *mirror;
2080 	struct nfs4_ff_layout_ds *mirror_ds;
2081 	struct nfs4_pnfs_ds *ds;
2082 	struct nfs_client *ds_clp;
2083 	struct rpc_clnt *clnt;
2084 	u32 idx;
2085 
2086 	for (idx = 0; idx < flseg->mirror_array_cnt; idx++) {
2087 		mirror = flseg->mirror_array[idx];
2088 		mirror_ds = mirror->mirror_ds;
2089 		if (IS_ERR_OR_NULL(mirror_ds))
2090 			continue;
2091 		ds = mirror->mirror_ds->ds;
2092 		if (!ds)
2093 			continue;
2094 		ds_clp = ds->ds_clp;
2095 		if (!ds_clp)
2096 			continue;
2097 		clnt = ds_clp->cl_rpcclient;
2098 		if (!clnt)
2099 			continue;
2100 		if (!rpc_cancel_tasks(clnt, -EAGAIN, ff_layout_match_io, lseg))
2101 			continue;
2102 		rpc_clnt_disconnect(clnt);
2103 	}
2104 }
2105 
2106 static struct pnfs_ds_commit_info *
2107 ff_layout_get_ds_info(struct inode *inode)
2108 {
2109 	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
2110 
2111 	if (layout == NULL)
2112 		return NULL;
2113 
2114 	return &FF_LAYOUT_FROM_HDR(layout)->commit_info;
2115 }
2116 
2117 static void
2118 ff_layout_setup_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2119 		struct pnfs_layout_segment *lseg)
2120 {
2121 	struct nfs4_ff_layout_segment *flseg = FF_LAYOUT_LSEG(lseg);
2122 	struct inode *inode = lseg->pls_layout->plh_inode;
2123 	struct pnfs_commit_array *array, *new;
2124 
2125 	new = pnfs_alloc_commit_array(flseg->mirror_array_cnt,
2126 				      nfs_io_gfp_mask());
2127 	if (new) {
2128 		spin_lock(&inode->i_lock);
2129 		array = pnfs_add_commit_array(fl_cinfo, new, lseg);
2130 		spin_unlock(&inode->i_lock);
2131 		if (array != new)
2132 			pnfs_free_commit_array(new);
2133 	}
2134 }
2135 
2136 static void
2137 ff_layout_release_ds_info(struct pnfs_ds_commit_info *fl_cinfo,
2138 		struct inode *inode)
2139 {
2140 	spin_lock(&inode->i_lock);
2141 	pnfs_generic_ds_cinfo_destroy(fl_cinfo);
2142 	spin_unlock(&inode->i_lock);
2143 }
2144 
2145 static void
2146 ff_layout_free_deviceid_node(struct nfs4_deviceid_node *d)
2147 {
2148 	nfs4_ff_layout_free_deviceid(container_of(d, struct nfs4_ff_layout_ds,
2149 						  id_node));
2150 }
2151 
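/*
 * Encode the DS I/O error report for LAYOUTRETURN: the error count
 * followed by the queued per-device error entries.
 */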
2152 static int ff_layout_encode_ioerr(struct xdr_stream *xdr,
2153 				  const struct nfs4_layoutreturn_args *args,
2154 				  const struct nfs4_flexfile_layoutreturn_args *ff_args)
2155 {
2156 	__be32 *start;
2157 
2158 	start = xdr_reserve_space(xdr, 4);
2159 	if (unlikely(!start))
2160 		return -E2BIG;
2161 
2162 	*start = cpu_to_be32(ff_args->num_errors);
2163 	/* This assumes we always return _ALL_ layouts */
2164 	return ff_layout_encode_ds_ioerr(xdr, &ff_args->errors);
2165 }
2166 
2167 static void
2168 ff_layout_encode_ff_iostat_head(struct xdr_stream *xdr,
2169 			    const nfs4_stateid *stateid,
2170 			    const struct nfs42_layoutstat_devinfo *devinfo)
2171 {
2172 	__be32 *p;
2173 
2174 	p = xdr_reserve_space(xdr, 8 + 8);
2175 	p = xdr_encode_hyper(p, devinfo->offset);
2176 	p = xdr_encode_hyper(p, devinfo->length);
2177 	encode_opaque_fixed(xdr, stateid->data, NFS4_STATEID_SIZE);
2178 	p = xdr_reserve_space(xdr, 4*8);
2179 	p = xdr_encode_hyper(p, devinfo->read_count);
2180 	p = xdr_encode_hyper(p, devinfo->read_bytes);
2181 	p = xdr_encode_hyper(p, devinfo->write_count);
2182 	p = xdr_encode_hyper(p, devinfo->write_bytes);
2183 	encode_opaque_fixed(xdr, devinfo->dev_id.data, NFS4_DEVICEID4_SIZE);
2184 }
2185 
2186 static void
2187 ff_layout_encode_ff_iostat(struct xdr_stream *xdr,
2188 			    const nfs4_stateid *stateid,
2189 			    const struct nfs42_layoutstat_devinfo *devinfo)
2190 {
2191 	ff_layout_encode_ff_iostat_head(xdr, stateid, devinfo);
2192 	ff_layout_encode_ff_layoutupdate(xdr, devinfo,
2193 			devinfo->ld_private.data);
2194 }
2195 
2196 /* Encode the per-mirror iostats array sent in LAYOUTRETURN */
2197 static void ff_layout_encode_iostats_array(struct xdr_stream *xdr,
2198 		const struct nfs4_layoutreturn_args *args,
2199 		struct nfs4_flexfile_layoutreturn_args *ff_args)
2200 {
2201 	__be32 *p;
2202 	int i;
2203 
2204 	p = xdr_reserve_space(xdr, 4);
2205 	*p = cpu_to_be32(ff_args->num_dev);
2206 	for (i = 0; i < ff_args->num_dev; i++)
2207 		ff_layout_encode_ff_iostat(xdr,
2208 				&args->layout->plh_stateid,
2209 				&ff_args->devinfo[i]);
2210 }
2211 
2212 static void
2213 ff_layout_free_iostats_array(struct nfs42_layoutstat_devinfo *devinfo,
2214 		unsigned int num_entries)
2215 {
2216 	unsigned int i;
2217 
2218 	for (i = 0; i < num_entries; i++) {
2219 		if (!devinfo[i].ld_private.ops)
2220 			continue;
2221 		if (!devinfo[i].ld_private.ops->free)
2222 			continue;
2223 		devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
2224 	}
2225 }
2226 
2227 static struct nfs4_deviceid_node *
2228 ff_layout_alloc_deviceid_node(struct nfs_server *server,
2229 			      struct pnfs_device *pdev, gfp_t gfp_flags)
2230 {
2231 	struct nfs4_ff_layout_ds *dsaddr;
2232 
2233 	dsaddr = nfs4_ff_alloc_deviceid_node(server, pdev, gfp_flags);
2234 	if (!dsaddr)
2235 		return NULL;
2236 	return &dsaddr->id_node;
2237 }
2238 
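/*
 * Build the flexfiles LAYOUTRETURN opaque body (I/O error report plus
 * iostats array) in a temporary page-backed XDR stream, then splice it
 * into the main stream as a length-prefixed blob.
 */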
2239 static void
2240 ff_layout_encode_layoutreturn(struct xdr_stream *xdr,
2241 		const void *voidargs,
2242 		const struct nfs4_xdr_opaque_data *ff_opaque)
2243 {
2244 	const struct nfs4_layoutreturn_args *args = voidargs;
2245 	struct nfs4_flexfile_layoutreturn_args *ff_args = ff_opaque->data;
2246 	struct xdr_buf tmp_buf = {
2247 		.head = {
2248 			[0] = {
2249 				.iov_base = page_address(ff_args->pages[0]),
2250 			},
2251 		},
2252 		.buflen = PAGE_SIZE,
2253 	};
2254 	struct xdr_stream tmp_xdr;
2255 	__be32 *start;
2256 
2257 	dprintk("%s: Begin\n", __func__);
2258 
2259 	xdr_init_encode(&tmp_xdr, &tmp_buf, NULL, NULL);
2260 
2261 	ff_layout_encode_ioerr(&tmp_xdr, args, ff_args);
2262 	ff_layout_encode_iostats_array(&tmp_xdr, args, ff_args);
2263 
2264 	start = xdr_reserve_space(xdr, 4);
2265 	*start = cpu_to_be32(tmp_buf.len);
2266 	xdr_write_pages(xdr, ff_args->pages, 0, tmp_buf.len);
2267 
2268 	dprintk("%s: Return\n", __func__);
2269 }
2270 
2271 static void
2272 ff_layout_free_layoutreturn(struct nfs4_xdr_opaque_data *args)
2273 {
2274 	struct nfs4_flexfile_layoutreturn_args *ff_args;
2275 
2276 	if (!args->data)
2277 		return;
2278 	ff_args = args->data;
2279 	args->data = NULL;
2280 
2281 	ff_layout_free_ds_ioerr(&ff_args->errors);
2282 	ff_layout_free_iostats_array(ff_args->devinfo, ff_args->num_dev);
2283 
2284 	put_page(ff_args->pages[0]);
2285 	kfree(ff_args);
2286 }
2287 
2288 static const struct nfs4_xdr_opaque_ops layoutreturn_ops = {
2289 	.encode = ff_layout_encode_layoutreturn,
2290 	.free = ff_layout_free_layoutreturn,
2291 };
2292 
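/*
 * Gather everything LAYOUTRETURN will report: up to
 * FF_LAYOUTRETURN_MAXERR queued DS errors plus the per-mirror
 * statistics, stashed in ld_private for the encoder above.
 */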
2293 static int
2294 ff_layout_prepare_layoutreturn(struct nfs4_layoutreturn_args *args)
2295 {
2296 	struct nfs4_flexfile_layoutreturn_args *ff_args;
2297 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(args->layout);
2298 
2299 	ff_args = kmalloc(sizeof(*ff_args), nfs_io_gfp_mask());
2300 	if (!ff_args)
2301 		goto out_nomem;
2302 	ff_args->pages[0] = alloc_page(nfs_io_gfp_mask());
2303 	if (!ff_args->pages[0])
2304 		goto out_nomem_free;
2305 
2306 	INIT_LIST_HEAD(&ff_args->errors);
2307 	ff_args->num_errors = ff_layout_fetch_ds_ioerr(args->layout,
2308 			&args->range, &ff_args->errors,
2309 			FF_LAYOUTRETURN_MAXERR);
2310 
2311 	spin_lock(&args->inode->i_lock);
2312 	ff_args->num_dev = ff_layout_mirror_prepare_stats(
2313 		&ff_layout->generic_hdr, &ff_args->devinfo[0],
2314 		ARRAY_SIZE(ff_args->devinfo), NFS4_FF_OP_LAYOUTRETURN);
2315 	spin_unlock(&args->inode->i_lock);
2316 
2317 	args->ld_private->ops = &layoutreturn_ops;
2318 	args->ld_private->data = ff_args;
2319 	return 0;
2320 out_nomem_free:
2321 	kfree(ff_args);
2322 out_nomem:
2323 	return -ENOMEM;
2324 }
2325 
2326 #ifdef CONFIG_NFS_V4_2
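/*
 * Batch any queued DS I/O errors for this lseg into LAYOUTERROR calls,
 * at most NFS42_LAYOUTERROR_MAX entries per RPC, provided the server
 * advertises NFS_CAP_LAYOUTERROR.
 */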
2327 void
2328 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2329 {
2330 	struct pnfs_layout_hdr *lo = lseg->pls_layout;
2331 	struct nfs42_layout_error *errors;
2332 	LIST_HEAD(head);
2333 
2334 	if (!nfs_server_capable(lo->plh_inode, NFS_CAP_LAYOUTERROR))
2335 		return;
2336 	ff_layout_fetch_ds_ioerr(lo, &lseg->pls_range, &head, -1);
2337 	if (list_empty(&head))
2338 		return;
2339 
2340 	errors = kmalloc_array(NFS42_LAYOUTERROR_MAX, sizeof(*errors),
2341 			       nfs_io_gfp_mask());
2342 	if (errors != NULL) {
2343 		const struct nfs4_ff_layout_ds_err *pos;
2344 		size_t n = 0;
2345 
2346 		list_for_each_entry(pos, &head, list) {
2347 			errors[n].offset = pos->offset;
2348 			errors[n].length = pos->length;
2349 			nfs4_stateid_copy(&errors[n].stateid, &pos->stateid);
2350 			errors[n].errors[0].dev_id = pos->deviceid;
2351 			errors[n].errors[0].status = pos->status;
2352 			errors[n].errors[0].opnum = pos->opnum;
2353 			n++;
2354 			if (!list_is_last(&pos->list, &head) &&
2355 			    n < NFS42_LAYOUTERROR_MAX)
2356 				continue;
2357 			if (nfs42_proc_layouterror(lseg, errors, n) < 0)
2358 				break;
2359 			n = 0;
2360 		}
2361 		kfree(errors);
2362 	}
2363 	ff_layout_free_ds_ioerr(&head);
2364 }
2365 #else
2366 void
2367 ff_layout_send_layouterror(struct pnfs_layout_segment *lseg)
2368 {
2369 }
2370 #endif
2371 
2372 static int
2373 ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen)
2374 {
2375 	const struct sockaddr_in *sin = (struct sockaddr_in *)sap;
2376 
2377 	return snprintf(buf, buflen, "%pI4", &sin->sin_addr);
2378 }
2379 
2380 static size_t
2381 ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf,
2382 			  const int buflen)
2383 {
2384 	const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
2385 	const struct in6_addr *addr = &sin6->sin6_addr;
2386 
2387 	/*
2388 	 * RFC 4291, Section 2.2.2
2389 	 *
2390 	 * Shorthanded ANY address
2391 	 */
2392 	if (ipv6_addr_any(addr))
2393 		return snprintf(buf, buflen, "::");
2394 
2395 	/*
2396 	 * RFC 4291, Section 2.2.2
2397 	 *
2398 	 * Shorthanded loopback address
2399 	 */
2400 	if (ipv6_addr_loopback(addr))
2401 		return snprintf(buf, buflen, "::1");
2402 
2403 	/*
2404 	 * RFC 4291, Section 2.2.3
2405 	 *
2406 	 * Special presentation address format for mapped v4
2407 	 * addresses.
2408 	 */
2409 	if (ipv6_addr_v4mapped(addr))
2410 		return snprintf(buf, buflen, "::ffff:%pI4",
2411 					&addr->s6_addr32[3]);
2412 
2413 	/*
2414 	 * RFC 4291, Section 2.2.1
2415 	 */
2416 	return snprintf(buf, buflen, "%pI6c", addr);
2417 }
2418 
2419 /* Derived from rpc_sockaddr2uaddr */
2420 static void
2421 ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da)
2422 {
2423 	struct sockaddr *sap = (struct sockaddr *)&da->da_addr;
2424 	char portbuf[RPCBIND_MAXUADDRPLEN];
2425 	char addrbuf[RPCBIND_MAXUADDRLEN];
2426 	unsigned short port;
2427 	int len, netid_len;
2428 	__be32 *p;
2429 
2430 	switch (sap->sa_family) {
2431 	case AF_INET:
2432 		if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0)
2433 			return;
2434 		port = ntohs(((struct sockaddr_in *)sap)->sin_port);
2435 		break;
2436 	case AF_INET6:
2437 		if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0)
2438 			return;
2439 		port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
2440 		break;
2441 	default:
2442 		WARN_ON_ONCE(1);
2443 		return;
2444 	}
2445 
2446 	snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff);
2447 	len = strlcat(addrbuf, portbuf, sizeof(addrbuf));
2448 
2449 	netid_len = strlen(da->da_netid);
2450 	p = xdr_reserve_space(xdr, 4 + netid_len);
2451 	xdr_encode_opaque(p, da->da_netid, netid_len);
2452 
2453 	p = xdr_reserve_space(xdr, 4 + len);
2454 	xdr_encode_opaque(p, addrbuf, len);
2455 }
2456 
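/* Encode a ktime as an XDR nfstime4: 64-bit seconds, 32-bit nanoseconds. */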
2457 static void
2458 ff_layout_encode_nfstime(struct xdr_stream *xdr,
2459 			 ktime_t t)
2460 {
2461 	struct timespec64 ts;
2462 	__be32 *p;
2463 
2464 	p = xdr_reserve_space(xdr, 12);
2465 	ts = ktime_to_timespec64(t);
2466 	p = xdr_encode_hyper(p, ts.tv_sec);
2467 	*p++ = cpu_to_be32(ts.tv_nsec);
2468 }
2469 
2470 static void
2471 ff_layout_encode_io_latency(struct xdr_stream *xdr,
2472 			    struct nfs4_ff_io_stat *stat)
2473 {
2474 	__be32 *p;
2475 
2476 	p = xdr_reserve_space(xdr, 5 * 8);
2477 	p = xdr_encode_hyper(p, stat->ops_requested);
2478 	p = xdr_encode_hyper(p, stat->bytes_requested);
2479 	p = xdr_encode_hyper(p, stat->ops_completed);
2480 	p = xdr_encode_hyper(p, stat->bytes_completed);
2481 	p = xdr_encode_hyper(p, stat->bytes_not_delivered);
2482 	ff_layout_encode_nfstime(xdr, stat->total_busy_time);
2483 	ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time);
2484 }
2485 
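/*
 * Encode one mirror's layoutupdate body: the DS netaddr and
 * filehandle, the read and write latency stats, the mirror's uptime,
 * and a final boolean that is always encoded as false.
 */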
2486 static void
2487 ff_layout_encode_ff_layoutupdate(struct xdr_stream *xdr,
2488 			      const struct nfs42_layoutstat_devinfo *devinfo,
2489 			      struct nfs4_ff_layout_mirror *mirror)
2490 {
2491 	struct nfs4_pnfs_ds_addr *da;
2492 	struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds;
2493 	struct nfs_fh *fh = &mirror->fh_versions[0];
2494 	__be32 *p;
2495 
2496 	da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node);
2497 	dprintk("%s: DS %s: encoding address %s\n",
2498 		__func__, ds->ds_remotestr, da->da_remotestr);
2499 	/* netaddr4 */
2500 	ff_layout_encode_netaddr(xdr, da);
2501 	/* nfs_fh4 */
2502 	p = xdr_reserve_space(xdr, 4 + fh->size);
2503 	xdr_encode_opaque(p, fh->data, fh->size);
2504 	/* ff_io_latency4 read */
2505 	spin_lock(&mirror->lock);
2506 	ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat);
2507 	/* ff_io_latency4 write */
2508 	ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat);
2509 	spin_unlock(&mirror->lock);
2510 	/* nfstime4 */
2511 	ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time));
2512 	/* bool */
2513 	p = xdr_reserve_space(xdr, 4);
2514 	*p = cpu_to_be32(false);
2515 }
2516 
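/*
 * Encode the layoutupdate4 payload for LAYOUTSTATS: reserve the length
 * word up front, emit the mirror's stats, then backfill the length
 * from the final stream position.
 */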
2517 static void
2518 ff_layout_encode_layoutstats(struct xdr_stream *xdr, const void *args,
2519 			     const struct nfs4_xdr_opaque_data *opaque)
2520 {
2521 	struct nfs42_layoutstat_devinfo *devinfo = container_of(opaque,
2522 			struct nfs42_layoutstat_devinfo, ld_private);
2523 	__be32 *start;
2524 
2525 	/* layoutupdate length */
2526 	start = xdr_reserve_space(xdr, 4);
2527 	ff_layout_encode_ff_layoutupdate(xdr, devinfo, opaque->data);
2528 
2529 	*start = cpu_to_be32((xdr->p - start - 1) * 4);
2530 }
2531 
2532 static void
2533 ff_layout_free_layoutstats(struct nfs4_xdr_opaque_data *opaque)
2534 {
2535 	struct nfs4_ff_layout_mirror *mirror = opaque->data;
2536 
2537 	ff_layout_put_mirror(mirror);
2538 }
2539 
2540 static const struct nfs4_xdr_opaque_ops layoutstat_ops = {
2541 	.encode = ff_layout_encode_layoutstats,
2542 	.free	= ff_layout_free_layoutstats,
2543 };
2544 
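/*
 * Fill in up to @dev_limit devinfo entries from the layout's mirror
 * list.  Periodic LAYOUTSTATS reports only mirrors with fresh data
 * (NFS4_FF_MIRROR_STAT_AVAIL); LAYOUTRETURN reports every mirror.
 * Each entry takes a mirror reference, dropped by the ld_private
 * free callback.
 */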
2545 static int
2546 ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
2547 			       struct nfs42_layoutstat_devinfo *devinfo,
2548 			       int dev_limit, enum nfs4_ff_op_type type)
2549 {
2550 	struct nfs4_flexfile_layout *ff_layout = FF_LAYOUT_FROM_HDR(lo);
2551 	struct nfs4_ff_layout_mirror *mirror;
2552 	struct nfs4_deviceid_node *dev;
2553 	int i = 0;
2554 
2555 	list_for_each_entry(mirror, &ff_layout->mirrors, mirrors) {
2556 		if (i >= dev_limit)
2557 			break;
2558 		if (IS_ERR_OR_NULL(mirror->mirror_ds))
2559 			continue;
2560 		if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL,
2561 					&mirror->flags) &&
2562 		    type != NFS4_FF_OP_LAYOUTRETURN)
2563 			continue;
2564 		/* mirror refcount put in ff_layout_free_layoutstats */
2565 		if (!refcount_inc_not_zero(&mirror->ref))
2566 			continue;
2567 		dev = &mirror->mirror_ds->id_node;
2568 		memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);
2569 		devinfo->offset = 0;
2570 		devinfo->length = NFS4_MAX_UINT64;
2571 		spin_lock(&mirror->lock);
2572 		devinfo->read_count = mirror->read_stat.io_stat.ops_completed;
2573 		devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed;
2574 		devinfo->write_count = mirror->write_stat.io_stat.ops_completed;
2575 		devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed;
2576 		spin_unlock(&mirror->lock);
2577 		devinfo->layout_type = LAYOUT_FLEX_FILES;
2578 		devinfo->ld_private.ops = &layoutstat_ops;
2579 		devinfo->ld_private.data = mirror;
2580 
2581 		devinfo++;
2582 		i++;
2583 	}
2584 	return i;
2585 }
2586 
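/*
 * Allocate and populate the devinfo array for a LAYOUTSTATS call.
 * Returns -ENOENT if the layout is no longer valid or no mirror has
 * anything to report.
 */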
2587 static int ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args)
2588 {
2589 	struct pnfs_layout_hdr *lo;
2590 	struct nfs4_flexfile_layout *ff_layout;
2591 	const int dev_count = PNFS_LAYOUTSTATS_MAXDEV;
2592 
2593 	/* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */
2594 	args->devinfo = kmalloc_array(dev_count, sizeof(*args->devinfo),
2595 				      nfs_io_gfp_mask());
2596 	if (!args->devinfo)
2597 		return -ENOMEM;
2598 
2599 	spin_lock(&args->inode->i_lock);
2600 	lo = NFS_I(args->inode)->layout;
2601 	if (lo && pnfs_layout_is_valid(lo)) {
2602 		ff_layout = FF_LAYOUT_FROM_HDR(lo);
2603 		args->num_dev = ff_layout_mirror_prepare_stats(
2604 			&ff_layout->generic_hdr, &args->devinfo[0], dev_count,
2605 			NFS4_FF_OP_LAYOUTSTATS);
2606 	} else
2607 		args->num_dev = 0;
2608 	spin_unlock(&args->inode->i_lock);
2609 	if (!args->num_dev) {
2610 		kfree(args->devinfo);
2611 		args->devinfo = NULL;
2612 		return -ENOENT;
2613 	}
2614 
2615 	return 0;
2616 }
2617 
2618 static int
2619 ff_layout_set_layoutdriver(struct nfs_server *server,
2620 		const struct nfs_fh *dummy)
2621 {
2622 #if IS_ENABLED(CONFIG_NFS_V4_2)
2623 	server->caps |= NFS_CAP_LAYOUTSTATS | NFS_CAP_REBOOT_LAYOUTRETURN;
2624 #endif
2625 	return 0;
2626 }
2627 
2628 static const struct pnfs_commit_ops ff_layout_commit_ops = {
2629 	.setup_ds_info		= ff_layout_setup_ds_info,
2630 	.release_ds_info	= ff_layout_release_ds_info,
2631 	.mark_request_commit	= pnfs_layout_mark_request_commit,
2632 	.clear_request_commit	= pnfs_generic_clear_request_commit,
2633 	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
2634 	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
2635 	.commit_pagelist	= ff_layout_commit_pagelist,
2636 };
2637 
2638 static struct pnfs_layoutdriver_type flexfilelayout_type = {
2639 	.id			= LAYOUT_FLEX_FILES,
2640 	.name			= "LAYOUT_FLEX_FILES",
2641 	.owner			= THIS_MODULE,
2642 	.flags			= PNFS_LAYOUTGET_ON_OPEN,
2643 	.max_layoutget_response	= 4096, /* 1 page or so... */
2644 	.set_layoutdriver	= ff_layout_set_layoutdriver,
2645 	.alloc_layout_hdr	= ff_layout_alloc_layout_hdr,
2646 	.free_layout_hdr	= ff_layout_free_layout_hdr,
2647 	.alloc_lseg		= ff_layout_alloc_lseg,
2648 	.free_lseg		= ff_layout_free_lseg,
2649 	.add_lseg		= ff_layout_add_lseg,
2650 	.pg_read_ops		= &ff_layout_pg_read_ops,
2651 	.pg_write_ops		= &ff_layout_pg_write_ops,
2652 	.get_ds_info		= ff_layout_get_ds_info,
2653 	.free_deviceid_node	= ff_layout_free_deviceid_node,
2654 	.read_pagelist		= ff_layout_read_pagelist,
2655 	.write_pagelist		= ff_layout_write_pagelist,
2656 	.alloc_deviceid_node    = ff_layout_alloc_deviceid_node,
2657 	.prepare_layoutreturn   = ff_layout_prepare_layoutreturn,
2658 	.sync			= pnfs_nfs_generic_sync,
2659 	.prepare_layoutstats	= ff_layout_prepare_layoutstats,
2660 	.cancel_io		= ff_layout_cancel_io,
2661 };
2662 
2663 static int __init nfs4flexfilelayout_init(void)
2664 {
2665 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Registering...\n",
2666 	       __func__);
2667 	return pnfs_register_layoutdriver(&flexfilelayout_type);
2668 }
2669 
2670 static void __exit nfs4flexfilelayout_exit(void)
2671 {
2672 	printk(KERN_INFO "%s: NFSv4 Flexfile Layout Driver Unregistering...\n",
2673 	       __func__);
2674 	pnfs_unregister_layoutdriver(&flexfilelayout_type);
2675 }
2676 
2677 MODULE_ALIAS("nfs-layouttype4-4");
2678 
2679 MODULE_LICENSE("GPL");
2680 MODULE_DESCRIPTION("The NFSv4 flexfile layout driver");
2681 
2682 module_init(nfs4flexfilelayout_init);
2683 module_exit(nfs4flexfilelayout_exit);
2684 
2685 module_param(io_maxretrans, ushort, 0644);
2686 MODULE_PARM_DESC(io_maxretrans, "The number of times the NFSv4.1 client "
2687 			"retries an I/O request before returning an error.");
2688