// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common NFS I/O operations for the pnfs file based
 * layout drivers.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tom Haynes <loghyr@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/addr.h>
#include <linux/module.h>

#include "nfs4session.h"
#include "internal.h"
#include "pnfs.h"
#include "netns.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

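/*
 * Generic rpc_release callback for pNFS I/O that went to a data server:
 * drop the DS nfs_client reference taken for the RPC, then hand the
 * header back to the MDS release routine.
 */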
void pnfs_generic_rw_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	nfs_put_client(hdr->ds_clp);
	hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_rw_release);

/* Fake up some data that will cause nfs_commit_release to retry the writes. */
void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data)
{
	struct nfs_writeverf *verf = data->res.verf;

	data->task.tk_status = 0;
	memset(&verf->verifier, 0, sizeof(verf->verifier));
	verf->committed = NFS_UNSTABLE;
}
EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes);

void pnfs_generic_write_commit_done(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	/* Note this may cause RPC to be resent */
	wdata->mds_ops->rpc_call_done(task, data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_write_commit_done);

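/*
 * Generic rpc_release callback for a pNFS COMMIT sent to a data server:
 * run the completion callback, then drop the layout segment and DS
 * client references before freeing the commit data.
 */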
void pnfs_generic_commit_release(void *calldata)
{
	struct nfs_commit_data *data = calldata;

	data->completion_ops->completion(data);
	pnfs_put_lseg(data->lseg);
	nfs_put_client(data->ds_clp);
	nfs_commitdata_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_commit_release);

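/*
 * If the bucket no longer holds any requests, detach its layout segment
 * and return it so the caller can drop the reference.  Returns NULL if
 * the bucket still holds requests.
 */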
static struct pnfs_layout_segment *
pnfs_free_bucket_lseg(struct pnfs_commit_bucket *bucket)
{
	if (list_empty(&bucket->committing) && list_empty(&bucket->written)) {
		struct pnfs_layout_segment *freeme = bucket->lseg;
		bucket->lseg = NULL;
		return freeme;
	}
	return NULL;
}

/* The generic layer is about to remove the req from the commit list.
 * If this will make the bucket empty, it will need to put the lseg reference.
 * Note this must be called holding nfsi->commit_mutex
 */
void
pnfs_generic_clear_request_commit(struct nfs_page *req,
				  struct nfs_commit_info *cinfo)
{
	struct pnfs_commit_bucket *bucket = NULL;

	if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
		goto out;
	cinfo->ds->nwritten--;
	if (list_is_singular(&req->wb_list))
		bucket = list_first_entry(&req->wb_list,
					  struct pnfs_commit_bucket, written);
out:
	nfs_request_remove_commit_list(req, cinfo);
	if (bucket)
		pnfs_put_lseg(pnfs_free_bucket_lseg(bucket));
}
EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit);

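/*
 * Allocate a commit array with @n buckets.  Each bucket starts out with
 * empty written/committing lists, no layout segment and an invalid
 * direct verifier.
 */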
struct pnfs_commit_array *
pnfs_alloc_commit_array(size_t n, gfp_t gfp_flags)
{
	struct pnfs_commit_array *p;
	struct pnfs_commit_bucket *b;

	p = kmalloc(struct_size(p, buckets, n), gfp_flags);
	if (!p)
		return NULL;
	p->nbuckets = n;
	INIT_LIST_HEAD(&p->cinfo_list);
	INIT_LIST_HEAD(&p->lseg_list);
	p->lseg = NULL;
	for (b = &p->buckets[0]; n != 0; b++, n--) {
		INIT_LIST_HEAD(&b->written);
		INIT_LIST_HEAD(&b->committing);
		b->lseg = NULL;
		b->direct_verf.committed = NFS_INVALID_STABLE_HOW;
	}
	return p;
}
EXPORT_SYMBOL_GPL(pnfs_alloc_commit_array);

void
pnfs_free_commit_array(struct pnfs_commit_array *p)
{
	kfree_rcu(p, rcu);
}
EXPORT_SYMBOL_GPL(pnfs_free_commit_array);

static struct pnfs_commit_array *
pnfs_find_commit_array_by_lseg(struct pnfs_ds_commit_info *fl_cinfo,
			       struct pnfs_layout_segment *lseg)
{
	struct pnfs_commit_array *array;

	list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
		if (array->lseg == lseg)
			return array;
	}
	return NULL;
}

struct pnfs_commit_array *
pnfs_add_commit_array(struct pnfs_ds_commit_info *fl_cinfo,
		      struct pnfs_commit_array *new,
		      struct pnfs_layout_segment *lseg)
{
	struct pnfs_commit_array *array;

	array = pnfs_find_commit_array_by_lseg(fl_cinfo, lseg);
	if (array)
		return array;
	new->lseg = lseg;
	refcount_set(&new->refcount, 1);
	list_add_rcu(&new->cinfo_list, &fl_cinfo->commits);
	list_add(&new->lseg_list, &lseg->pls_commits);
	return new;
}
EXPORT_SYMBOL_GPL(pnfs_add_commit_array);

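/*
 * Find the commit array for @lseg, asking the layout driver to set up
 * its DS commit info (and hence the array) if none exists yet.  The RCU
 * read lock is dropped around the setup_ds_info() callback.
 */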
static struct pnfs_commit_array *
pnfs_lookup_commit_array(struct pnfs_ds_commit_info *fl_cinfo,
			 struct pnfs_layout_segment *lseg)
{
	struct pnfs_commit_array *array;

	rcu_read_lock();
	array = pnfs_find_commit_array_by_lseg(fl_cinfo, lseg);
	if (!array) {
		rcu_read_unlock();
		fl_cinfo->ops->setup_ds_info(fl_cinfo, lseg);
		rcu_read_lock();
		array = pnfs_find_commit_array_by_lseg(fl_cinfo, lseg);
	}
	rcu_read_unlock();
	return array;
}

static void
pnfs_release_commit_array_locked(struct pnfs_commit_array *array)
{
	list_del_rcu(&array->cinfo_list);
	list_del(&array->lseg_list);
	pnfs_free_commit_array(array);
}

static void
pnfs_put_commit_array_locked(struct pnfs_commit_array *array)
{
	if (refcount_dec_and_test(&array->refcount))
		pnfs_release_commit_array_locked(array);
}

static void
pnfs_put_commit_array(struct pnfs_commit_array *array, struct inode *inode)
{
	if (refcount_dec_and_lock(&array->refcount, &inode->i_lock)) {
		pnfs_release_commit_array_locked(array);
		spin_unlock(&inode->i_lock);
	}
}

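/*
 * Take a reference on @array, unless it is already on its way to being
 * freed (refcount has reached zero), in which case return NULL.
 */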
static struct pnfs_commit_array *
pnfs_get_commit_array(struct pnfs_commit_array *array)
{
	if (refcount_inc_not_zero(&array->refcount))
		return array;
	return NULL;
}

static void
pnfs_remove_and_free_commit_array(struct pnfs_commit_array *array)
{
	array->lseg = NULL;
	list_del_init(&array->lseg_list);
	pnfs_put_commit_array_locked(array);
}

void
pnfs_generic_ds_cinfo_release_lseg(struct pnfs_ds_commit_info *fl_cinfo,
				   struct pnfs_layout_segment *lseg)
{
	struct pnfs_commit_array *array, *tmp;

	list_for_each_entry_safe(array, tmp, &lseg->pls_commits, lseg_list)
		pnfs_remove_and_free_commit_array(array);
}
EXPORT_SYMBOL_GPL(pnfs_generic_ds_cinfo_release_lseg);

void
pnfs_generic_ds_cinfo_destroy(struct pnfs_ds_commit_info *fl_cinfo)
{
	struct pnfs_commit_array *array, *tmp;

	list_for_each_entry_safe(array, tmp, &fl_cinfo->commits, cinfo_list)
		pnfs_remove_and_free_commit_array(array);
}
EXPORT_SYMBOL_GPL(pnfs_generic_ds_cinfo_destroy);

/*
 * Locks the nfs_page requests for commit and moves them to
 * @bucket->committing.
 */
static int
pnfs_bucket_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
				struct nfs_commit_info *cinfo,
				int max)
{
	struct list_head *src = &bucket->written;
	struct list_head *dst = &bucket->committing;
	int ret;

	lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex);
	ret = nfs_scan_commit_list(src, dst, cinfo, max);
	if (ret) {
		cinfo->ds->nwritten -= ret;
		cinfo->ds->ncommitting += ret;
	}
	return ret;
}

static int pnfs_bucket_scan_array(struct nfs_commit_info *cinfo,
				  struct pnfs_commit_bucket *buckets,
				  unsigned int nbuckets,
				  int max)
{
	unsigned int i;
	int rv = 0, cnt;

	for (i = 0; i < nbuckets && max != 0; i++) {
		cnt = pnfs_bucket_scan_ds_commit_list(&buckets[i], cinfo, max);
		rv += cnt;
		max -= cnt;
	}
	return rv;
}

/* Move reqs from the written lists to the committing lists, returning
 * the number moved.
 */
int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo, int max)
{
	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
	struct pnfs_commit_array *array;
	int rv = 0, cnt;

	rcu_read_lock();
	list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
		if (!array->lseg || !pnfs_get_commit_array(array))
			continue;
		rcu_read_unlock();
		cnt = pnfs_bucket_scan_array(cinfo, array->buckets,
					     array->nbuckets, max);
		rcu_read_lock();
		pnfs_put_commit_array(array, cinfo->inode);
		rv += cnt;
		max -= cnt;
		if (!max)
			break;
	}
	rcu_read_unlock();
	return rv;
}
EXPORT_SYMBOL_GPL(pnfs_generic_scan_commit_lists);

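/*
 * Move all written requests in @buckets onto @dst.  Whenever emptying a
 * bucket releases its layout segment, drop that reference and restart
 * the scan from the first bucket.
 */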
static unsigned int
pnfs_bucket_recover_commit_reqs(struct list_head *dst,
				struct pnfs_commit_bucket *buckets,
				unsigned int nbuckets,
				struct nfs_commit_info *cinfo)
{
	struct pnfs_commit_bucket *b;
	struct pnfs_layout_segment *freeme;
	unsigned int nwritten, ret = 0;
	unsigned int i;

restart:
	for (i = 0, b = buckets; i < nbuckets; i++, b++) {
		nwritten = nfs_scan_commit_list(&b->written, dst, cinfo, 0);
		if (!nwritten)
			continue;
		ret += nwritten;
		freeme = pnfs_free_bucket_lseg(b);
		if (freeme) {
			pnfs_put_lseg(freeme);
			goto restart;
		}
	}
	return ret;
}

/* Pull everything off the committing lists and dump into @dst. */
void pnfs_generic_recover_commit_reqs(struct list_head *dst,
				      struct nfs_commit_info *cinfo)
{
	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
	struct pnfs_commit_array *array;
	unsigned int nwritten;

	lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
		if (!array->lseg || !pnfs_get_commit_array(array))
			continue;
		rcu_read_unlock();
		nwritten = pnfs_bucket_recover_commit_reqs(dst,
							   array->buckets,
							   array->nbuckets,
							   cinfo);
		rcu_read_lock();
		pnfs_put_commit_array(array, cinfo->inode);
		fl_cinfo->nwritten -= nwritten;
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(pnfs_generic_recover_commit_reqs);

static struct pnfs_layout_segment *
pnfs_bucket_get_committing(struct list_head *head,
			   struct pnfs_commit_bucket *bucket,
			   struct nfs_commit_info *cinfo)
{
	struct pnfs_layout_segment *lseg;
	struct list_head *pos;

	list_for_each(pos, &bucket->committing)
		cinfo->ds->ncommitting--;
	list_splice_init(&bucket->committing, head);
	lseg = pnfs_free_bucket_lseg(bucket);
	if (!lseg)
		lseg = pnfs_get_lseg(bucket->lseg);
	return lseg;
}

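/*
 * Allocate commit data for @bucket and transfer the bucket's committing
 * requests, together with its layout segment reference, into it.
 */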
static struct nfs_commit_data *
pnfs_bucket_fetch_commitdata(struct pnfs_commit_bucket *bucket,
			     struct nfs_commit_info *cinfo)
{
	struct nfs_commit_data *data = nfs_commitdata_alloc();

	if (!data)
		return NULL;
	data->lseg = pnfs_bucket_get_committing(&data->pages, bucket, cinfo);
	return data;
}

static void pnfs_generic_retry_commit(struct pnfs_commit_bucket *buckets,
				      unsigned int nbuckets,
				      struct nfs_commit_info *cinfo,
				      unsigned int idx)
{
	struct pnfs_commit_bucket *bucket;
	struct pnfs_layout_segment *freeme;
	LIST_HEAD(pages);

	for (bucket = buckets; idx < nbuckets; bucket++, idx++) {
		if (list_empty(&bucket->committing))
			continue;
		mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
		freeme = pnfs_bucket_get_committing(&pages, bucket, cinfo);
		mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
		nfs_retry_commit(&pages, freeme, cinfo, idx);
		pnfs_put_lseg(freeme);
	}
}

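/*
 * Allocate one nfs_commit_data per non-empty bucket and queue it on
 * @list.  If an allocation fails, hand the remaining committing
 * requests back via pnfs_generic_retry_commit() and return the number
 * of commits prepared so far.
 */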
static unsigned int
pnfs_bucket_alloc_ds_commits(struct list_head *list,
			     struct pnfs_commit_bucket *buckets,
			     unsigned int nbuckets,
			     struct nfs_commit_info *cinfo)
{
	struct pnfs_commit_bucket *bucket;
	struct nfs_commit_data *data;
	unsigned int i;
	unsigned int nreq = 0;

	for (i = 0, bucket = buckets; i < nbuckets; i++, bucket++) {
		if (list_empty(&bucket->committing))
			continue;
		mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
		if (!list_empty(&bucket->committing)) {
			data = pnfs_bucket_fetch_commitdata(bucket, cinfo);
			if (!data)
				goto out_error;
			data->ds_commit_index = i;
			list_add_tail(&data->list, list);
			nreq++;
		}
		mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	}
	return nreq;
out_error:
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	/* Clean up on error */
	pnfs_generic_retry_commit(buckets, nbuckets, cinfo, i);
	return nreq;
}

static unsigned int
pnfs_alloc_ds_commits_list(struct list_head *list,
			   struct pnfs_ds_commit_info *fl_cinfo,
			   struct nfs_commit_info *cinfo)
{
	struct pnfs_commit_array *array;
	unsigned int ret = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(array, &fl_cinfo->commits, cinfo_list) {
		if (!array->lseg || !pnfs_get_commit_array(array))
			continue;
		rcu_read_unlock();
		ret += pnfs_bucket_alloc_ds_commits(list, array->buckets,
						    array->nbuckets, cinfo);
		rcu_read_lock();
		pnfs_put_commit_array(array, cinfo->inode);
	}
	rcu_read_unlock();
	return ret;
}

/* This follows nfs_commit_list pretty closely */
int
pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			     int how, struct nfs_commit_info *cinfo,
			     int (*initiate_commit)(struct nfs_commit_data *data,
						    int how))
{
	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
	struct nfs_commit_data *data, *tmp;
	LIST_HEAD(list);
	unsigned int nreq = 0;

	if (!list_empty(mds_pages)) {
		data = nfs_commitdata_alloc();
		if (!data) {
			nfs_retry_commit(mds_pages, NULL, cinfo, -1);
			return -ENOMEM;
		}
		data->ds_commit_index = -1;
		list_splice_init(mds_pages, &data->pages);
		list_add_tail(&data->list, &list);
		nreq++;
	}

	nreq += pnfs_alloc_ds_commits_list(&list, fl_cinfo, cinfo);
	if (nreq == 0)
		goto out;

	list_for_each_entry_safe(data, tmp, &list, list) {
		list_del(&data->list);
		if (data->ds_commit_index < 0) {
			nfs_init_commit(data, NULL, NULL, cinfo);
			nfs_initiate_commit(NFS_CLIENT(inode), data,
					    NFS_PROTO(data->inode),
					    data->mds_ops, how,
					    RPC_TASK_CRED_NOREF, NULL);
		} else {
			nfs_init_commit(data, NULL, data->lseg, cinfo);
			initiate_commit(data, how);
		}
	}
out:
	return PNFS_ATTEMPTED;
}
EXPORT_SYMBOL_GPL(pnfs_generic_commit_pagelist);

/*
 * Data server cache
 *
 * Data servers can be mapped to different device ids, but should
 * never be shared between net namespaces.
 *
 * nfs4_pnfs_ds reference counting:
 * - set to 1 on allocation
 * - incremented when a device id maps a data server already in the cache.
 * - decremented when deviceid is removed from the cache.
 */

/* Debug routines */
static void
print_ds(struct nfs4_pnfs_ds *ds)
{
	if (ds == NULL) {
		printk(KERN_WARNING "%s NULL device\n", __func__);
		return;
	}
	printk(KERN_WARNING " ds %s\n"
		" ref count %d\n"
		" client %p\n"
		" cl_exchange_flags %x\n",
		ds->ds_remotestr,
		refcount_read(&ds->ds_count), ds->ds_clp,
		ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
}

static bool
same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
{
	struct sockaddr_in *a, *b;
	struct sockaddr_in6 *a6, *b6;

	if (addr1->sa_family != addr2->sa_family)
		return false;

	switch (addr1->sa_family) {
	case AF_INET:
		a = (struct sockaddr_in *)addr1;
		b = (struct sockaddr_in *)addr2;

		if (a->sin_addr.s_addr == b->sin_addr.s_addr &&
		    a->sin_port == b->sin_port)
			return true;
		break;

	case AF_INET6:
		a6 = (struct sockaddr_in6 *)addr1;
		b6 = (struct sockaddr_in6 *)addr2;

		/* LINKLOCAL addresses must have matching scope_id */
		if (ipv6_addr_src_scope(&a6->sin6_addr) ==
		    IPV6_ADDR_SCOPE_LINKLOCAL &&
		    a6->sin6_scope_id != b6->sin6_scope_id)
			return false;

		if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) &&
		    a6->sin6_port == b6->sin6_port)
			return true;
		break;

	default:
		dprintk("%s: unhandled address family: %u\n",
			__func__, addr1->sa_family);
		return false;
	}

	return false;
}

/*
 * Check whether every address in 'dsaddrs1' is also present in
 * 'dsaddrs2'. If so, declare a match.
 */
static bool
_same_data_server_addrs_locked(const struct list_head *dsaddrs1,
			       const struct list_head *dsaddrs2)
{
	struct nfs4_pnfs_ds_addr *da1, *da2;
	struct sockaddr *sa1, *sa2;
	bool match = false;

	list_for_each_entry(da1, dsaddrs1, da_node) {
		sa1 = (struct sockaddr *)&da1->da_addr;
		match = false;
		list_for_each_entry(da2, dsaddrs2, da_node) {
			sa2 = (struct sockaddr *)&da2->da_addr;
			match = same_sockaddr(sa1, sa2);
			if (match)
				break;
		}
		if (!match)
			break;
	}
	return match;
}

/*
 * Lookup DS by addresses. Called with nn->nfs4_data_server_lock held.
 */
static struct nfs4_pnfs_ds *
_data_server_lookup_locked(const struct nfs_net *nn, const struct list_head *dsaddrs)
{
	struct nfs4_pnfs_ds *ds;

	list_for_each_entry(ds, &nn->nfs4_data_server_cache, ds_node)
		if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs))
			return ds;
	return NULL;
}

static struct nfs4_pnfs_ds_addr *nfs4_pnfs_ds_addr_alloc(gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds_addr *da = kzalloc(sizeof(*da), gfp_flags);
	if (da)
		INIT_LIST_HEAD(&da->da_node);
	return da;
}

static void nfs4_pnfs_ds_addr_free(struct nfs4_pnfs_ds_addr *da)
{
	kfree(da->da_remotestr);
	kfree(da->da_netid);
	kfree(da);
}

static void destroy_ds(struct nfs4_pnfs_ds *ds)
{
	struct nfs4_pnfs_ds_addr *da;

	dprintk("--> %s\n", __func__);
	ifdebug(FACILITY)
		print_ds(ds);

	nfs_put_client(ds->ds_clp);

	while (!list_empty(&ds->ds_addrs)) {
		da = list_first_entry(&ds->ds_addrs,
				      struct nfs4_pnfs_ds_addr,
				      da_node);
		list_del_init(&da->da_node);
		nfs4_pnfs_ds_addr_free(da);
	}

	kfree(ds->ds_remotestr);
	kfree(ds);
}

void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds)
{
	struct nfs_net *nn = net_generic(ds->ds_net, nfs_net_id);

	if (refcount_dec_and_lock(&ds->ds_count, &nn->nfs4_data_server_lock)) {
		list_del_init(&ds->ds_node);
		spin_unlock(&nn->nfs4_data_server_lock);
		destroy_ds(ds);
	}
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_put);

/*
 * Create a string with a human-readable address and port to avoid
 * complicated setup around many dprintks.
 */
static char *
nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds_addr *da;
	char *remotestr;
	size_t len;
	char *p;

	len = 3;	/* '{', '}' and eol */
	list_for_each_entry(da, dsaddrs, da_node) {
		len += strlen(da->da_remotestr) + 1;	/* string plus comma */
	}

	remotestr = kzalloc(len, gfp_flags);
	if (!remotestr)
		return NULL;

	p = remotestr;
	*(p++) = '{';
	len--;
	list_for_each_entry(da, dsaddrs, da_node) {
		size_t ll = strlen(da->da_remotestr);

		if (ll > len)
			goto out_err;

		memcpy(p, da->da_remotestr, ll);
		p += ll;
		len -= ll;

		if (len < 1)
			goto out_err;
		(*p++) = ',';
		len--;
	}
	if (len < 2)
		goto out_err;
	*(p++) = '}';
	*p = '\0';
	return remotestr;
out_err:
	kfree(remotestr);
	return NULL;
}

/*
 * Given a list of multipath struct nfs4_pnfs_ds_addr, add it to ds cache if
 * uncached and return cached struct nfs4_pnfs_ds.
 */
struct nfs4_pnfs_ds *
nfs4_pnfs_ds_add(const struct net *net, struct list_head *dsaddrs, gfp_t gfp_flags)
{
	struct nfs_net *nn = net_generic(net, nfs_net_id);
	struct nfs4_pnfs_ds *tmp_ds, *ds = NULL;
	char *remotestr;

	if (list_empty(dsaddrs)) {
		dprintk("%s: no addresses defined\n", __func__);
		goto out;
	}

	ds = kzalloc(sizeof(*ds), gfp_flags);
	if (!ds)
		goto out;

	/* this is only used for debugging, so it's ok if it's NULL */
	remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags);

	spin_lock(&nn->nfs4_data_server_lock);
	tmp_ds = _data_server_lookup_locked(nn, dsaddrs);
	if (tmp_ds == NULL) {
		INIT_LIST_HEAD(&ds->ds_addrs);
		list_splice_init(dsaddrs, &ds->ds_addrs);
		ds->ds_remotestr = remotestr;
		refcount_set(&ds->ds_count, 1);
		INIT_LIST_HEAD(&ds->ds_node);
		ds->ds_net = net;
		ds->ds_clp = NULL;
		list_add(&ds->ds_node, &nn->nfs4_data_server_cache);
		dprintk("%s add new data server %s\n", __func__,
			ds->ds_remotestr);
	} else {
		kfree(remotestr);
		kfree(ds);
		refcount_inc(&tmp_ds->ds_count);
		dprintk("%s data server %s found, inc'ed ds_count to %d\n",
			__func__, tmp_ds->ds_remotestr,
			refcount_read(&tmp_ds->ds_count));
		ds = tmp_ds;
	}
	spin_unlock(&nn->nfs4_data_server_lock);
out:
	return ds;
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_add);

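/*
 * Data server connection state.  NFS4DS_CONNECTING serialises connection
 * attempts: waiters block (killably) in nfs4_wait_ds_connect() until the
 * bit is cleared again by nfs4_clear_ds_conn_bit() once the attempt has
 * finished.
 */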
static int nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
{
	might_sleep();
	return wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING, TASK_KILLABLE);
}

static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
{
	smp_mb__before_atomic();
	clear_and_wake_up_bit(NFS4DS_CONNECTING, &ds->ds_state);
}

static struct nfs_client *(*get_v3_ds_connect)(
			struct nfs_server *mds_srv,
			const struct sockaddr_storage *ds_addr,
			int ds_addrlen,
			int ds_proto,
			unsigned int ds_timeo,
			unsigned int ds_retrans);

static bool load_v3_ds_connect(void)
{
	if (!get_v3_ds_connect) {
		get_v3_ds_connect = symbol_request(nfs3_set_ds_client);
		WARN_ON_ONCE(!get_v3_ds_connect);
	}

	return(get_v3_ds_connect != NULL);
}

void nfs4_pnfs_v3_ds_connect_unload(void)
{
	if (get_v3_ds_connect) {
		symbol_put(nfs3_set_ds_client);
		get_v3_ds_connect = NULL;
	}
}

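/*
 * Connect to an NFSv3 data server.  The first address that yields a
 * client becomes the DS nfs_client; any further addresses in the
 * multipath list are added to that client as transport aliases.
 */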
static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
				    struct nfs4_pnfs_ds *ds,
				    unsigned int timeo,
				    unsigned int retrans)
{
	struct nfs_client *clp = ERR_PTR(-EIO);
	struct nfs4_pnfs_ds_addr *da;
	unsigned long connect_timeout = timeo * (retrans + 1) * HZ / 10;
	int status = 0;

	dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);

	if (!load_v3_ds_connect())
		return -EPROTONOSUPPORT;

	list_for_each_entry(da, &ds->ds_addrs, da_node) {
		dprintk("%s: DS %s: trying address %s\n",
			__func__, ds->ds_remotestr, da->da_remotestr);

		if (!IS_ERR(clp)) {
			struct xprt_create xprt_args = {
				.ident = da->da_transport,
				.net = clp->cl_net,
				.dstaddr = (struct sockaddr *)&da->da_addr,
				.addrlen = da->da_addrlen,
				.servername = clp->cl_hostname,
				.connect_timeout = connect_timeout,
				.reconnect_timeout = connect_timeout,
				.xprtsec = clp->cl_xprtsec,
			};

			if (da->da_transport != clp->cl_proto &&
			    clp->cl_proto != XPRT_TRANSPORT_TCP_TLS)
				continue;
			if (da->da_transport == XPRT_TRANSPORT_TCP &&
			    mds_srv->nfs_client->cl_proto == XPRT_TRANSPORT_TCP_TLS)
				xprt_args.ident = XPRT_TRANSPORT_TCP_TLS;

			if (da->da_addr.ss_family != clp->cl_addr.ss_family)
				continue;
			/* Add this address as an alias */
			rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
					  rpc_clnt_test_and_add_xprt, NULL);
			continue;
		}
		if (da->da_transport == XPRT_TRANSPORT_TCP &&
		    mds_srv->nfs_client->cl_proto == XPRT_TRANSPORT_TCP_TLS)
			da->da_transport = XPRT_TRANSPORT_TCP_TLS;
		clp = get_v3_ds_connect(mds_srv,
					&da->da_addr,
					da->da_addrlen, da->da_transport,
					timeo, retrans);
		if (IS_ERR(clp))
			continue;
		clp->cl_rpcclient->cl_softerr = 0;
		clp->cl_rpcclient->cl_softrtry = 0;
	}

	if (IS_ERR(clp)) {
		status = PTR_ERR(clp);
		goto out;
	}

	smp_wmb();
	WRITE_ONCE(ds->ds_clp, clp);
	dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
out:
	return status;
}

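/*
 * Connect to an NFSv4.x data server.  The first usable address creates
 * the DS nfs_client and session; where the minor version supports
 * session trunking, the remaining addresses are tested and, if they
 * pass, added as transport aliases.
 */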
static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
				    struct nfs4_pnfs_ds *ds,
				    unsigned int timeo,
				    unsigned int retrans,
				    u32 minor_version)
{
	struct nfs_client *clp = ERR_PTR(-EIO);
	struct nfs4_pnfs_ds_addr *da;
	int status = 0;

	dprintk("--> %s DS %s\n", __func__, ds->ds_remotestr);

	list_for_each_entry(da, &ds->ds_addrs, da_node) {
		char servername[48];

		dprintk("%s: DS %s: trying address %s\n",
			__func__, ds->ds_remotestr, da->da_remotestr);

		if (!IS_ERR(clp) && clp->cl_mvops->session_trunk) {
			struct xprt_create xprt_args = {
				.ident = da->da_transport,
				.net = clp->cl_net,
				.dstaddr = (struct sockaddr *)&da->da_addr,
				.addrlen = da->da_addrlen,
				.servername = clp->cl_hostname,
				.xprtsec = clp->cl_xprtsec,
			};
			struct nfs4_add_xprt_data xprtdata = {
				.clp = clp,
			};
			struct rpc_add_xprt_test rpcdata = {
				.add_xprt_test = clp->cl_mvops->session_trunk,
				.data = &xprtdata,
			};

			if (da->da_transport != clp->cl_proto &&
			    clp->cl_proto != XPRT_TRANSPORT_TCP_TLS)
				continue;
			if (da->da_transport == XPRT_TRANSPORT_TCP &&
			    mds_srv->nfs_client->cl_proto ==
				XPRT_TRANSPORT_TCP_TLS) {
				struct sockaddr *addr =
					(struct sockaddr *)&da->da_addr;
				struct sockaddr_in *sin =
					(struct sockaddr_in *)&da->da_addr;
				struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)&da->da_addr;

				/* For NFS over TLS we need to supply the
				 * servername of the trunked transport, not
				 * the servername of the main transport stored
				 * in clp->cl_hostname, and set the transport
				 * protocol to indicate that TLS is used.
				 */
				servername[0] = '\0';
				switch (addr->sa_family) {
				case AF_INET:
					snprintf(servername, sizeof(servername),
						 "%pI4", &sin->sin_addr.s_addr);
					break;
				case AF_INET6:
					snprintf(servername, sizeof(servername),
						 "%pI6", &sin6->sin6_addr);
					break;
				default:
					/* do not consider this address */
					continue;
				}
				xprt_args.ident = XPRT_TRANSPORT_TCP_TLS;
				xprt_args.servername = servername;
			}
			if (da->da_addr.ss_family != clp->cl_addr.ss_family)
				continue;

			/**
			 * Test this address for session trunking and
			 * add as an alias
			 */
			xprtdata.cred = nfs4_get_clid_cred(clp);
			rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
					  rpc_clnt_setup_test_and_add_xprt,
					  &rpcdata);
			if (xprtdata.cred)
				put_cred(xprtdata.cred);
		} else {
			if (da->da_transport == XPRT_TRANSPORT_TCP &&
			    mds_srv->nfs_client->cl_proto ==
				XPRT_TRANSPORT_TCP_TLS)
				da->da_transport = XPRT_TRANSPORT_TCP_TLS;
			clp = nfs4_set_ds_client(mds_srv,
						 &da->da_addr,
						 da->da_addrlen,
						 da->da_transport, timeo,
						 retrans, minor_version);
			if (IS_ERR(clp))
				continue;

			status = nfs4_init_ds_session(clp,
					mds_srv->nfs_client->cl_lease_time);
			if (status) {
				nfs_put_client(clp);
				clp = ERR_PTR(-EIO);
				continue;
			}

		}
	}

	if (IS_ERR(clp)) {
		status = PTR_ERR(clp);
		goto out;
	}

	smp_wmb();
	WRITE_ONCE(ds->ds_clp, clp);
	dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
out:
	return status;
}

/*
 * Create an rpc connection to the nfs4_pnfs_ds data server.
 * Currently only supports IPv4 and IPv6 addresses.
 * If connection fails, make devid unavailable and return a -errno.
 */
int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
			 struct nfs4_deviceid_node *devid, unsigned int timeo,
			 unsigned int retrans, u32 version, u32 minor_version)
{
	int err;

	do {
		err = nfs4_wait_ds_connect(ds);
		if (err || ds->ds_clp)
			goto out;
		if (nfs4_test_deviceid_unavailable(devid)) {
			err = -ENODEV;
			goto out;
		}
	} while (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) != 0);

	if (ds->ds_clp)
		goto connect_done;

	switch (version) {
	case 3:
		err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo, retrans);
		break;
	case 4:
		err = _nfs4_pnfs_v4_ds_connect(mds_srv, ds, timeo, retrans,
					       minor_version);
		break;
	default:
		dprintk("%s: unsupported DS version %d\n", __func__, version);
		err = -EPROTONOSUPPORT;
	}

connect_done:
	nfs4_clear_ds_conn_bit(ds);
out:
	/*
	 * At this point the ds->ds_clp should be ready, but it might have
	 * hit an error.
	 */
	if (!err) {
		if (!ds->ds_clp || !nfs_client_init_is_complete(ds->ds_clp)) {
			WARN_ON_ONCE(ds->ds_clp ||
				!nfs4_test_deviceid_unavailable(devid));
			err = -EINVAL;
		} else
			err = nfs_client_init_status(ds->ds_clp);
	}

	trace_pnfs_ds_connect(ds->ds_remotestr, err);
	return err;
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect);

/*
 * Currently only supports ipv4, ipv6 and one multi-path address.
 */
struct nfs4_pnfs_ds_addr *
nfs4_decode_mp_ds_addr(struct net *net, struct xdr_stream *xdr, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds_addr *da = NULL;
	char *buf, *portstr;
	__be16 port;
	ssize_t nlen, rlen;
	int tmp[2];
	char *netid;
	size_t len;
	char *startsep = "";
	char *endsep = "";


	/* r_netid */
	nlen = xdr_stream_decode_string_dup(xdr, &netid, XDR_MAX_NETOBJ,
					    gfp_flags);
	if (unlikely(nlen < 0))
		goto out_err;

	/* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */
	/* port is ".ABC.DEF", 8 chars max */
	rlen = xdr_stream_decode_string_dup(xdr, &buf, INET6_ADDRSTRLEN +
					    IPV6_SCOPE_ID_LEN + 8, gfp_flags);
	if (unlikely(rlen < 0))
		goto out_free_netid;

	/* replace port '.' with '-' */
	portstr = strrchr(buf, '.');
	if (!portstr) {
		dprintk("%s: Failed finding expected dot in port\n",
			__func__);
		goto out_free_buf;
	}
	*portstr = '-';

	/* find '.' between address and port */
	portstr = strrchr(buf, '.');
	if (!portstr) {
		dprintk("%s: Failed finding expected dot between address and "
			"port\n", __func__);
		goto out_free_buf;
	}
	*portstr = '\0';

	da = nfs4_pnfs_ds_addr_alloc(gfp_flags);
	if (unlikely(!da))
		goto out_free_buf;

	if (!rpc_pton(net, buf, portstr-buf, (struct sockaddr *)&da->da_addr,
		      sizeof(da->da_addr))) {
		dprintk("%s: error parsing address %s\n", __func__, buf);
		goto out_free_da;
	}

	portstr++;
	sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]);
	port = htons((tmp[0] << 8) | (tmp[1]));

	switch (da->da_addr.ss_family) {
	case AF_INET:
		((struct sockaddr_in *)&da->da_addr)->sin_port = port;
		da->da_addrlen = sizeof(struct sockaddr_in);
		break;

	case AF_INET6:
		((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port;
		da->da_addrlen = sizeof(struct sockaddr_in6);
		startsep = "[";
		endsep = "]";
		break;

	default:
		dprintk("%s: unsupported address family: %u\n",
			__func__, da->da_addr.ss_family);
		goto out_free_da;
	}

	da->da_transport = xprt_find_transport_ident(netid);
	if (da->da_transport < 0) {
		dprintk("%s: ERROR: unknown r_netid \"%s\"\n",
			__func__, netid);
		goto out_free_da;
	}

	da->da_netid = netid;

	/* save human readable address */
	len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7;
	da->da_remotestr = kzalloc(len, gfp_flags);

	/* NULL is ok, only used for dprintk */
	if (da->da_remotestr)
		snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep,
			 buf, endsep, ntohs(port));

	dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr);
	kfree(buf);
	return da;

out_free_da:
	kfree(da);
out_free_buf:
	dprintk("%s: Error parsing DS addr: %s\n", __func__, buf);
	kfree(buf);
out_free_netid:
	kfree(netid);
out_err:
	return NULL;
}
EXPORT_SYMBOL_GPL(nfs4_decode_mp_ds_addr);

void
pnfs_layout_mark_request_commit(struct nfs_page *req,
				struct pnfs_layout_segment *lseg,
				struct nfs_commit_info *cinfo,
				u32 ds_commit_idx)
{
	struct list_head *list;
	struct pnfs_commit_array *array;
	struct pnfs_commit_bucket *bucket;

	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	array = pnfs_lookup_commit_array(cinfo->ds, lseg);
	if (!array || !pnfs_is_valid_lseg(lseg))
		goto out_resched;
	bucket = &array->buckets[ds_commit_idx];
	list = &bucket->written;
	/* Non-empty buckets hold a reference on the lseg. That ref
	 * is normally transferred to the COMMIT call and released
	 * there. It could also be released if the last req is pulled
	 * off due to a rewrite, in which case it will be done in
	 * pnfs_generic_clear_request_commit().
	 */
	if (!bucket->lseg)
		bucket->lseg = pnfs_get_lseg(lseg);
	set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
	cinfo->ds->nwritten++;

	nfs_request_add_commit_list_locked(req, list, cinfo);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	nfs_folio_mark_unstable(nfs_page_to_folio(req), cinfo);
	return;
out_resched:
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	cinfo->completion_ops->resched_write(cinfo, req);
}
EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);

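/*
 * If a layoutcommit is outstanding, commit the inode synchronously and,
 * unless this is a datasync, follow up with a LAYOUTCOMMIT to the MDS.
 */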
int
pnfs_nfs_generic_sync(struct inode *inode, bool datasync)
{
	int ret;

	if (!pnfs_layoutcommit_outstanding(inode))
		return 0;
	ret = nfs_commit_inode(inode, FLUSH_SYNC);
	if (ret < 0)
		return ret;
	if (datasync)
		return 0;
	return pnfs_layoutcommit_inode(inode, true);
}
EXPORT_SYMBOL_GPL(pnfs_nfs_generic_sync);