1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2014 Anna Schumaker <Anna.Schumaker@Netapp.com>
4 */
5 #include <linux/fs.h>
6 #include <linux/sunrpc/addr.h>
7 #include <linux/sunrpc/sched.h>
8 #include <linux/nfs.h>
9 #include <linux/nfs3.h>
10 #include <linux/nfs4.h>
11 #include <linux/nfs_xdr.h>
12 #include <linux/nfs_fs.h>
13 #include "nfs4_fs.h"
14 #include "nfs42.h"
15 #include "iostat.h"
16 #include "pnfs.h"
17 #include "nfs4session.h"
18 #include "internal.h"
19 #include "delegation.h"
20 #include "nfs4trace.h"
21
22 #define NFSDBG_FACILITY NFSDBG_PROC
23 static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);
24 static int nfs42_proc_offload_status(struct file *file, nfs4_stateid *stateid,
25 u64 *copied);
26
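/*
 * Fill @naddr with the transport netid and the universal address of
 * @filep's server, with the well-known NFS port (2049) appended as the
 * two trailing "port-high.port-low" octets. For example, an IPv4 server
 * at the documentation address 192.0.2.1 would be encoded as
 * "192.0.2.1.8.1" (2049 == 8 * 256 + 1). The result is used to describe
 * the destination server in a COPY_NOTIFY request.
 */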
27 static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
28 {
29 struct nfs_client *clp = (NFS_SERVER(file_inode(filep)))->nfs_client;
30 unsigned short port = 2049;
31
32 rcu_read_lock();
33 naddr->netid_len = scnprintf(naddr->netid,
34 sizeof(naddr->netid), "%s",
35 rpc_peeraddr2str(clp->cl_rpcclient,
36 RPC_DISPLAY_NETID));
37 naddr->addr_len = scnprintf(naddr->addr,
38 sizeof(naddr->addr),
39 "%s.%u.%u",
40 rpc_peeraddr2str(clp->cl_rpcclient,
41 RPC_DISPLAY_ADDR),
42 port >> 8, port & 255);
43 rcu_read_unlock();
44 }
45
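/*
 * Issue a single ALLOCATE or DEALLOCATE RPC; the caller selects the
 * procedure via @msg. A write stateid is required. Post-op attributes
 * are requested so cached block counts can be revalidated, and on
 * success the cached mode is invalidated if the server may have
 * stripped setuid/setgid bits.
 */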
46 static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
47 struct nfs_lock_context *lock, loff_t offset, loff_t len)
48 {
49 struct inode *inode = file_inode(filep);
50 struct nfs_server *server = NFS_SERVER(inode);
51 u32 bitmask[NFS_BITMASK_SZ];
52 struct nfs42_falloc_args args = {
53 .falloc_fh = NFS_FH(inode),
54 .falloc_offset = offset,
55 .falloc_length = len,
56 .falloc_bitmask = bitmask,
57 };
58 struct nfs42_falloc_res res = {
59 .falloc_server = server,
60 };
61 int status;
62
63 msg->rpc_argp = &args;
64 msg->rpc_resp = &res;
65
66 status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
67 lock, FMODE_WRITE);
68 if (status) {
69 if (status == -EAGAIN)
70 status = -NFS4ERR_BAD_STATEID;
71 return status;
72 }
73
74 nfs4_bitmask_set(bitmask, server->cache_consistency_bitmask, inode,
75 NFS_INO_INVALID_BLOCKS);
76
77 res.falloc_fattr = nfs_alloc_fattr();
78 if (!res.falloc_fattr)
79 return -ENOMEM;
80
81 status = nfs4_call_sync(server->client, server, msg,
82 &args.seq_args, &res.seq_res, 0);
83 if (status == 0) {
84 if (nfs_should_remove_suid(inode)) {
85 spin_lock(&inode->i_lock);
86 nfs_set_cache_invalid(inode,
87 NFS_INO_REVAL_FORCED | NFS_INO_INVALID_MODE);
88 spin_unlock(&inode->i_lock);
89 }
90 status = nfs_post_op_update_inode_force_wcc(inode,
91 res.falloc_fattr);
92 }
93 if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE])
94 trace_nfs4_fallocate(inode, &args, status);
95 else
96 trace_nfs4_deallocate(inode, &args, status);
97 kfree(res.falloc_fattr);
98 return status;
99 }
100
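/*
 * Common wrapper for ALLOCATE/DEALLOCATE: flush dirty data first, then
 * retry the operation under the usual NFSv4 exception handling. An
 * -ENOTSUPP reply is translated to -EOPNOTSUPP for the VFS.
 */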
101 static int nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
102 loff_t offset, loff_t len)
103 {
104 struct inode *inode = file_inode(filep);
105 struct nfs_server *server = NFS_SERVER(inode);
106 struct nfs4_exception exception = { };
107 struct nfs_lock_context *lock;
108 int err;
109
110 lock = nfs_get_lock_context(nfs_file_open_context(filep));
111 if (IS_ERR(lock))
112 return PTR_ERR(lock);
113
114 exception.inode = inode;
115 exception.state = lock->open_context->state;
116
117 err = nfs_sync_inode(inode);
118 if (err)
119 goto out;
120
121 do {
122 err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
123 if (err == -ENOTSUPP) {
124 err = -EOPNOTSUPP;
125 break;
126 }
127 err = nfs4_handle_exception(server, err, &exception);
128 } while (exception.retry);
129 out:
130 nfs_put_lock_context(lock);
131 return err;
132 }
133
134 int nfs42_proc_allocate(struct file *filep, loff_t offset, loff_t len)
135 {
136 struct rpc_message msg = {
137 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE],
138 };
139 struct inode *inode = file_inode(filep);
140 int err;
141
142 if (!nfs_server_capable(inode, NFS_CAP_ALLOCATE))
143 return -EOPNOTSUPP;
144
145 inode_lock(inode);
146
147 err = nfs42_proc_fallocate(&msg, filep, offset, len);
148 if (err == -EOPNOTSUPP)
149 NFS_SERVER(inode)->caps &= ~NFS_CAP_ALLOCATE;
150
151 inode_unlock(inode);
152 return err;
153 }
154
155 int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
156 {
157 struct rpc_message msg = {
158 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DEALLOCATE],
159 };
160 struct inode *inode = file_inode(filep);
161 int err;
162
163 if (!nfs_server_capable(inode, NFS_CAP_DEALLOCATE))
164 return -EOPNOTSUPP;
165
166 inode_lock(inode);
167
168 err = nfs42_proc_fallocate(&msg, filep, offset, len);
169 if (err == 0)
170 truncate_pagecache_range(inode, offset, (offset + len) - 1);
171 if (err == -EOPNOTSUPP)
172 NFS_SERVER(inode)->caps &= ~NFS_CAP_DEALLOCATE;
173
174 inode_unlock(inode);
175 return err;
176 }
177
178 static void nfs4_copy_dequeue_callback(struct nfs_server *dst_server,
179 struct nfs_server *src_server,
180 struct nfs4_copy_state *copy)
181 {
182 spin_lock(&dst_server->nfs_client->cl_lock);
183 list_del_init(&copy->copies);
184 spin_unlock(&dst_server->nfs_client->cl_lock);
185 if (dst_server != src_server) {
186 spin_lock(&src_server->nfs_client->cl_lock);
187 list_del_init(&copy->src_copies);
188 spin_unlock(&src_server->nfs_client->cl_lock);
189 }
190 }
191
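/*
 * Wait for an asynchronous COPY to complete. If the matching CB_OFFLOAD
 * callback has already arrived, its result is waiting on the client's
 * pending_cb_stateids list; otherwise queue a new nfs4_copy_state and
 * sleep on its completion. While waiting, poll the server with
 * OFFLOAD_STATUS at an interval that doubles up to half the lease time.
 * A signal, or a callback that reports the copy cannot proceed, cancels
 * the offload with OFFLOAD_CANCEL.
 */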
192 static int handle_async_copy(struct nfs42_copy_res *res,
193 struct nfs_server *dst_server,
194 struct nfs_server *src_server,
195 struct file *src,
196 struct file *dst,
197 nfs4_stateid *src_stateid,
198 bool *restart)
199 {
200 struct nfs4_copy_state *copy, *tmp_copy = NULL, *iter;
201 struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
202 struct nfs_open_context *src_ctx = nfs_file_open_context(src);
203 struct nfs_client *clp = dst_server->nfs_client;
204 unsigned long timeout = 3 * HZ;
205 int status = NFS4_OK;
206 u64 copied;
207
208 copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL);
209 if (!copy)
210 return -ENOMEM;
211
212 spin_lock(&dst_server->nfs_client->cl_lock);
213 list_for_each_entry(iter,
214 &dst_server->nfs_client->pending_cb_stateids,
215 copies) {
216 if (memcmp(&res->write_res.stateid, &iter->stateid,
217 NFS4_STATEID_SIZE))
218 continue;
219 tmp_copy = iter;
220 list_del(&iter->copies);
221 break;
222 }
223 if (tmp_copy) {
224 spin_unlock(&dst_server->nfs_client->cl_lock);
225 kfree(copy);
226 copy = tmp_copy;
227 goto out;
228 }
229
230 memcpy(&copy->stateid, &res->write_res.stateid, NFS4_STATEID_SIZE);
231 init_completion(&copy->completion);
232 copy->parent_dst_state = dst_ctx->state;
233 copy->parent_src_state = src_ctx->state;
234
235 list_add_tail(&copy->copies, &dst_server->ss_copies);
236 spin_unlock(&dst_server->nfs_client->cl_lock);
237
238 if (dst_server != src_server) {
239 spin_lock(&src_server->nfs_client->cl_lock);
240 list_add_tail(&copy->src_copies, &src_server->ss_src_copies);
241 spin_unlock(&src_server->nfs_client->cl_lock);
242 }
243
244 wait:
245 status = wait_for_completion_interruptible_timeout(&copy->completion,
246 timeout);
247 if (!status)
248 goto timeout;
249 nfs4_copy_dequeue_callback(dst_server, src_server, copy);
250 if (status == -ERESTARTSYS) {
251 goto out_cancel;
252 } else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
253 status = -EAGAIN;
254 *restart = true;
255 goto out_cancel;
256 }
257 out:
258 res->write_res.count = copy->count;
259 /* Copy out the updated write verifier provided by CB_OFFLOAD. */
260 memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
261 status = -copy->error;
262
263 out_free:
264 kfree(copy);
265 return status;
266 out_cancel:
267 nfs42_do_offload_cancel_async(dst, &copy->stateid);
268 if (!nfs42_files_from_same_server(src, dst))
269 nfs42_do_offload_cancel_async(src, src_stateid);
270 goto out_free;
271 timeout:
272 timeout <<= 1;
273 if (timeout > (clp->cl_lease_time >> 1))
274 timeout = clp->cl_lease_time >> 1;
275 status = nfs42_proc_offload_status(dst, &copy->stateid, &copied);
276 if (status == -EINPROGRESS)
277 goto wait;
278 nfs4_copy_dequeue_callback(dst_server, src_server, copy);
279 switch (status) {
280 case 0:
281 /* The server recognized the copy stateid, so it hasn't
282 * rebooted. Don't overwrite the verifier returned in the
283 * COPY result. */
284 res->write_res.count = copied;
285 goto out_free;
286 case -EREMOTEIO:
287 /* COPY operation failed on the server. */
288 status = -EOPNOTSUPP;
289 res->write_res.count = copied;
290 goto out_free;
291 case -EBADF:
292 /* Server did not recognize the copy stateid. It has
293 * probably restarted and lost the plot. */
294 res->write_res.count = 0;
295 status = -EOPNOTSUPP;
296 break;
297 case -EOPNOTSUPP:
298 /* RFC 7862 REQUIREs server to support OFFLOAD_STATUS when
299 * it has signed up for an async COPY, so server is not
300 * spec-compliant. */
301 res->write_res.count = 0;
302 }
303 goto out_free;
304 }
305
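/*
 * COMMIT data written by a COPY that was not NFS_FILE_SYNC. If the
 * commit verifier does not match the copy's write verifier, the server
 * may have rebooted, so return -EAGAIN and let the caller redo the copy.
 */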
306 static int process_copy_commit(struct file *dst, loff_t pos_dst,
307 struct nfs42_copy_res *res)
308 {
309 struct nfs_commitres cres;
310 int status = -ENOMEM;
311
312 cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_KERNEL);
313 if (!cres.verf)
314 goto out;
315
316 status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres);
317 if (status)
318 goto out_free;
319 if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
320 &cres.verf->verifier)) {
321 dprintk("commit verf differs from copy verf\n");
322 status = -EAGAIN;
323 }
324 out_free:
325 kfree(cres.verf);
326 out:
327 return status;
328 }
329
330 /**
331 * nfs42_copy_dest_done - perform inode cache updates after clone/copy offload
332 * @inode: pointer to destination inode
333 * @pos: destination offset
334 * @len: copy length
335 *
336 * Punch a hole in the inode page cache, so that the NFS client will
337 * know to retrieve new data.
338 * Update the file size if necessary, and then mark the inode as having
339 * invalid cached values for change attribute, ctime, mtime and space used.
340 */
341 static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
342 {
343 loff_t newsize = pos + len;
344 loff_t end = newsize - 1;
345
346 WARN_ON_ONCE(invalidate_inode_pages2_range(inode->i_mapping,
347 pos >> PAGE_SHIFT, end >> PAGE_SHIFT));
348
349 spin_lock(&inode->i_lock);
350 if (newsize > i_size_read(inode))
351 i_size_write(inode, newsize);
352 nfs_set_cache_invalid(inode, NFS_INO_INVALID_CHANGE |
353 NFS_INO_INVALID_CTIME |
354 NFS_INO_INVALID_MTIME |
355 NFS_INO_INVALID_BLOCKS);
356 spin_unlock(&inode->i_lock);
357 }
358
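/*
 * One COPY attempt. The source stateid either comes from a prior
 * COPY_NOTIFY (@cnr_stateid, inter-server copy) or is derived from the
 * local open/lock state. Dirty source and destination data is flushed
 * before the RPC; afterwards an asynchronous copy is waited for and any
 * unstable data is committed, then the destination inode is updated.
 */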
359 static ssize_t _nfs42_proc_copy(struct file *src,
360 struct nfs_lock_context *src_lock,
361 struct file *dst,
362 struct nfs_lock_context *dst_lock,
363 struct nfs42_copy_args *args,
364 struct nfs42_copy_res *res,
365 struct nl4_server *nss,
366 nfs4_stateid *cnr_stateid,
367 bool *restart)
368 {
369 struct rpc_message msg = {
370 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY],
371 .rpc_argp = args,
372 .rpc_resp = res,
373 };
374 struct inode *dst_inode = file_inode(dst);
375 struct inode *src_inode = file_inode(src);
376 struct nfs_server *dst_server = NFS_SERVER(dst_inode);
377 struct nfs_server *src_server = NFS_SERVER(src_inode);
378 loff_t pos_src = args->src_pos;
379 loff_t pos_dst = args->dst_pos;
380 size_t count = args->count;
381 ssize_t status;
382
383 if (nss) {
384 args->cp_src = nss;
385 nfs4_stateid_copy(&args->src_stateid, cnr_stateid);
386 } else {
387 status = nfs4_set_rw_stateid(&args->src_stateid,
388 src_lock->open_context, src_lock, FMODE_READ);
389 if (status) {
390 if (status == -EAGAIN)
391 status = -NFS4ERR_BAD_STATEID;
392 return status;
393 }
394 }
395 status = nfs_filemap_write_and_wait_range(src->f_mapping,
396 pos_src, pos_src + (loff_t)count - 1);
397 if (status)
398 return status;
399
400 status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
401 dst_lock, FMODE_WRITE);
402 if (status) {
403 if (status == -EAGAIN)
404 status = -NFS4ERR_BAD_STATEID;
405 return status;
406 }
407
408 status = nfs_sync_inode(dst_inode);
409 if (status)
410 return status;
411
412 res->commit_res.verf = NULL;
413 if (args->sync) {
414 res->commit_res.verf =
415 kzalloc(sizeof(struct nfs_writeverf), GFP_KERNEL);
416 if (!res->commit_res.verf)
417 return -ENOMEM;
418 }
419 set_bit(NFS_CLNT_SRC_SSC_COPY_STATE,
420 &src_lock->open_context->state->flags);
421 set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
422 &dst_lock->open_context->state->flags);
423
424 status = nfs4_call_sync(dst_server->client, dst_server, &msg,
425 &args->seq_args, &res->seq_res, 0);
426 trace_nfs4_copy(src_inode, dst_inode, args, res, nss, status);
427 if (status == -ENOTSUPP)
428 dst_server->caps &= ~NFS_CAP_COPY;
429 if (status)
430 goto out;
431
432 if (args->sync &&
433 nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
434 &res->commit_res.verf->verifier)) {
435 status = -EAGAIN;
436 goto out;
437 }
438
439 if (!res->synchronous) {
440 status = handle_async_copy(res, dst_server, src_server, src,
441 dst, &args->src_stateid, restart);
442 if (status)
443 goto out;
444 }
445
446 if ((!res->synchronous || !args->sync) &&
447 res->write_res.verifier.committed != NFS_FILE_SYNC) {
448 status = process_copy_commit(dst, pos_dst, res);
449 if (status)
450 goto out;
451 }
452
453 nfs42_copy_dest_done(dst_inode, pos_dst, res->write_res.count);
454 nfs_invalidate_atime(src_inode);
455 status = res->write_res.count;
456 out:
457 if (args->sync)
458 kfree(res->commit_res.verf);
459 return status;
460 }
461
462 ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
463 struct file *dst, loff_t pos_dst, size_t count,
464 struct nl4_server *nss,
465 nfs4_stateid *cnr_stateid, bool sync)
466 {
467 struct nfs_server *server = NFS_SERVER(file_inode(dst));
468 struct nfs_lock_context *src_lock;
469 struct nfs_lock_context *dst_lock;
470 struct nfs42_copy_args args = {
471 .src_fh = NFS_FH(file_inode(src)),
472 .src_pos = pos_src,
473 .dst_fh = NFS_FH(file_inode(dst)),
474 .dst_pos = pos_dst,
475 .count = count,
476 .sync = sync,
477 };
478 struct nfs42_copy_res res;
479 struct nfs4_exception src_exception = {
480 .inode = file_inode(src),
481 .stateid = &args.src_stateid,
482 };
483 struct nfs4_exception dst_exception = {
484 .inode = file_inode(dst),
485 .stateid = &args.dst_stateid,
486 };
487 ssize_t err, err2;
488 bool restart = false;
489
490 src_lock = nfs_get_lock_context(nfs_file_open_context(src));
491 if (IS_ERR(src_lock))
492 return PTR_ERR(src_lock);
493
494 src_exception.state = src_lock->open_context->state;
495
496 dst_lock = nfs_get_lock_context(nfs_file_open_context(dst));
497 if (IS_ERR(dst_lock)) {
498 err = PTR_ERR(dst_lock);
499 goto out_put_src_lock;
500 }
501
502 dst_exception.state = dst_lock->open_context->state;
503
504 do {
505 inode_lock(file_inode(dst));
506 err = _nfs42_proc_copy(src, src_lock,
507 dst, dst_lock,
508 &args, &res,
509 nss, cnr_stateid, &restart);
510 inode_unlock(file_inode(dst));
511
512 if (err >= 0)
513 break;
514 if ((err == -ENOTSUPP ||
515 err == -NFS4ERR_OFFLOAD_DENIED) &&
516 nfs42_files_from_same_server(src, dst)) {
517 err = -EOPNOTSUPP;
518 break;
519 } else if (err == -EAGAIN) {
520 if (!restart) {
521 dst_exception.retry = 1;
522 continue;
523 }
524 break;
525 } else if (err == -NFS4ERR_OFFLOAD_NO_REQS &&
526 args.sync != res.synchronous) {
527 args.sync = res.synchronous;
528 dst_exception.retry = 1;
529 continue;
530 } else if ((err == -ESTALE ||
531 err == -NFS4ERR_OFFLOAD_DENIED ||
532 err == -ENOTSUPP) &&
533 !nfs42_files_from_same_server(src, dst)) {
534 nfs42_do_offload_cancel_async(src, &args.src_stateid);
535 err = -EOPNOTSUPP;
536 break;
537 }
538
539 err2 = nfs4_handle_exception(server, err, &src_exception);
540 err = nfs4_handle_exception(server, err, &dst_exception);
541 if (!err)
542 err = err2;
543 } while (src_exception.retry || dst_exception.retry);
544
545 nfs_put_lock_context(dst_lock);
546 out_put_src_lock:
547 nfs_put_lock_context(src_lock);
548 return err;
549 }
550
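/* Argument/result container for OFFLOAD_STATUS and OFFLOAD_CANCEL. */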
551 struct nfs42_offload_data {
552 struct nfs_server *seq_server;
553 struct nfs42_offload_status_args args;
554 struct nfs42_offload_status_res res;
555 };
556
557 static void nfs42_offload_prepare(struct rpc_task *task, void *calldata)
558 {
559 struct nfs42_offload_data *data = calldata;
560
561 nfs4_setup_sequence(data->seq_server->nfs_client,
562 &data->args.osa_seq_args,
563 &data->res.osr_seq_res, task);
564 }
565
566 static void nfs42_offload_cancel_done(struct rpc_task *task, void *calldata)
567 {
568 struct nfs42_offload_data *data = calldata;
569
570 trace_nfs4_offload_cancel(&data->args, task->tk_status);
571 nfs41_sequence_done(task, &data->res.osr_seq_res);
572 if (task->tk_status &&
573 nfs4_async_handle_error(task, data->seq_server, NULL,
574 NULL) == -EAGAIN)
575 rpc_restart_call_prepare(task);
576 }
577
578 static void nfs42_offload_release(void *data)
579 {
580 kfree(data);
581 }
582
583 static const struct rpc_call_ops nfs42_offload_cancel_ops = {
584 .rpc_call_prepare = nfs42_offload_prepare,
585 .rpc_call_done = nfs42_offload_cancel_done,
586 .rpc_release = nfs42_offload_release,
587 };
588
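/*
 * Send OFFLOAD_CANCEL for @stateid to the destination server. The RPC
 * runs as an async task, but this function waits for it to complete
 * before returning. An -ENOTSUPP reply clears NFS_CAP_OFFLOAD_CANCEL.
 */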
589 static int nfs42_do_offload_cancel_async(struct file *dst,
590 nfs4_stateid *stateid)
591 {
592 struct nfs_server *dst_server = NFS_SERVER(file_inode(dst));
593 struct nfs42_offload_data *data = NULL;
594 struct nfs_open_context *ctx = nfs_file_open_context(dst);
595 struct rpc_task *task;
596 struct rpc_message msg = {
597 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_CANCEL],
598 .rpc_cred = ctx->cred,
599 };
600 struct rpc_task_setup task_setup_data = {
601 .rpc_client = dst_server->client,
602 .rpc_message = &msg,
603 .callback_ops = &nfs42_offload_cancel_ops,
604 .workqueue = nfsiod_workqueue,
605 .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
606 };
607 int status;
608
609 if (!(dst_server->caps & NFS_CAP_OFFLOAD_CANCEL))
610 return -EOPNOTSUPP;
611
612 data = kzalloc(sizeof(struct nfs42_offload_data), GFP_KERNEL);
613 if (data == NULL)
614 return -ENOMEM;
615
616 data->seq_server = dst_server;
617 data->args.osa_src_fh = NFS_FH(file_inode(dst));
618 memcpy(&data->args.osa_stateid, stateid,
619 sizeof(data->args.osa_stateid));
620 msg.rpc_argp = &data->args;
621 msg.rpc_resp = &data->res;
622 task_setup_data.callback_data = data;
623 nfs4_init_sequence(&data->args.osa_seq_args, &data->res.osr_seq_res,
624 1, 0);
625 task = rpc_run_task(&task_setup_data);
626 if (IS_ERR(task))
627 return PTR_ERR(task);
628 status = rpc_wait_for_completion_task(task);
629 if (status == -ENOTSUPP)
630 dst_server->caps &= ~NFS_CAP_OFFLOAD_CANCEL;
631 rpc_put_task(task);
632 return status;
633 }
634
635 static int
636 _nfs42_proc_offload_status(struct nfs_server *server, struct file *file,
637 struct nfs42_offload_data *data)
638 {
639 struct nfs_open_context *ctx = nfs_file_open_context(file);
640 struct rpc_message msg = {
641 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_STATUS],
642 .rpc_argp = &data->args,
643 .rpc_resp = &data->res,
644 .rpc_cred = ctx->cred,
645 };
646 int status;
647
648 status = nfs4_call_sync(server->client, server, &msg,
649 &data->args.osa_seq_args,
650 &data->res.osr_seq_res, 1);
651 trace_nfs4_offload_status(&data->args, status);
652 switch (status) {
653 case 0:
654 break;
655
656 case -NFS4ERR_ADMIN_REVOKED:
657 case -NFS4ERR_BAD_STATEID:
658 case -NFS4ERR_OLD_STATEID:
659 /*
660 * Server does not recognize the COPY stateid. CB_OFFLOAD
661 * could have purged it, or server might have rebooted.
662 * Since COPY stateids don't have an associated inode,
663 * avoid triggering state recovery.
664 */
665 status = -EBADF;
666 break;
667 case -NFS4ERR_NOTSUPP:
668 case -ENOTSUPP:
669 case -EOPNOTSUPP:
670 server->caps &= ~NFS_CAP_OFFLOAD_STATUS;
671 status = -EOPNOTSUPP;
672 break;
673 }
674
675 return status;
676 }
677
678 /**
679 * nfs42_proc_offload_status - Poll completion status of an async copy operation
680 * @dst: handle of file being copied into
681 * @stateid: copy stateid (from async COPY result)
682 * @copied: OUT: number of bytes copied so far
683 *
684 * Return values:
685 * %0: Server returned an NFS4_OK completion status
686 * %-EINPROGRESS: Server returned no completion status
687 * %-EREMOTEIO: Server returned an error completion status
688 * %-EBADF: Server did not recognize the copy stateid
689 * %-EOPNOTSUPP: Server does not support OFFLOAD_STATUS
690 * %-ERESTARTSYS: Wait interrupted by signal
691 *
692 * Other negative errnos indicate the client could not complete the
693 * request.
694 */
695 static int
696 nfs42_proc_offload_status(struct file *dst, nfs4_stateid *stateid, u64 *copied)
697 {
698 struct inode *inode = file_inode(dst);
699 struct nfs_server *server = NFS_SERVER(inode);
700 struct nfs4_exception exception = {
701 .inode = inode,
702 };
703 struct nfs42_offload_data *data;
704 int status;
705
706 if (!(server->caps & NFS_CAP_OFFLOAD_STATUS))
707 return -EOPNOTSUPP;
708
709 data = kzalloc(sizeof(*data), GFP_KERNEL);
710 if (!data)
711 return -ENOMEM;
712 data->seq_server = server;
713 data->args.osa_src_fh = NFS_FH(inode);
714 memcpy(&data->args.osa_stateid, stateid,
715 sizeof(data->args.osa_stateid));
716 exception.stateid = &data->args.osa_stateid;
717 do {
718 status = _nfs42_proc_offload_status(server, dst, data);
719 if (status == -EOPNOTSUPP)
720 goto out;
721 status = nfs4_handle_exception(server, status, &exception);
722 } while (exception.retry);
723 if (status)
724 goto out;
725
726 *copied = data->res.osr_count;
727 if (!data->res.complete_count)
728 status = -EINPROGRESS;
729 else if (data->res.osr_complete != NFS_OK)
730 status = -EREMOTEIO;
731
732 out:
733 kfree(data);
734 return status;
735 }
736
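/*
 * Issue a single COPY_NOTIFY to the source server, authorizing the
 * destination described in @args->cna_dst to read from @src on the
 * copy owner's behalf. The source file's read stateid is used.
 */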
737 static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
738 struct nfs42_copy_notify_args *args,
739 struct nfs42_copy_notify_res *res)
740 {
741 struct nfs_server *src_server = NFS_SERVER(file_inode(src));
742 struct rpc_message msg = {
743 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COPY_NOTIFY],
744 .rpc_argp = args,
745 .rpc_resp = res,
746 };
747 int status;
748 struct nfs_open_context *ctx;
749 struct nfs_lock_context *l_ctx;
750
751 ctx = get_nfs_open_context(nfs_file_open_context(src));
752 l_ctx = nfs_get_lock_context(ctx);
753 if (IS_ERR(l_ctx)) {
754 status = PTR_ERR(l_ctx);
755 goto out;
756 }
757
758 status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
759 FMODE_READ);
760 nfs_put_lock_context(l_ctx);
761 if (status) {
762 if (status == -EAGAIN)
763 status = -NFS4ERR_BAD_STATEID;
764 goto out;
765 }
766
767 status = nfs4_call_sync(src_server->client, src_server, &msg,
768 &args->cna_seq_args, &res->cnr_seq_res, 0);
769 trace_nfs4_copy_notify(file_inode(src), args, res, status);
770 if (status == -ENOTSUPP)
771 src_server->caps &= ~NFS_CAP_COPY_NOTIFY;
772
773 out:
774 put_nfs_open_context(nfs_file_open_context(src));
775 return status;
776 }
777
778 int nfs42_proc_copy_notify(struct file *src, struct file *dst,
779 struct nfs42_copy_notify_res *res)
780 {
781 struct nfs_server *src_server = NFS_SERVER(file_inode(src));
782 struct nfs42_copy_notify_args *args;
783 struct nfs4_exception exception = {
784 .inode = file_inode(src),
785 };
786 int status;
787
788 if (!(src_server->caps & NFS_CAP_COPY_NOTIFY))
789 return -EOPNOTSUPP;
790
791 args = kzalloc(sizeof(struct nfs42_copy_notify_args), GFP_KERNEL);
792 if (args == NULL)
793 return -ENOMEM;
794
795 args->cna_src_fh = NFS_FH(file_inode(src));
796 args->cna_dst.nl4_type = NL4_NETADDR;
797 nfs42_set_netaddr(dst, &args->cna_dst.u.nl4_addr);
798 exception.stateid = &args->cna_src_stateid;
799
800 do {
801 status = _nfs42_proc_copy_notify(src, dst, args, res);
802 if (status == -ENOTSUPP) {
803 status = -EOPNOTSUPP;
804 goto out;
805 }
806 status = nfs4_handle_exception(src_server, status, &exception);
807 } while (exception.retry);
808
809 out:
810 kfree(args);
811 return status;
812 }
813
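/*
 * Implement SEEK_HOLE/SEEK_DATA with the NFSv4.2 SEEK operation. Dirty
 * pages from @offset onwards are flushed first so the server's view of
 * the file is current. SEEK_DATA past EOF is reported as -NFS4ERR_NXIO.
 */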
814 static loff_t _nfs42_proc_llseek(struct file *filep,
815 struct nfs_lock_context *lock, loff_t offset, int whence)
816 {
817 struct inode *inode = file_inode(filep);
818 struct nfs42_seek_args args = {
819 .sa_fh = NFS_FH(inode),
820 .sa_offset = offset,
821 .sa_what = (whence == SEEK_HOLE) ?
822 NFS4_CONTENT_HOLE : NFS4_CONTENT_DATA,
823 };
824 struct nfs42_seek_res res;
825 struct rpc_message msg = {
826 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEEK],
827 .rpc_argp = &args,
828 .rpc_resp = &res,
829 };
830 struct nfs_server *server = NFS_SERVER(inode);
831 int status;
832
833 if (!nfs_server_capable(inode, NFS_CAP_SEEK))
834 return -ENOTSUPP;
835
836 status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
837 lock, FMODE_READ);
838 if (status) {
839 if (status == -EAGAIN)
840 status = -NFS4ERR_BAD_STATEID;
841 return status;
842 }
843
844 status = nfs_filemap_write_and_wait_range(inode->i_mapping,
845 offset, LLONG_MAX);
846 if (status)
847 return status;
848
849 status = nfs4_call_sync(server->client, server, &msg,
850 &args.seq_args, &res.seq_res, 0);
851 trace_nfs4_llseek(inode, &args, &res, status);
852 if (status == -ENOTSUPP)
853 server->caps &= ~NFS_CAP_SEEK;
854 if (status)
855 return status;
856
857 if (whence == SEEK_DATA && res.sr_eof)
858 return -NFS4ERR_NXIO;
859 else
860 return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
861 }
862
863 loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
864 {
865 struct nfs_server *server = NFS_SERVER(file_inode(filep));
866 struct nfs4_exception exception = { };
867 struct nfs_lock_context *lock;
868 loff_t err;
869
870 lock = nfs_get_lock_context(nfs_file_open_context(filep));
871 if (IS_ERR(lock))
872 return PTR_ERR(lock);
873
874 exception.inode = file_inode(filep);
875 exception.state = lock->open_context->state;
876
877 do {
878 err = _nfs42_proc_llseek(filep, lock, offset, whence);
879 if (err >= 0)
880 break;
881 if (err == -ENOTSUPP) {
882 err = -EOPNOTSUPP;
883 break;
884 }
885 err = nfs4_handle_exception(server, err, &exception);
886 } while (exception.retry);
887
888 nfs_put_lock_context(lock);
889 return err;
890 }
891
892
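/*
 * LAYOUTSTATS is sent as an asynchronous RPC. The prepare callback
 * snapshots the current layout stateid (and exits early if the layout
 * is no longer valid); the done callback handles stateid and layout
 * errors, retrying or tearing down the layout as needed.
 */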
893 static void
894 nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
895 {
896 struct nfs42_layoutstat_data *data = calldata;
897 struct inode *inode = data->inode;
898 struct nfs_server *server = NFS_SERVER(inode);
899 struct pnfs_layout_hdr *lo;
900
901 spin_lock(&inode->i_lock);
902 lo = NFS_I(inode)->layout;
903 if (!pnfs_layout_is_valid(lo)) {
904 spin_unlock(&inode->i_lock);
905 rpc_exit(task, 0);
906 return;
907 }
908 nfs4_stateid_copy(&data->args.stateid, &lo->plh_stateid);
909 spin_unlock(&inode->i_lock);
910 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
911 &data->res.seq_res, task);
912 }
913
914 static void
915 nfs42_layoutstat_done(struct rpc_task *task, void *calldata)
916 {
917 struct nfs42_layoutstat_data *data = calldata;
918 struct inode *inode = data->inode;
919 struct pnfs_layout_hdr *lo;
920
921 if (!nfs4_sequence_done(task, &data->res.seq_res))
922 return;
923
924 switch (task->tk_status) {
925 case 0:
926 return;
927 case -NFS4ERR_BADHANDLE:
928 case -ESTALE:
929 pnfs_destroy_layout(NFS_I(inode));
930 break;
931 case -NFS4ERR_EXPIRED:
932 case -NFS4ERR_ADMIN_REVOKED:
933 case -NFS4ERR_DELEG_REVOKED:
934 case -NFS4ERR_STALE_STATEID:
935 case -NFS4ERR_BAD_STATEID:
936 spin_lock(&inode->i_lock);
937 lo = NFS_I(inode)->layout;
938 if (pnfs_layout_is_valid(lo) &&
939 nfs4_stateid_match(&data->args.stateid,
940 &lo->plh_stateid)) {
941 LIST_HEAD(head);
942
943 /*
944 * Mark the bad layout state as invalid, then retry
945 * with the current stateid.
946 */
947 pnfs_mark_layout_stateid_invalid(lo, &head);
948 spin_unlock(&inode->i_lock);
949 pnfs_free_lseg_list(&head);
950 nfs_commit_inode(inode, 0);
951 } else
952 spin_unlock(&inode->i_lock);
953 break;
954 case -NFS4ERR_OLD_STATEID:
955 spin_lock(&inode->i_lock);
956 lo = NFS_I(inode)->layout;
957 if (pnfs_layout_is_valid(lo) &&
958 nfs4_stateid_match_other(&data->args.stateid,
959 &lo->plh_stateid)) {
960 /* Do we need to delay before resending? */
961 if (!nfs4_stateid_is_newer(&lo->plh_stateid,
962 &data->args.stateid))
963 rpc_delay(task, HZ);
964 rpc_restart_call_prepare(task);
965 }
966 spin_unlock(&inode->i_lock);
967 break;
968 case -ENOTSUPP:
969 case -EOPNOTSUPP:
970 NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
971 }
972
973 trace_nfs4_layoutstats(inode, &data->args.stateid, task->tk_status);
974 }
975
976 static void
977 nfs42_layoutstat_release(void *calldata)
978 {
979 struct nfs42_layoutstat_data *data = calldata;
980 struct nfs42_layoutstat_devinfo *devinfo = data->args.devinfo;
981 int i;
982
983 for (i = 0; i < data->args.num_dev; i++) {
984 if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
985 devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
986 }
987
988 pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout);
989 smp_mb__before_atomic();
990 clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags);
991 smp_mb__after_atomic();
992 nfs_iput_and_deactive(data->inode);
993 kfree(data->args.devinfo);
994 kfree(data);
995 }
996
997 static const struct rpc_call_ops nfs42_layoutstat_ops = {
998 .rpc_call_prepare = nfs42_layoutstat_prepare,
999 .rpc_call_done = nfs42_layoutstat_done,
1000 .rpc_release = nfs42_layoutstat_release,
1001 };
1002
1003 int nfs42_proc_layoutstats_generic(struct nfs_server *server,
1004 struct nfs42_layoutstat_data *data)
1005 {
1006 struct rpc_message msg = {
1007 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS],
1008 .rpc_argp = &data->args,
1009 .rpc_resp = &data->res,
1010 };
1011 struct rpc_task_setup task_setup = {
1012 .rpc_client = server->client,
1013 .rpc_message = &msg,
1014 .callback_ops = &nfs42_layoutstat_ops,
1015 .callback_data = data,
1016 .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
1017 };
1018 struct rpc_task *task;
1019
1020 data->inode = nfs_igrab_and_active(data->args.inode);
1021 if (!data->inode) {
1022 nfs42_layoutstat_release(data);
1023 return -EAGAIN;
1024 }
1025 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
1026 task = rpc_run_task(&task_setup);
1027 if (IS_ERR(task))
1028 return PTR_ERR(task);
1029 rpc_put_task(task);
1030 return 0;
1031 }
1032
1033 static struct nfs42_layouterror_data *
1034 nfs42_alloc_layouterror_data(struct pnfs_layout_segment *lseg, gfp_t gfp_flags)
1035 {
1036 struct nfs42_layouterror_data *data;
1037 struct inode *inode = lseg->pls_layout->plh_inode;
1038
1039 data = kzalloc(sizeof(*data), gfp_flags);
1040 if (data) {
1041 data->args.inode = data->inode = nfs_igrab_and_active(inode);
1042 if (data->inode) {
1043 data->lseg = pnfs_get_lseg(lseg);
1044 if (data->lseg)
1045 return data;
1046 nfs_iput_and_deactive(data->inode);
1047 }
1048 kfree(data);
1049 }
1050 return NULL;
1051 }
1052
1053 static void
1054 nfs42_free_layouterror_data(struct nfs42_layouterror_data *data)
1055 {
1056 pnfs_put_lseg(data->lseg);
1057 nfs_iput_and_deactive(data->inode);
1058 kfree(data);
1059 }
1060
1061 static void
1062 nfs42_layouterror_prepare(struct rpc_task *task, void *calldata)
1063 {
1064 struct nfs42_layouterror_data *data = calldata;
1065 struct inode *inode = data->inode;
1066 struct nfs_server *server = NFS_SERVER(inode);
1067 struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
1068 unsigned i;
1069
1070 spin_lock(&inode->i_lock);
1071 if (!pnfs_layout_is_valid(lo)) {
1072 spin_unlock(&inode->i_lock);
1073 rpc_exit(task, 0);
1074 return;
1075 }
1076 for (i = 0; i < data->args.num_errors; i++)
1077 nfs4_stateid_copy(&data->args.errors[i].stateid,
1078 &lo->plh_stateid);
1079 spin_unlock(&inode->i_lock);
1080 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
1081 &data->res.seq_res, task);
1082 }
1083
1084 static void
1085 nfs42_layouterror_done(struct rpc_task *task, void *calldata)
1086 {
1087 struct nfs42_layouterror_data *data = calldata;
1088 struct inode *inode = data->inode;
1089 struct pnfs_layout_hdr *lo = data->lseg->pls_layout;
1090
1091 if (!nfs4_sequence_done(task, &data->res.seq_res))
1092 return;
1093
1094 switch (task->tk_status) {
1095 case 0:
1096 return;
1097 case -NFS4ERR_BADHANDLE:
1098 case -ESTALE:
1099 pnfs_destroy_layout(NFS_I(inode));
1100 break;
1101 case -NFS4ERR_EXPIRED:
1102 case -NFS4ERR_ADMIN_REVOKED:
1103 case -NFS4ERR_DELEG_REVOKED:
1104 case -NFS4ERR_STALE_STATEID:
1105 case -NFS4ERR_BAD_STATEID:
1106 spin_lock(&inode->i_lock);
1107 if (pnfs_layout_is_valid(lo) &&
1108 nfs4_stateid_match(&data->args.errors[0].stateid,
1109 &lo->plh_stateid)) {
1110 LIST_HEAD(head);
1111
1112 /*
1113 * Mark the bad layout state as invalid, then retry
1114 * with the current stateid.
1115 */
1116 pnfs_mark_layout_stateid_invalid(lo, &head);
1117 spin_unlock(&inode->i_lock);
1118 pnfs_free_lseg_list(&head);
1119 nfs_commit_inode(inode, 0);
1120 } else
1121 spin_unlock(&inode->i_lock);
1122 break;
1123 case -NFS4ERR_OLD_STATEID:
1124 spin_lock(&inode->i_lock);
1125 if (pnfs_layout_is_valid(lo) &&
1126 nfs4_stateid_match_other(&data->args.errors[0].stateid,
1127 &lo->plh_stateid)) {
1128 /* Do we need to delay before resending? */
1129 if (!nfs4_stateid_is_newer(&lo->plh_stateid,
1130 &data->args.errors[0].stateid))
1131 rpc_delay(task, HZ);
1132 rpc_restart_call_prepare(task);
1133 }
1134 spin_unlock(&inode->i_lock);
1135 break;
1136 case -ENOTSUPP:
1137 case -EOPNOTSUPP:
1138 NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
1139 }
1140
1141 trace_nfs4_layouterror(inode, &data->args.errors[0].stateid,
1142 task->tk_status);
1143 }
1144
1145 static void
1146 nfs42_layouterror_release(void *calldata)
1147 {
1148 struct nfs42_layouterror_data *data = calldata;
1149
1150 nfs42_free_layouterror_data(data);
1151 }
1152
1153 static const struct rpc_call_ops nfs42_layouterror_ops = {
1154 .rpc_call_prepare = nfs42_layouterror_prepare,
1155 .rpc_call_done = nfs42_layouterror_done,
1156 .rpc_release = nfs42_layouterror_release,
1157 };
1158
1159 int nfs42_proc_layouterror(struct pnfs_layout_segment *lseg,
1160 const struct nfs42_layout_error *errors, size_t n)
1161 {
1162 struct inode *inode = lseg->pls_layout->plh_inode;
1163 struct nfs42_layouterror_data *data;
1164 struct rpc_task *task;
1165 struct rpc_message msg = {
1166 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTERROR],
1167 };
1168 struct rpc_task_setup task_setup = {
1169 .rpc_message = &msg,
1170 .callback_ops = &nfs42_layouterror_ops,
1171 .flags = RPC_TASK_ASYNC | RPC_TASK_MOVEABLE,
1172 };
1173 unsigned int i;
1174
1175 if (!nfs_server_capable(inode, NFS_CAP_LAYOUTERROR))
1176 return -EOPNOTSUPP;
1177 if (n > NFS42_LAYOUTERROR_MAX)
1178 return -EINVAL;
1179 data = nfs42_alloc_layouterror_data(lseg, nfs_io_gfp_mask());
1180 if (!data)
1181 return -ENOMEM;
1182 for (i = 0; i < n; i++) {
1183 data->args.errors[i] = errors[i];
1184 data->args.num_errors++;
1185 data->res.num_errors++;
1186 }
1187 msg.rpc_argp = &data->args;
1188 msg.rpc_resp = &data->res;
1189 task_setup.callback_data = data;
1190 task_setup.rpc_client = NFS_SERVER(inode)->client;
1191 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0, 0);
1192 task = rpc_run_task(&task_setup);
1193 if (IS_ERR(task))
1194 return PTR_ERR(task);
1195 rpc_put_task(task);
1196 return 0;
1197 }
1198 EXPORT_SYMBOL_GPL(nfs42_proc_layouterror);
1199
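/*
 * Issue a single CLONE RPC. A read stateid for the source and a write
 * stateid for the destination are both required. On success the
 * destination's page cache is invalidated over the cloned range and its
 * attributes are refreshed from the post-op fattr.
 */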
1200 static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
1201 struct file *dst_f, struct nfs_lock_context *src_lock,
1202 struct nfs_lock_context *dst_lock, loff_t src_offset,
1203 loff_t dst_offset, loff_t count)
1204 {
1205 struct inode *src_inode = file_inode(src_f);
1206 struct inode *dst_inode = file_inode(dst_f);
1207 struct nfs_server *server = NFS_SERVER(dst_inode);
1208 __u32 dst_bitmask[NFS_BITMASK_SZ];
1209 struct nfs42_clone_args args = {
1210 .src_fh = NFS_FH(src_inode),
1211 .dst_fh = NFS_FH(dst_inode),
1212 .src_offset = src_offset,
1213 .dst_offset = dst_offset,
1214 .count = count,
1215 .dst_bitmask = dst_bitmask,
1216 };
1217 struct nfs42_clone_res res = {
1218 .server = server,
1219 };
1220 int status;
1221
1222 msg->rpc_argp = &args;
1223 msg->rpc_resp = &res;
1224
1225 status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
1226 src_lock, FMODE_READ);
1227 if (status) {
1228 if (status == -EAGAIN)
1229 status = -NFS4ERR_BAD_STATEID;
1230 return status;
1231 }
1232 status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
1233 dst_lock, FMODE_WRITE);
1234 if (status) {
1235 if (status == -EAGAIN)
1236 status = -NFS4ERR_BAD_STATEID;
1237 return status;
1238 }
1239
1240 res.dst_fattr = nfs_alloc_fattr();
1241 if (!res.dst_fattr)
1242 return -ENOMEM;
1243
1244 nfs4_bitmask_set(dst_bitmask, server->cache_consistency_bitmask,
1245 dst_inode, NFS_INO_INVALID_BLOCKS);
1246
1247 status = nfs4_call_sync(server->client, server, msg,
1248 &args.seq_args, &res.seq_res, 0);
1249 trace_nfs4_clone(src_inode, dst_inode, &args, status);
1250 if (status == 0) {
1251 /* a zero-length count means clone to EOF in src */
1252 if (count == 0 && res.dst_fattr->valid & NFS_ATTR_FATTR_SIZE)
1253 count = nfs_size_to_loff_t(res.dst_fattr->size) - dst_offset;
1254 nfs42_copy_dest_done(dst_inode, dst_offset, count);
1255 status = nfs_post_op_update_inode(dst_inode, res.dst_fattr);
1256 }
1257
1258 kfree(res.dst_fattr);
1259 return status;
1260 }
1261
1262 int nfs42_proc_clone(struct file *src_f, struct file *dst_f,
1263 loff_t src_offset, loff_t dst_offset, loff_t count)
1264 {
1265 struct rpc_message msg = {
1266 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLONE],
1267 };
1268 struct inode *inode = file_inode(src_f);
1269 struct nfs_server *server = NFS_SERVER(file_inode(src_f));
1270 struct nfs_lock_context *src_lock;
1271 struct nfs_lock_context *dst_lock;
1272 struct nfs4_exception src_exception = { };
1273 struct nfs4_exception dst_exception = { };
1274 int err, err2;
1275
1276 if (!nfs_server_capable(inode, NFS_CAP_CLONE))
1277 return -EOPNOTSUPP;
1278
1279 src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
1280 if (IS_ERR(src_lock))
1281 return PTR_ERR(src_lock);
1282
1283 src_exception.inode = file_inode(src_f);
1284 src_exception.state = src_lock->open_context->state;
1285
1286 dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
1287 if (IS_ERR(dst_lock)) {
1288 err = PTR_ERR(dst_lock);
1289 goto out_put_src_lock;
1290 }
1291
1292 dst_exception.inode = file_inode(dst_f);
1293 dst_exception.state = dst_lock->open_context->state;
1294
1295 do {
1296 err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
1297 src_offset, dst_offset, count);
1298 if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
1299 NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
1300 err = -EOPNOTSUPP;
1301 break;
1302 }
1303
1304 err2 = nfs4_handle_exception(server, err, &src_exception);
1305 err = nfs4_handle_exception(server, err, &dst_exception);
1306 if (!err)
1307 err = err2;
1308 } while (src_exception.retry || dst_exception.retry);
1309
1310 nfs_put_lock_context(dst_lock);
1311 out_put_src_lock:
1312 nfs_put_lock_context(src_lock);
1313 return err;
1314 }
1315
1316 #define NFS4XATTR_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
1317
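/*
 * One REMOVEXATTR call. On success, fold the returned change_info into
 * the inode's cached change attribute.
 */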
1318 static int _nfs42_proc_removexattr(struct inode *inode, const char *name)
1319 {
1320 struct nfs_server *server = NFS_SERVER(inode);
1321 struct nfs42_removexattrargs args = {
1322 .fh = NFS_FH(inode),
1323 .xattr_name = name,
1324 };
1325 struct nfs42_removexattrres res;
1326 struct rpc_message msg = {
1327 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVEXATTR],
1328 .rpc_argp = &args,
1329 .rpc_resp = &res,
1330 };
1331 int ret;
1332 unsigned long timestamp = jiffies;
1333
1334 ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
1335 &res.seq_res, 1);
1336 trace_nfs4_removexattr(inode, name, ret);
1337 if (!ret)
1338 nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);
1339
1340 return ret;
1341 }
1342
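/*
 * One SETXATTR call. The value buffer is mapped into pages for the XDR
 * layer; values larger than the server's advertised maximum (sxasize)
 * are rejected with -ERANGE. On success the change attribute and the
 * post-op attributes are folded back into the inode.
 */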
1343 static int _nfs42_proc_setxattr(struct inode *inode, const char *name,
1344 const void *buf, size_t buflen, int flags)
1345 {
1346 struct nfs_server *server = NFS_SERVER(inode);
1347 __u32 bitmask[NFS_BITMASK_SZ];
1348 struct page *pages[NFS4XATTR_MAXPAGES];
1349 struct nfs42_setxattrargs arg = {
1350 .fh = NFS_FH(inode),
1351 .bitmask = bitmask,
1352 .xattr_pages = pages,
1353 .xattr_len = buflen,
1354 .xattr_name = name,
1355 .xattr_flags = flags,
1356 };
1357 struct nfs42_setxattrres res = {
1358 .server = server,
1359 };
1360 struct rpc_message msg = {
1361 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETXATTR],
1362 .rpc_argp = &arg,
1363 .rpc_resp = &res,
1364 };
1365 int ret, np;
1366 unsigned long timestamp = jiffies;
1367
1368 if (buflen > server->sxasize)
1369 return -ERANGE;
1370
1371 res.fattr = nfs_alloc_fattr();
1372 if (!res.fattr)
1373 return -ENOMEM;
1374
1375 if (buflen > 0) {
1376 np = nfs4_buf_to_pages_noslab(buf, buflen, arg.xattr_pages);
1377 if (np < 0) {
1378 ret = np;
1379 goto out;
1380 }
1381 } else
1382 np = 0;
1383
1384 nfs4_bitmask_set(bitmask, server->cache_consistency_bitmask,
1385 inode, NFS_INO_INVALID_CHANGE);
1386
1387 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
1388 &res.seq_res, 1);
1389 trace_nfs4_setxattr(inode, name, ret);
1390
1391 for (; np > 0; np--)
1392 put_page(pages[np - 1]);
1393
1394 if (!ret) {
1395 nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0);
1396 ret = nfs_post_op_update_inode(inode, res.fattr);
1397 }
1398
1399 out:
1400 kfree(res.fattr);
1401 return ret;
1402 }
1403
1404 static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name,
1405 void *buf, size_t buflen, struct page **pages,
1406 size_t plen)
1407 {
1408 struct nfs_server *server = NFS_SERVER(inode);
1409 struct nfs42_getxattrargs arg = {
1410 .fh = NFS_FH(inode),
1411 .xattr_name = name,
1412 };
1413 struct nfs42_getxattrres res;
1414 struct rpc_message msg = {
1415 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETXATTR],
1416 .rpc_argp = &arg,
1417 .rpc_resp = &res,
1418 };
1419 ssize_t ret;
1420
1421 arg.xattr_len = plen;
1422 arg.xattr_pages = pages;
1423
1424 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
1425 &res.seq_res, 0);
1426 trace_nfs4_getxattr(inode, name, ret);
1427 if (ret < 0)
1428 return ret;
1429
1430 /*
1431 * Normally, the caching is done one layer up, but for successful
1432 * RPCS, always cache the result here, even if the caller was
1433 * just querying the length, or if the reply was too big for
1434 * the caller. This avoids a second RPC in the case of the
1435 * common query-alloc-retrieve cycle for xattrs.
1436 *
1437 * Note that xattr_len is always capped to XATTR_SIZE_MAX.
1438 */
1439
1440 nfs4_xattr_cache_add(inode, name, NULL, pages, res.xattr_len);
1441
1442 if (buflen) {
1443 if (res.xattr_len > buflen)
1444 return -ERANGE;
1445 _copy_from_pages(buf, pages, 0, res.xattr_len);
1446 }
1447
1448 return res.xattr_len;
1449 }
1450
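/*
 * One LISTXATTRS call. Reply pages are sized from the caller's buffer,
 * capped at the server's advertised maximum (lxasize), plus a scratch
 * page for XDR decoding. The returned cookie and EOF flag let the
 * caller continue the listing across multiple calls.
 */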
1451 static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf,
1452 size_t buflen, u64 *cookiep, bool *eofp)
1453 {
1454 struct nfs_server *server = NFS_SERVER(inode);
1455 struct page **pages;
1456 struct nfs42_listxattrsargs arg = {
1457 .fh = NFS_FH(inode),
1458 .cookie = *cookiep,
1459 };
1460 struct nfs42_listxattrsres res = {
1461 .eof = false,
1462 .xattr_buf = buf,
1463 .xattr_len = buflen,
1464 };
1465 struct rpc_message msg = {
1466 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LISTXATTRS],
1467 .rpc_argp = &arg,
1468 .rpc_resp = &res,
1469 };
1470 u32 xdrlen;
1471 int ret, np, i;
1472
1473
1474 ret = -ENOMEM;
1475 res.scratch = alloc_page(GFP_KERNEL);
1476 if (!res.scratch)
1477 goto out;
1478
1479 xdrlen = nfs42_listxattr_xdrsize(buflen);
1480 if (xdrlen > server->lxasize)
1481 xdrlen = server->lxasize;
1482 np = xdrlen / PAGE_SIZE + 1;
1483
1484 pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL);
1485 if (!pages)
1486 goto out_free_scratch;
1487 for (i = 0; i < np; i++) {
1488 pages[i] = alloc_page(GFP_KERNEL);
1489 if (!pages[i])
1490 goto out_free_pages;
1491 }
1492
1493 arg.xattr_pages = pages;
1494 arg.count = xdrlen;
1495
1496 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
1497 &res.seq_res, 0);
1498 trace_nfs4_listxattr(inode, ret);
1499
1500 if (ret >= 0) {
1501 ret = res.copied;
1502 *cookiep = res.cookie;
1503 *eofp = res.eof;
1504 }
1505
1506 out_free_pages:
1507 while (--np >= 0) {
1508 if (pages[np])
1509 __free_page(pages[np]);
1510 }
1511 kfree(pages);
1512 out_free_scratch:
1513 __free_page(res.scratch);
1514 out:
1515 return ret;
1516
1517 }
1518
1519 ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name,
1520 void *buf, size_t buflen)
1521 {
1522 struct nfs4_exception exception = { };
1523 ssize_t err, np, i;
1524 struct page **pages;
1525
1526 np = nfs_page_array_len(0, buflen ?: XATTR_SIZE_MAX);
1527 pages = kmalloc_array(np, sizeof(*pages), GFP_KERNEL);
1528 if (!pages)
1529 return -ENOMEM;
1530
1531 for (i = 0; i < np; i++) {
1532 pages[i] = alloc_page(GFP_KERNEL);
1533 if (!pages[i]) {
1534 err = -ENOMEM;
1535 goto out;
1536 }
1537 }
1538
1539 /*
1540 * The GETXATTR op has no length field in the call, and the
1541 * xattr data is at the end of the reply.
1542 *
1543 * There is no downside in using the page-aligned length. It will
1544 * allow receiving and caching xattrs that are too large for the
1545 * caller but still fit in the page-rounded value.
1546 */
1547 do {
1548 err = _nfs42_proc_getxattr(inode, name, buf, buflen,
1549 pages, np * PAGE_SIZE);
1550 if (err >= 0)
1551 break;
1552 err = nfs4_handle_exception(NFS_SERVER(inode), err,
1553 &exception);
1554 } while (exception.retry);
1555
1556 out:
1557 while (--i >= 0)
1558 __free_page(pages[i]);
1559 kfree(pages);
1560
1561 return err;
1562 }
1563
1564 int nfs42_proc_setxattr(struct inode *inode, const char *name,
1565 const void *buf, size_t buflen, int flags)
1566 {
1567 struct nfs4_exception exception = { };
1568 int err;
1569
1570 do {
1571 err = _nfs42_proc_setxattr(inode, name, buf, buflen, flags);
1572 if (!err)
1573 break;
1574 err = nfs4_handle_exception(NFS_SERVER(inode), err,
1575 &exception);
1576 } while (exception.retry);
1577
1578 return err;
1579 }
1580
1581 ssize_t nfs42_proc_listxattrs(struct inode *inode, void *buf,
1582 size_t buflen, u64 *cookiep, bool *eofp)
1583 {
1584 struct nfs4_exception exception = { };
1585 ssize_t err;
1586
1587 do {
1588 err = _nfs42_proc_listxattrs(inode, buf, buflen,
1589 cookiep, eofp);
1590 if (err >= 0)
1591 break;
1592 err = nfs4_handle_exception(NFS_SERVER(inode), err,
1593 &exception);
1594 } while (exception.retry);
1595
1596 return err;
1597 }
1598
1599 int nfs42_proc_removexattr(struct inode *inode, const char *name)
1600 {
1601 struct nfs4_exception exception = { };
1602 int err;
1603
1604 do {
1605 err = _nfs42_proc_removexattr(inode, name);
1606 if (!err)
1607 break;
1608 err = nfs4_handle_exception(NFS_SERVER(inode), err,
1609 &exception);
1610 } while (exception.retry);
1611
1612 return err;
1613 }
1614