/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <kandros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include <linux/string_helpers.h>
#include <linux/fsnotify.h>
#include <linux/rhashtable.h>
#include <linux/nfs_ssc.h>

#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY                NFSDDBG_PROC

#define all_ones {{ ~0, ~0}, ~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

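/*
 * Note on the special stateid values above: per RFC 7530, section 9.1.4.3
 * (and RFC 8881, section 8.2.3), the all-zeros stateid names an anonymous
 * special owner and the all-ones stateid bypasses normal locking checks
 * for READ.  "currentstateid" and "close_stateid" are internal sentinels
 * used for the NFSv4.1 current-stateid feature and for stateids returned
 * by CLOSE, respectively.
 */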
#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid)  (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))

/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
void nfsd4_end_grace(struct nfsd_net *nn);
static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
static void nfsd4_file_hash_remove(struct nfs4_file *fi);

/* Locking: */

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

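/*
 * Lockdep subclasses for st_mutex: an open stateid's mutex can be held
 * while a lock stateid's mutex is acquired, so the two are annotated as
 * distinct nesting levels to keep lockdep from reporting a false
 * deadlock on the shared lock class.
 */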
enum nfsd4_st_mutex_lock_subclass {
	OPEN_STATEID_MUTEX = 0,
	LOCK_STATEID_MUTEX = 1,
};

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

/*
 * A waitqueue where a writer to clients/#/ctl destroying a client can
 * wait for cl_rpc_users to drop to 0 and then for the client to be
 * unhashed.
 */
static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);

static struct kmem_cache *client_slab;
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;

static struct workqueue_struct *laundry_wq;

int nfsd4_create_laundry_wq(void)
{
	int rc = 0;

	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
	if (laundry_wq == NULL)
		rc = -ENOMEM;
	return rc;
}

void nfsd4_destroy_laundry_wq(void)
{
	destroy_workqueue(laundry_wq);
}

static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static void nfsd4_dec_courtesy_client_count(struct nfsd_net *nn,
					struct nfs4_client *clp)
{
	if (clp->cl_state != NFSD4_ACTIVE)
		atomic_add_unless(&nn->nfsd_courtesy_clients, -1, 0);
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_rpc_users);
	nfsd4_dec_courtesy_client_count(nn, clp);
	clp->cl_state = NFSD4_ACTIVE;
	return nfs_ok;
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = ktime_get_boottime_seconds();
	nfsd4_dec_courtesy_client_count(nn, clp);
	clp->cl_state = NFSD4_ACTIVE;
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_rpc_users))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
}

static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
	spin_unlock(&nn->client_lock);
}

static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}

static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *cur, *found = NULL;

	spin_lock(&nn->blocked_locks_lock);
	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
		if (fh_match(fh, &cur->nbl_fh)) {
			list_del_init(&cur->nbl_list);
			WARN_ON(list_empty(&cur->nbl_lru));
			list_del_init(&cur->nbl_lru);
			found = cur;
			break;
		}
	}
	spin_unlock(&nn->blocked_locks_lock);
	if (found)
		locks_delete_block(&found->nbl_lock);
	return found;
}

static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = find_blocked_lock(lo, fh, nn);
	if (!nbl) {
		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
		if (nbl) {
			INIT_LIST_HEAD(&nbl->nbl_list);
			INIT_LIST_HEAD(&nbl->nbl_lru);
			fh_copy_shallow(&nbl->nbl_fh, fh);
			locks_init_lock(&nbl->nbl_lock);
			kref_init(&nbl->nbl_kref);
			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
					&nfsd4_cb_notify_lock_ops,
					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
		}
	}
	return nbl;
}

static void
free_nbl(struct kref *kref)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = container_of(kref, struct nfsd4_blocked_lock, nbl_kref);
	kfree(nbl);
}

static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
	locks_delete_block(&nbl->nbl_lock);
	locks_release_private(&nbl->nbl_lock);
	kref_put(&nbl->nbl_kref, free_nbl);
}

static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_blocked_lock *nbl;
	LIST_HEAD(reaplist);

	/* Dequeue all blocked locks */
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&lo->lo_blocked)) {
		nbl = list_first_entry(&lo->lo_blocked,
					struct nfsd4_blocked_lock,
					nbl_list);
		list_del_init(&nbl->nbl_list);
		WARN_ON(list_empty(&nbl->nbl_lru));
		list_move(&nbl->nbl_lru, &reaplist);
	}
	spin_unlock(&nn->blocked_locks_lock);

	/* Now free them */
	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
					nbl_lru);
		list_del_init(&nbl->nbl_lru);
		free_blocked_lock(nbl);
	}
}

static void
nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
						struct nfsd4_blocked_lock, nbl_cb);
	locks_delete_block(&nbl->nbl_lock);
}

static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	trace_nfsd_cb_notify_lock_done(&zero_stateid, task);

	/*
	 * Since this is just an optimization, we don't try very hard if it
	 * turns out not to succeed.  We'll requeue it on NFS4ERR_DELAY, and
	 * just quit trying on anything else.
	 */
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, 1 * HZ);
		return 0;
	default:
		return 1;
	}
}

static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
						struct nfsd4_blocked_lock, nbl_cb);

	free_blocked_lock(nbl);
}

static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
	.prepare	= nfsd4_cb_notify_lock_prepare,
	.done		= nfsd4_cb_notify_lock_done,
	.release	= nfsd4_cb_notify_lock_release,
};

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation in
 * https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that
 * the server return an error if the client attempts to downgrade to a
 * combination of share bits not explicable by closing some of its
 * previous opens.
 *
 * This enforcement is arguably incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 *
 * But you could also argue that our current code is already overkill,
 * since it only exists to return NFS4ERR_INVAL on incorrect client
 * behavior.
 */
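/*
 * Worked example: if previous opens used READ (bit 1) and WRITE (bit 2),
 * the bmap is 0b0110 and bmap_to_share_mode() returns 1 | 2 == 3, i.e.
 * NFS4_SHARE_ACCESS_BOTH.  The bit positions are the share-mode values
 * themselves, which is why OR-ing together the set positions yields the
 * union of every mode ever set in the bmap.
 */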
static unsigned int
bmap_to_share_mode(unsigned long bmap)
{
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	return (bool)(stp->st_deny_bmap & mask);
}

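/*
 * Map NFSv4 share-access bits onto the O_RDONLY/O_WRONLY/O_RDWR values
 * that index the fi_fds[] and fi_access[] arrays in struct nfs4_file.
 */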
static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}

static inline int
access_permit_read(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}

static inline int
access_permit_write(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}

static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}

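/* Simple polynomial (multiply-by-37) hash over an opaque byte string. */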
static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;

	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}

static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

	kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
	if (refcount_dec_and_test(&fi->fi_ref)) {
		nfsd4_file_hash_remove(fi);
		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}

static struct nfsd_file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = nfsd_file_get(f->fi_fds[O_WRONLY]);
	if (!ret)
		ret = nfsd_file_get(f->fi_fds[O_RDWR]);
	return ret;
}

static struct nfsd_file *
find_writeable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct nfsd_file *
find_readable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = nfsd_file_get(f->fi_fds[O_RDONLY]);
	if (!ret)
		ret = nfsd_file_get(f->fi_fds[O_RDWR]);
	return ret;
}

static struct nfsd_file *
find_readable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct nfsd_file *
find_rw_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = nfsd_file_get(f->fi_fds[O_RDWR]);
	spin_unlock(&f->fi_lock);

	return ret;
}

struct nfsd_file *
find_any_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	if (!f)
		return NULL;
	spin_lock(&f->fi_lock);
	ret = nfsd_file_get(f->fi_fds[O_RDWR]);
	if (!ret) {
		ret = nfsd_file_get(f->fi_fds[O_WRONLY]);
		if (!ret)
			ret = nfsd_file_get(f->fi_fds[O_RDONLY]);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}

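/*
 * Unlike find_any_file(), this returns a borrowed pointer without taking
 * a reference; the caller must hold fi_lock for the result to remain
 * valid.
 */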
static struct nfsd_file *find_any_file_locked(struct nfs4_file *f)
{
	lockdep_assert_held(&f->fi_lock);

	if (f->fi_fds[O_RDWR])
		return f->fi_fds[O_RDWR];
	if (f->fi_fds[O_WRONLY])
		return f->fi_fds[O_WRONLY];
	if (f->fi_fds[O_RDONLY])
		return f->fi_fds[O_RDONLY];
	return NULL;
}

static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS              8
#define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;
}

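/*
 * The file hash is keyed on the address of the inode.  An rhltable (an
 * rhashtable that permits duplicate keys) is used because more than one
 * nfs4_file can refer to the same inode, e.g. when the file is reachable
 * through multiple distinct filehandles.
 */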
static struct rhltable nfs4_file_rhltable ____cacheline_aligned_in_smp;

static const struct rhashtable_params nfs4_file_rhash_params = {
	.key_len		= sizeof_field(struct nfs4_file, fi_inode),
	.key_offset		= offsetof(struct nfs4_file, fi_inode),
	.head_offset		= offsetof(struct nfs4_file, fi_rlist),

	/*
	 * Start with a single page hash table to reduce resizing churn
	 * on light workloads.
	 */
	.min_size		= 256,
	.automatic_shrinking	= true,
};

/*
 * Check if courtesy clients have conflicting access and resolve it if possible
 *
 * access:	is op_share_access if share_access is true.
 *		Check if access mode, op_share_access, would conflict with
 *		the current deny mode of the file 'fp'.
 * access:	is op_share_deny if share_access is false.
 *		Check if the deny mode, op_share_deny, would conflict with
 *		current access of the file 'fp'.
 * stp:		skip checking this entry.
 * new_stp:	normal open, not open upgrade.
 *
 * Function returns:
 *	false - access/deny mode conflict with normal client.
 *	true  - no conflict or conflict with courtesy client(s) is resolved.
 */
static bool
nfs4_resolve_deny_conflicts_locked(struct nfs4_file *fp, bool new_stp,
		struct nfs4_ol_stateid *stp, u32 access, bool share_access)
{
	struct nfs4_ol_stateid *st;
	bool resolvable = true;
	unsigned char bmap;
	struct nfsd_net *nn;
	struct nfs4_client *clp;

	lockdep_assert_held(&fp->fi_lock);
	list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
		/* ignore lock stateid */
		if (st->st_openstp)
			continue;
		if (st == stp && new_stp)
			continue;
		/* check file access against deny mode or vice versa */
		bmap = share_access ? st->st_deny_bmap : st->st_access_bmap;
		if (!(access & bmap_to_share_mode(bmap)))
			continue;
		clp = st->st_stid.sc_client;
		if (try_to_expire_client(clp))
			continue;
		resolvable = false;
		break;
	}
	if (resolvable) {
		clp = stp->st_stid.sc_client;
		nn = net_generic(clp->net, nfsd_net_id);
		mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
	}
	return resolvable;
}

static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	/* Common case is that there is no deny mode. */
	if (deny) {
		/* Does this deny mode make sense? */
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
		    atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;

		if ((deny & NFS4_SHARE_DENY_WRITE) &&
		    atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}

static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct nfsd_file *f1 = NULL;
		struct nfsd_file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			nfsd_file_put(f1);
		if (f2)
			nfsd_file_put(f2);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}

/*
 * Allocate a new open/delegation state counter. This is needed for
 * pNFS for proper return on close semantics.
 *
 * Note that we only allocate it for pNFS-enabled exports, otherwise
 * all pointers to struct nfs4_clnt_odstate are always NULL.
 */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
	struct nfs4_clnt_odstate *co;

	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
	if (co) {
		co->co_client = clp;
		refcount_set(&co->co_odcount, 1);
	}
	return co;
}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp = co->co_file;

	lockdep_assert_held(&fp->fi_lock);
	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}

static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	if (co)
		refcount_inc(&co->co_odcount);
}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}

static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}

struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

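	/*
	 * idr_preload() pre-allocates IDR nodes so that the allocation
	 * under cl_lock can use GFP_NOWAIT without sleeping inside the
	 * spinlock; idr_alloc_cyclic() hands out ids in cyclic order to
	 * delay id reuse (see the comment near the end of this function).
	 */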
	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	/* Reserving 0 for start of file in nfsdfs "states" file: */
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	refcount_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);
	INIT_LIST_HEAD(&stid->sc_cp_list);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}

/*
 * Create a unique stateid_t to represent each COPY.
 */
static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
			      unsigned char cs_type)
{
	int new_id;

	stid->cs_stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
	stid->cs_stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;

	idr_preload(GFP_KERNEL);
	spin_lock(&nn->s2s_cp_lock);
	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
	stid->cs_stid.si_opaque.so_id = new_id;
	stid->cs_stid.si_generation = 1;
	spin_unlock(&nn->s2s_cp_lock);
	idr_preload_end();
	if (new_id < 0)
		return 0;
	stid->cs_type = cs_type;
	return 1;
}

int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
{
	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
}

struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
						     struct nfs4_stid *p_stid)
{
	struct nfs4_cpntf_state *cps;

	cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
	if (!cps)
		return NULL;
	cps->cpntf_time = ktime_get_boottime_seconds();
	refcount_set(&cps->cp_stateid.cs_count, 1);
	if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
		goto out_free;
	spin_lock(&nn->s2s_cp_lock);
	list_add(&cps->cp_list, &p_stid->sc_cp_list);
	spin_unlock(&nn->s2s_cp_lock);
	return cps;
out_free:
	kfree(cps);
	return NULL;
}

void nfs4_free_copy_state(struct nfsd4_copy *copy)
{
	struct nfsd_net *nn;

	if (copy->cp_stateid.cs_type != NFS4_COPY_STID)
		return;
	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	idr_remove(&nn->s2s_cp_stateids,
		   copy->cp_stateid.cs_stid.si_opaque.so_id);
	spin_unlock(&nn->s2s_cp_lock);
}

static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
{
	struct nfs4_cpntf_state *cps;
	struct nfsd_net *nn;

	nn = net_generic(net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	while (!list_empty(&stid->sc_cp_list)) {
		cps = list_first_entry(&stid->sc_cp_list,
				       struct nfs4_cpntf_state, cp_list);
		_free_cpntf_state_locked(nn, cps);
	}
	spin_unlock(&nn->s2s_cp_lock);
}

static struct nfs4_ol_stateid *nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;

	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
	if (!stid)
		return NULL;

	return openlockstateid(stid);
}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	struct nfs4_delegation *dp = delegstateid(stid);

	WARN_ON_ONCE(!list_empty(&stid->sc_cp_list));
	WARN_ON_ONCE(!list_empty(&dp->dl_perfile));
	WARN_ON_ONCE(!list_empty(&dp->dl_perclnt));
	WARN_ON_ONCE(!list_empty(&dp->dl_recall_lru));
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}

/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to a 32-bit value
 * and use the low three bytes as three separate 8-bit indices into the
 * bitmap.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int	entries, old_entries;
	time64_t swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;

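/*
 * Worked example: with hash = jhash(fh), an insertion sets bits
 * (hash & 255), ((hash >> 8) & 255), and ((hash >> 16) & 255) in the
 * "new" bitmap, and a delegation is blocked only if all three bits are
 * set in either generation.  False positives are therefore possible,
 * false negatives are not, within the roughly 30-60 second window the
 * two filters cover together.
 */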
static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (ktime_get_seconds() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (ktime_get_seconds() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			/* clear the filter that holds the expired generation */
			bd->new = 1-bd->new;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->swap_time = ktime_get_seconds();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_raw, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = jhash(&fh->fh_raw, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = ktime_get_seconds();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}

static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
		 struct nfs4_clnt_odstate *odstate, u32 dl_type)
{
	struct nfs4_delegation *dp;
	struct nfs4_stid *stid;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&fp->fi_fhandle))
		goto out_dec;
	stid = nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg);
	if (stid == NULL)
		goto out_dec;
	dp = delegstateid(stid);

	/*
	 * delegation seqids are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_clnt_odstate = odstate;
	get_clnt_odstate(odstate);
	dp->dl_type = dl_type;
	dp->dl_retries = 1;
	dp->dl_recalled = false;
	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
	get_nfs4_file(fp);
	dp->dl_stid.sc_file = fp;
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}

void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	nfs4_free_cpntf_statelist(clp->net, s);
	spin_unlock(&clp->cl_lock);
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}

void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
	stateid_t *src = &stid->sc_stateid;

	spin_lock(&stid->sc_lock);
	if (unlikely(++src->si_generation == 0))
		src->si_generation = 1;
	memcpy(dst, src, sizeof(*dst));
	spin_unlock(&stid->sc_lock);
}

static void put_deleg_file(struct nfs4_file *fp)
{
	struct nfsd_file *nf = NULL;

	spin_lock(&fp->fi_lock);
	if (--fp->fi_delegees == 0)
		swap(nf, fp->fi_deleg_file);
	spin_unlock(&fp->fi_lock);

	if (nf)
		nfsd_file_put(nf);
}

static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;
	struct nfsd_file *nf = fp->fi_deleg_file;

	WARN_ON_ONCE(!fp->fi_delegees);

	vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
	put_deleg_file(fp);
}

static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
{
	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_unlock_deleg_lease(dp);
	nfs4_put_stid(&dp->dl_stid);
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}

/**
 * nfs4_delegation_exists - Discover if this delegation already exists
 * @clp:     a pointer to the nfs4_client we're granting a delegation to
 * @fp:      a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: true iff an existing delegation is found
 */

static bool
nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_delegation *searchdp = NULL;
	struct nfs4_client *searchclp = NULL;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
		searchclp = searchdp->dl_stid.sc_client;
		if (clp == searchclp) {
			return true;
		}
	}
	return false;
}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp:     a pointer to the nfs4_delegation we are adding.
 * @fp:     a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: 0 if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                 nfs4_client for this nfs4_file. Delegation is not hashed.
 *
 */

static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	if (nfs4_delegation_exists(clp, fp))
		return -EAGAIN;
	refcount_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return 0;
}

static bool delegation_hashed(struct nfs4_delegation *dp)
{
	return !(list_empty(&dp->dl_perfile));
}

static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	if (!delegation_hashed(dp))
		return false;

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	return true;
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	bool unhashed;

	spin_lock(&state_lock);
	unhashed = unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
	if (unhashed)
		destroy_unhashed_deleg(dp);
}

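/*
 * For a v4.1+ client, move the revoked delegation onto cl_revoked so the
 * client can discover it via TEST_STATEID and dispose of it with
 * FREE_STATEID; v4.0 has no such mechanism, so the state is simply
 * destroyed.
 */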
static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	WARN_ON(!list_empty(&dp->dl_recall_lru));

	trace_nfsd_stid_revoke(&dp->dl_stid);

	if (clp->cl_minorversion) {
		spin_lock(&clp->cl_lock);
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		refcount_inc(&dp->dl_stid.sc_count);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
	destroy_unhashed_deleg(dp);
}

/*
 * SETCLIENTID state
 */

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(struct xdr_netobj name)
{
	return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
}

/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	fp->fi_share_deny = 0;
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
	spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	int i;
	bool change = false;

	for (i = 1; i < 4; i++) {
		if ((i & deny) != i) {
			change = true;
			clear_deny(i, stp);
		}
	}

	/* Recalculate per-file deny mode if there was a change */
	if (change)
		recalculate_deny_mode(stp->st_stid.sc_file);
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
	struct nfs4_file *fp = stp->st_stid.sc_file;

	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_stid.sc_file, i);
		clear_access(i, stp);
	}
}

static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}

static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	struct nfs4_client *clp = sop->so_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
		return;
	sop->so_ops->so_unhash(sop);
	spin_unlock(&clp->cl_lock);
	nfs4_free_stateowner(sop);
}

static bool
nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
{
	return list_empty(&stp->st_perfile);
}

static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_stid.sc_file;

	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

	if (list_empty(&stp->st_perfile))
		return false;

	spin_lock(&fp->fi_lock);
	list_del_init(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
	return true;
}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);

	put_clnt_odstate(stp->st_clnt_odstate);
	release_all_access(stp);
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
	WARN_ON(!list_empty(&stid->sc_cp_list));
	kmem_cache_free(stateid_slab, stid);
}

static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
	struct nfsd_file *nf;

	nf = find_any_file(stp->st_stid.sc_file);
	if (nf) {
		get_file(nf->nf_file);
		filp_close(nf->nf_file, (fl_owner_t)lo);
		nfsd_file_put(nf);
	}
	nfs4_free_ol_stateid(stid);
}

/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
				       struct list_head *reaplist)
{
	struct nfs4_stid *s = &stp->st_stid;
	struct nfs4_client *clp = s->sc_client;

	lockdep_assert_held(&clp->cl_lock);

	WARN_ON_ONCE(!list_empty(&stp->st_locks));

	if (!refcount_dec_and_test(&s->sc_count)) {
		wake_up_all(&close_wq);
		return;
	}

	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	list_add(&stp->st_locks, reaplist);
}

static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	list_del_init(&stp->st_locks);
	nfs4_unhash_stid(&stp->st_stid);
	return true;
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_client *clp = stp->st_stid.sc_client;
	bool unhashed;

	spin_lock(&clp->cl_lock);
	unhashed = unhash_lock_stateid(stp);
	spin_unlock(&clp->cl_lock);
	if (unhashed)
		nfs4_put_stid(&stp->st_stid);
}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&lo->lo_owner.so_strhash);
}

/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed.
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_file *fp;

	might_sleep();

	while (!list_empty(reaplist)) {
		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
				       st_locks);
		list_del(&stp->st_locks);
		fp = stp->st_stid.sc_file;
		stp->st_stid.sc_free(&stp->st_stid);
		if (fp)
			put_nfs4_file(fp);
	}
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				struct nfs4_ol_stateid, st_locks);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, reaplist);
	}
}

static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	release_open_stateid_locks(stp, reaplist);
	return true;
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	if (unhash_open_stateid(stp, &reaplist))
		put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
}

static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		if (unhash_open_stateid(stp, &reaplist))
			put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}

static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
	return;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}

/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
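
/*
 * A rough breakdown of the three terms above: the 24-byte RPC reply
 * header is xid + msg_type + reply_stat + AUTH_NULL verifier (flavor +
 * length) + accept_stat, each 4 bytes; the 12-byte compound header is
 * status + zero-length tag + op count; and the 44-byte SEQUENCE result
 * is opcode + status + 16-byte sessionid + seqid + slotid +
 * highest_slotid + target_highest_slotid + status_flags.
 */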
1748
1749 static void
free_session_slots(struct nfsd4_session * ses)1750 free_session_slots(struct nfsd4_session *ses)
1751 {
1752 int i;
1753
1754 for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
1755 free_svc_cred(&ses->se_slots[i]->sl_cred);
1756 kfree(ses->se_slots[i]);
1757 }
1758 }
1759
1760 /*
1761 * We don't actually need to cache the rpc and session headers, so we
1762 * can allocate a little less for each slot:
1763 */
slot_bytes(struct nfsd4_channel_attrs * ca)1764 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1765 {
1766 u32 size;
1767
1768 if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1769 size = 0;
1770 else
1771 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1772 return size + sizeof(struct nfsd4_slot);
1773 }
1774
1775 /*
1776 * XXX: If we run out of reserved DRC memory we could (up to a point)
1777 * re-negotiate active sessions and reduce their slot usage to make
1778 * room for new connections. For now we just fail the create session.
1779 */
1780 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
1781 {
1782 u32 slotsize = slot_bytes(ca);
1783 u32 num = ca->maxreqs;
1784 unsigned long avail, total_avail;
1785 unsigned int scale_factor;
1786
1787 spin_lock(&nfsd_drc_lock);
1788 if (nfsd_drc_max_mem > nfsd_drc_mem_used)
1789 total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
1790 else
1791 /* We have handed out more space than we chose in
1792 * set_max_drc() to allow. That isn't really a
1793 * problem as long as that doesn't make us think we
1794 * have lots more due to integer overflow.
1795 */
1796 total_avail = 0;
1797 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
1798 /*
1799 * Never use more than a fraction of the remaining memory,
1800 * unless it's the only way to give this client a slot.
1801 * The chosen fraction is either 1/8 or 1/number of threads,
1802 * whichever is smaller. This ensures there are adequate
1803 * slots to support multiple clients per thread.
1804 * Give the client one slot even if that would require
1805 * over-allocation--it is better than failure.
1806 */
1807 scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);
1808
1809 avail = clamp_t(unsigned long, avail, slotsize,
1810 total_avail/scale_factor);
1811 num = min_t(int, num, avail / slotsize);
1812 num = max_t(int, num, 1);
1813 nfsd_drc_mem_used += num * slotsize;
1814 spin_unlock(&nfsd_drc_lock);
1815
1816 return num;
1817 }
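/*
 * Worked example (editor's illustration, values invented): with
 * total_avail = 8MB, 16 nfsd threads and slotsize = 2KB,
 * scale_factor = max(8, 16) = 16, so the session is normally limited
 * to 8MB/16 = 512KB of DRC memory (also capped by
 * NFSD_MAX_MEM_PER_SESSION), i.e. at most 256 slots. The final count
 * is the smaller of that and the client's requested maxreqs, but never
 * less than one slot.
 */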
1818
1819 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1820 {
1821 int slotsize = slot_bytes(ca);
1822
1823 spin_lock(&nfsd_drc_lock);
1824 nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1825 spin_unlock(&nfsd_drc_lock);
1826 }
1827
1828 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1829 struct nfsd4_channel_attrs *battrs)
1830 {
1831 int numslots = fattrs->maxreqs;
1832 int slotsize = slot_bytes(fattrs);
1833 struct nfsd4_session *new;
1834 int i;
1835
1836 BUILD_BUG_ON(struct_size(new, se_slots, NFSD_MAX_SLOTS_PER_SESSION)
1837 > PAGE_SIZE);
1838
1839 new = kzalloc(struct_size(new, se_slots, numslots), GFP_KERNEL);
1840 if (!new)
1841 return NULL;
1842 /* allocate each struct nfsd4_slot and data cache in one piece */
1843 for (i = 0; i < numslots; i++) {
1844 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1845 if (!new->se_slots[i])
1846 goto out_free;
1847 }
1848
1849 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1850 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1851
1852 return new;
1853 out_free:
1854 while (i--)
1855 kfree(new->se_slots[i]);
1856 kfree(new);
1857 return NULL;
1858 }
1859
1860 static void free_conn(struct nfsd4_conn *c)
1861 {
1862 svc_xprt_put(c->cn_xprt);
1863 kfree(c);
1864 }
1865
1866 static void nfsd4_conn_lost(struct svc_xpt_user *u)
1867 {
1868 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1869 struct nfs4_client *clp = c->cn_session->se_client;
1870
1871 trace_nfsd_cb_lost(clp);
1872
1873 spin_lock(&clp->cl_lock);
1874 if (!list_empty(&c->cn_persession)) {
1875 list_del(&c->cn_persession);
1876 free_conn(c);
1877 }
1878 nfsd4_probe_callback(clp);
1879 spin_unlock(&clp->cl_lock);
1880 }
1881
1882 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1883 {
1884 struct nfsd4_conn *conn;
1885
1886 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1887 if (!conn)
1888 return NULL;
1889 svc_xprt_get(rqstp->rq_xprt);
1890 conn->cn_xprt = rqstp->rq_xprt;
1891 conn->cn_flags = flags;
1892 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1893 return conn;
1894 }
1895
1896 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1897 {
1898 conn->cn_session = ses;
1899 list_add(&conn->cn_persession, &ses->se_conns);
1900 }
1901
1902 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1903 {
1904 struct nfs4_client *clp = ses->se_client;
1905
1906 spin_lock(&clp->cl_lock);
1907 __nfsd4_hash_conn(conn, ses);
1908 spin_unlock(&clp->cl_lock);
1909 }
1910
1911 static int nfsd4_register_conn(struct nfsd4_conn *conn)
1912 {
1913 conn->cn_xpt_user.callback = nfsd4_conn_lost;
1914 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1915 }
1916
1917 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1918 {
1919 int ret;
1920
1921 nfsd4_hash_conn(conn, ses);
1922 ret = nfsd4_register_conn(conn);
1923 if (ret)
1924 /* oops; xprt is already down: */
1925 nfsd4_conn_lost(&conn->cn_xpt_user);
1926 /* We may have gained or lost a callback channel: */
1927 nfsd4_probe_callback_sync(ses->se_client);
1928 }
1929
1930 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1931 {
1932 u32 dir = NFS4_CDFC4_FORE;
1933
1934 if (cses->flags & SESSION4_BACK_CHAN)
1935 dir |= NFS4_CDFC4_BACK;
1936 return alloc_conn(rqstp, dir);
1937 }
1938
1939 /* must be called under client_lock */
1940 static void nfsd4_del_conns(struct nfsd4_session *s)
1941 {
1942 struct nfs4_client *clp = s->se_client;
1943 struct nfsd4_conn *c;
1944
1945 spin_lock(&clp->cl_lock);
1946 while (!list_empty(&s->se_conns)) {
1947 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1948 list_del_init(&c->cn_persession);
1949 spin_unlock(&clp->cl_lock);
1950
1951 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1952 free_conn(c);
1953
1954 spin_lock(&clp->cl_lock);
1955 }
1956 spin_unlock(&clp->cl_lock);
1957 }
1958
1959 static void __free_session(struct nfsd4_session *ses)
1960 {
1961 free_session_slots(ses);
1962 kfree(ses);
1963 }
1964
1965 static void free_session(struct nfsd4_session *ses)
1966 {
1967 nfsd4_del_conns(ses);
1968 nfsd4_put_drc_mem(&ses->se_fchannel);
1969 __free_session(ses);
1970 }
1971
1972 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1973 {
1974 int idx;
1975 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1976
1977 new->se_client = clp;
1978 gen_sessionid(new);
1979
1980 INIT_LIST_HEAD(&new->se_conns);
1981
1982 new->se_cb_seq_nr = 1;
1983 new->se_flags = cses->flags;
1984 new->se_cb_prog = cses->callback_prog;
1985 new->se_cb_sec = cses->cb_sec;
1986 atomic_set(&new->se_ref, 0);
1987 idx = hash_sessionid(&new->se_sessionid);
1988 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1989 spin_lock(&clp->cl_lock);
1990 list_add(&new->se_perclnt, &clp->cl_sessions);
1991 spin_unlock(&clp->cl_lock);
1992
1993 {
1994 struct sockaddr *sa = svc_addr(rqstp);
1995 /*
1996 * This is a little silly; with sessions there's no real
1997 * use for the callback address. Use the peer address
1998 * as a reasonable default for now, but consider fixing
1999 * the rpc client not to require an address in the
2000 * future:
2001 */
2002 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
2003 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
2004 }
2005 }
2006
2007 /* caller must hold client_lock */
2008 static struct nfsd4_session *
2009 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
2010 {
2011 struct nfsd4_session *elem;
2012 int idx;
2013 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2014
2015 lockdep_assert_held(&nn->client_lock);
2016
2017 dump_sessionid(__func__, sessionid);
2018 idx = hash_sessionid(sessionid);
2019 /* Search in the appropriate list */
2020 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
2021 if (!memcmp(elem->se_sessionid.data, sessionid->data,
2022 NFS4_MAX_SESSIONID_LEN)) {
2023 return elem;
2024 }
2025 }
2026
2027 dprintk("%s: session not found\n", __func__);
2028 return NULL;
2029 }
2030
2031 static struct nfsd4_session *
2032 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
2033 __be32 *ret)
2034 {
2035 struct nfsd4_session *session;
2036 __be32 status = nfserr_badsession;
2037
2038 session = __find_in_sessionid_hashtbl(sessionid, net);
2039 if (!session)
2040 goto out;
2041 status = nfsd4_get_session_locked(session);
2042 if (status)
2043 session = NULL;
2044 out:
2045 *ret = status;
2046 return session;
2047 }
2048
2049 /* caller must hold client_lock */
2050 static void
2051 unhash_session(struct nfsd4_session *ses)
2052 {
2053 struct nfs4_client *clp = ses->se_client;
2054 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2055
2056 lockdep_assert_held(&nn->client_lock);
2057
2058 list_del(&ses->se_hash);
2059 spin_lock(&ses->se_client->cl_lock);
2060 list_del(&ses->se_perclnt);
2061 spin_unlock(&ses->se_client->cl_lock);
2062 }
2063
2064 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
2065 static int
2066 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
2067 {
2068 /*
2069 * We're assuming the clid was not given out from a boot
2070 * precisely 2^32 (about 136 years) before this one. That seems
2071 * a safe assumption:
2072 */
2073 if (clid->cl_boot == (u32)nn->boot_time)
2074 return 0;
2075 trace_nfsd_clid_stale(clid);
2076 return 1;
2077 }
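/*
 * Editor's illustration: a client ID minted before a server reboot
 * carries the old boot time in cl_boot, so the comparison above fails
 * and the client sees a stale-clientid error, prompting it to
 * re-establish its state. A false match would need two boots exactly
 * 2^32 seconds apart.
 */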
2078
2079 /*
2080 * XXX Should we use a slab cache?
2081 * This type of memory management is somewhat inefficient, but we use it
2082 * anyway since SETCLIENTID is not a common operation.
2083 */
2084 static struct nfs4_client *alloc_client(struct xdr_netobj name,
2085 struct nfsd_net *nn)
2086 {
2087 struct nfs4_client *clp;
2088 int i;
2089
2090 if (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) {
2091 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
2092 return NULL;
2093 }
2094 clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
2095 if (clp == NULL)
2096 return NULL;
2097 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
2098 if (clp->cl_name.data == NULL)
2099 goto err_no_name;
2100 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
2101 sizeof(struct list_head),
2102 GFP_KERNEL);
2103 if (!clp->cl_ownerstr_hashtbl)
2104 goto err_no_hashtbl;
2105 for (i = 0; i < OWNER_HASH_SIZE; i++)
2106 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
2107 INIT_LIST_HEAD(&clp->cl_sessions);
2108 idr_init(&clp->cl_stateids);
2109 atomic_set(&clp->cl_rpc_users, 0);
2110 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
2111 clp->cl_state = NFSD4_ACTIVE;
2112 atomic_inc(&nn->nfs4_client_count);
2113 atomic_set(&clp->cl_delegs_in_recall, 0);
2114 INIT_LIST_HEAD(&clp->cl_idhash);
2115 INIT_LIST_HEAD(&clp->cl_openowners);
2116 INIT_LIST_HEAD(&clp->cl_delegations);
2117 INIT_LIST_HEAD(&clp->cl_lru);
2118 INIT_LIST_HEAD(&clp->cl_revoked);
2119 #ifdef CONFIG_NFSD_PNFS
2120 INIT_LIST_HEAD(&clp->cl_lo_states);
2121 #endif
2122 INIT_LIST_HEAD(&clp->async_copies);
2123 spin_lock_init(&clp->async_lock);
2124 spin_lock_init(&clp->cl_lock);
2125 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
2126 return clp;
2127 err_no_hashtbl:
2128 kfree(clp->cl_name.data);
2129 err_no_name:
2130 kmem_cache_free(client_slab, clp);
2131 return NULL;
2132 }
2133
2134 static void __free_client(struct kref *k)
2135 {
2136 struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
2137 struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
2138
2139 free_svc_cred(&clp->cl_cred);
2140 kfree(clp->cl_ownerstr_hashtbl);
2141 kfree(clp->cl_name.data);
2142 kfree(clp->cl_nii_domain.data);
2143 kfree(clp->cl_nii_name.data);
2144 idr_destroy(&clp->cl_stateids);
2145 kfree(clp->cl_ra);
2146 kmem_cache_free(client_slab, clp);
2147 }
2148
2149 static void drop_client(struct nfs4_client *clp)
2150 {
2151 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
2152 }
2153
2154 static void
2155 free_client(struct nfs4_client *clp)
2156 {
2157 while (!list_empty(&clp->cl_sessions)) {
2158 struct nfsd4_session *ses;
2159 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
2160 se_perclnt);
2161 list_del(&ses->se_perclnt);
2162 WARN_ON_ONCE(atomic_read(&ses->se_ref));
2163 free_session(ses);
2164 }
2165 rpc_destroy_wait_queue(&clp->cl_cb_waitq);
2166 if (clp->cl_nfsd_dentry) {
2167 nfsd_client_rmdir(clp->cl_nfsd_dentry);
2168 clp->cl_nfsd_dentry = NULL;
2169 wake_up_all(&expiry_wq);
2170 }
2171 drop_client(clp);
2172 }
2173
2174 /* must be called under the client_lock */
2175 static void
2176 unhash_client_locked(struct nfs4_client *clp)
2177 {
2178 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2179 struct nfsd4_session *ses;
2180
2181 lockdep_assert_held(&nn->client_lock);
2182
2183 /* Mark the client as expired! */
2184 clp->cl_time = 0;
2185 /* Make it invisible */
2186 if (!list_empty(&clp->cl_idhash)) {
2187 list_del_init(&clp->cl_idhash);
2188 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2189 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
2190 else
2191 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2192 }
2193 list_del_init(&clp->cl_lru);
2194 spin_lock(&clp->cl_lock);
2195 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
2196 list_del_init(&ses->se_hash);
2197 spin_unlock(&clp->cl_lock);
2198 }
2199
2200 static void
2201 unhash_client(struct nfs4_client *clp)
2202 {
2203 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2204
2205 spin_lock(&nn->client_lock);
2206 unhash_client_locked(clp);
2207 spin_unlock(&nn->client_lock);
2208 }
2209
2210 static __be32 mark_client_expired_locked(struct nfs4_client *clp)
2211 {
2212 if (atomic_read(&clp->cl_rpc_users))
2213 return nfserr_jukebox;
2214 unhash_client_locked(clp);
2215 return nfs_ok;
2216 }
2217
2218 static void
2219 __destroy_client(struct nfs4_client *clp)
2220 {
2221 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2222 int i;
2223 struct nfs4_openowner *oo;
2224 struct nfs4_delegation *dp;
2225 struct list_head reaplist;
2226
2227 INIT_LIST_HEAD(&reaplist);
2228 spin_lock(&state_lock);
2229 while (!list_empty(&clp->cl_delegations)) {
2230 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
2231 WARN_ON(!unhash_delegation_locked(dp));
2232 list_add(&dp->dl_recall_lru, &reaplist);
2233 }
2234 spin_unlock(&state_lock);
2235 while (!list_empty(&reaplist)) {
2236 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
2237 list_del_init(&dp->dl_recall_lru);
2238 destroy_unhashed_deleg(dp);
2239 }
2240 while (!list_empty(&clp->cl_revoked)) {
2241 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
2242 list_del_init(&dp->dl_recall_lru);
2243 nfs4_put_stid(&dp->dl_stid);
2244 }
2245 while (!list_empty(&clp->cl_openowners)) {
2246 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
2247 nfs4_get_stateowner(&oo->oo_owner);
2248 release_openowner(oo);
2249 }
2250 for (i = 0; i < OWNER_HASH_SIZE; i++) {
2251 struct nfs4_stateowner *so, *tmp;
2252
2253 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
2254 so_strhash) {
2255 /* Should be no openowners at this point */
2256 WARN_ON_ONCE(so->so_is_open_owner);
2257 remove_blocked_locks(lockowner(so));
2258 }
2259 }
2260 nfsd4_return_all_client_layouts(clp);
2261 nfsd4_shutdown_copy(clp);
2262 nfsd4_shutdown_callback(clp);
2263 if (clp->cl_cb_conn.cb_xprt)
2264 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
2265 atomic_add_unless(&nn->nfs4_client_count, -1, 0);
2266 nfsd4_dec_courtesy_client_count(nn, clp);
2267 free_client(clp);
2268 wake_up_all(&expiry_wq);
2269 }
2270
2271 static void
2272 destroy_client(struct nfs4_client *clp)
2273 {
2274 unhash_client(clp);
2275 __destroy_client(clp);
2276 }
2277
2278 static void inc_reclaim_complete(struct nfs4_client *clp)
2279 {
2280 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2281
2282 if (!nn->track_reclaim_completes)
2283 return;
2284 if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
2285 return;
2286 if (atomic_inc_return(&nn->nr_reclaim_complete) ==
2287 nn->reclaim_str_hashtbl_size) {
2288 printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
2289 clp->net->ns.inum);
2290 nfsd4_end_grace(nn);
2291 }
2292 }
2293
2294 static void expire_client(struct nfs4_client *clp)
2295 {
2296 unhash_client(clp);
2297 nfsd4_client_record_remove(clp);
2298 __destroy_client(clp);
2299 }
2300
2301 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
2302 {
2303 memcpy(target->cl_verifier.data, source->data,
2304 sizeof(target->cl_verifier.data));
2305 }
2306
2307 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
2308 {
2309 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
2310 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
2311 }
2312
2313 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
2314 {
2315 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
2316 target->cr_raw_principal = kstrdup(source->cr_raw_principal,
2317 GFP_KERNEL);
2318 target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
2319 if ((source->cr_principal && !target->cr_principal) ||
2320 (source->cr_raw_principal && !target->cr_raw_principal) ||
2321 (source->cr_targ_princ && !target->cr_targ_princ))
2322 return -ENOMEM;
2323
2324 target->cr_flavor = source->cr_flavor;
2325 target->cr_uid = source->cr_uid;
2326 target->cr_gid = source->cr_gid;
2327 target->cr_group_info = source->cr_group_info;
2328 get_group_info(target->cr_group_info);
2329 target->cr_gss_mech = source->cr_gss_mech;
2330 if (source->cr_gss_mech)
2331 gss_mech_get(source->cr_gss_mech);
2332 return 0;
2333 }
2334
2335 static int
2336 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
2337 {
2338 if (o1->len < o2->len)
2339 return -1;
2340 if (o1->len > o2->len)
2341 return 1;
2342 return memcmp(o1->data, o2->data, o1->len);
2343 }
2344
2345 static int
2346 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
2347 {
2348 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
2349 }
2350
2351 static int
2352 same_clid(clientid_t *cl1, clientid_t *cl2)
2353 {
2354 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
2355 }
2356
2357 static bool groups_equal(struct group_info *g1, struct group_info *g2)
2358 {
2359 int i;
2360
2361 if (g1->ngroups != g2->ngroups)
2362 return false;
2363 for (i=0; i<g1->ngroups; i++)
2364 if (!gid_eq(g1->gid[i], g2->gid[i]))
2365 return false;
2366 return true;
2367 }
2368
2369 /*
2370 * RFC 3530 language requires clid_inuse be returned when the
2371 * "principal" associated with a requests differs from that previously
2372 * used. We use uid, gid's, and gss principal string as our best
2373 * approximation. We also don't want to allow non-gss use of a client
2374 * established using gss: in theory cr_principal should catch that
2375 * change, but in practice cr_principal can be null even in the gss case
2376 * since gssd doesn't always pass down a principal string.
2377 */
2378 static bool is_gss_cred(struct svc_cred *cr)
2379 {
2380 /* Is cr_flavor one of the gss "pseudoflavors"?: */
2381 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2382 }
2383
2384
2385 static bool
2386 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2387 {
2388 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2389 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2390 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2391 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2392 return false;
2393 /* XXX: check that cr_targ_princ fields match ? */
2394 if (cr1->cr_principal == cr2->cr_principal)
2395 return true;
2396 if (!cr1->cr_principal || !cr2->cr_principal)
2397 return false;
2398 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2399 }
2400
2401 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2402 {
2403 struct svc_cred *cr = &rqstp->rq_cred;
2404 u32 service;
2405
2406 if (!cr->cr_gss_mech)
2407 return false;
2408 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2409 return service == RPC_GSS_SVC_INTEGRITY ||
2410 service == RPC_GSS_SVC_PRIVACY;
2411 }
2412
2413 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2414 {
2415 struct svc_cred *cr = &rqstp->rq_cred;
2416
2417 if (!cl->cl_mach_cred)
2418 return true;
2419 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2420 return false;
2421 if (!svc_rqst_integrity_protected(rqstp))
2422 return false;
2423 if (cl->cl_cred.cr_raw_principal)
2424 return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2425 cr->cr_raw_principal);
2426 if (!cr->cr_principal)
2427 return false;
2428 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2429 }
2430
2431 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2432 {
2433 __be32 verf[2];
2434
2435 /*
2436 * This is opaque to the client, so no need to byte-swap. Use
2437 * __force to keep sparse happy
2438 */
2439 verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
2440 verf[1] = (__force __be32)nn->clverifier_counter++;
2441 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2442 }
2443
2444 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2445 {
2446 clp->cl_clientid.cl_boot = (u32)nn->boot_time;
2447 clp->cl_clientid.cl_id = nn->clientid_counter++;
2448 gen_confirm(clp, nn);
2449 }
2450
2451 static struct nfs4_stid *
2452 find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2453 {
2454 struct nfs4_stid *ret;
2455
2456 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2457 if (!ret || !ret->sc_type)
2458 return NULL;
2459 return ret;
2460 }
2461
2462 static struct nfs4_stid *
2463 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
2464 {
2465 struct nfs4_stid *s;
2466
2467 spin_lock(&cl->cl_lock);
2468 s = find_stateid_locked(cl, t);
2469 if (s != NULL) {
2470 if (typemask & s->sc_type)
2471 refcount_inc(&s->sc_count);
2472 else
2473 s = NULL;
2474 }
2475 spin_unlock(&cl->cl_lock);
2476 return s;
2477 }
2478
2479 static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
2480 {
2481 struct nfsdfs_client *nc;
2482 nc = get_nfsdfs_client(inode);
2483 if (!nc)
2484 return NULL;
2485 return container_of(nc, struct nfs4_client, cl_nfsdfs);
2486 }
2487
2488 static void seq_quote_mem(struct seq_file *m, char *data, int len)
2489 {
2490 seq_printf(m, "\"");
2491 seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\");
2492 seq_printf(m, "\"");
2493 }
2494
2495 static const char *cb_state2str(int state)
2496 {
2497 switch (state) {
2498 case NFSD4_CB_UP:
2499 return "UP";
2500 case NFSD4_CB_UNKNOWN:
2501 return "UNKNOWN";
2502 case NFSD4_CB_DOWN:
2503 return "DOWN";
2504 case NFSD4_CB_FAULT:
2505 return "FAULT";
2506 }
2507 return "UNDEFINED";
2508 }
2509
2510 static int client_info_show(struct seq_file *m, void *v)
2511 {
2512 struct inode *inode = file_inode(m->file);
2513 struct nfs4_client *clp;
2514 u64 clid;
2515
2516 clp = get_nfsdfs_clp(inode);
2517 if (!clp)
2518 return -ENXIO;
2519 memcpy(&clid, &clp->cl_clientid, sizeof(clid));
2520 seq_printf(m, "clientid: 0x%llx\n", clid);
2521 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
2522
2523 if (clp->cl_state == NFSD4_COURTESY)
2524 seq_puts(m, "status: courtesy\n");
2525 else if (clp->cl_state == NFSD4_EXPIRABLE)
2526 seq_puts(m, "status: expirable\n");
2527 else if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2528 seq_puts(m, "status: confirmed\n");
2529 else
2530 seq_puts(m, "status: unconfirmed\n");
2531 seq_printf(m, "seconds from last renew: %lld\n",
2532 ktime_get_boottime_seconds() - clp->cl_time);
2533 seq_printf(m, "name: ");
2534 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
2535 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
2536 if (clp->cl_nii_domain.data) {
2537 seq_printf(m, "Implementation domain: ");
2538 seq_quote_mem(m, clp->cl_nii_domain.data,
2539 clp->cl_nii_domain.len);
2540 seq_printf(m, "\nImplementation name: ");
2541 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
2542 seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
2543 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
2544 }
2545 seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state));
2546 seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr);
2547 drop_client(clp);
2548
2549 return 0;
2550 }
2551
2552 DEFINE_SHOW_ATTRIBUTE(client_info);
2553
2554 static void *states_start(struct seq_file *s, loff_t *pos)
2555 __acquires(&clp->cl_lock)
2556 {
2557 struct nfs4_client *clp = s->private;
2558 unsigned long id = *pos;
2559 void *ret;
2560
2561 spin_lock(&clp->cl_lock);
2562 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2563 *pos = id;
2564 return ret;
2565 }
2566
2567 static void *states_next(struct seq_file *s, void *v, loff_t *pos)
2568 {
2569 struct nfs4_client *clp = s->private;
2570 unsigned long id = *pos;
2571 void *ret;
2572
2573 id++;
2575 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2576 *pos = id;
2577 return ret;
2578 }
2579
2580 static void states_stop(struct seq_file *s, void *v)
2581 __releases(&clp->cl_lock)
2582 {
2583 struct nfs4_client *clp = s->private;
2584
2585 spin_unlock(&clp->cl_lock);
2586 }
2587
2588 static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
2589 {
2590 seq_printf(s, "filename: \"%pD2\"", f->nf_file);
2591 }
2592
2593 static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
2594 {
2595 struct inode *inode = file_inode(f->nf_file);
2596
2597 seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
2598 MAJOR(inode->i_sb->s_dev),
2599 MINOR(inode->i_sb->s_dev),
2600 inode->i_ino);
2601 }
2602
2603 static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
2604 {
2605 seq_printf(s, "owner: ");
2606 seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
2607 }
2608
2609 static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
2610 {
2611 seq_printf(s, "0x%.8x", stid->si_generation);
2612 seq_printf(s, "%12phN", &stid->si_opaque);
2613 }
2614
2615 static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
2616 {
2617 struct nfs4_ol_stateid *ols;
2618 struct nfs4_file *nf;
2619 struct nfsd_file *file;
2620 struct nfs4_stateowner *oo;
2621 unsigned int access, deny;
2622
2623 if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
2624 return 0; /* XXX: or SEQ_SKIP? */
2625 ols = openlockstateid(st);
2626 oo = ols->st_stateowner;
2627 nf = st->sc_file;
2628
2629 spin_lock(&nf->fi_lock);
2630 file = find_any_file_locked(nf);
2631 if (!file)
2632 goto out;
2633
2634 seq_printf(s, "- ");
2635 nfs4_show_stateid(s, &st->sc_stateid);
2636 seq_printf(s, ": { type: open, ");
2637
2638 access = bmap_to_share_mode(ols->st_access_bmap);
2639 deny = bmap_to_share_mode(ols->st_deny_bmap);
2640
2641 seq_printf(s, "access: %s%s, ",
2642 access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2643 access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2644 seq_printf(s, "deny: %s%s, ",
2645 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2646 deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2647
2648 nfs4_show_superblock(s, file);
2649 seq_printf(s, ", ");
2650 nfs4_show_fname(s, file);
2651 seq_printf(s, ", ");
2652 nfs4_show_owner(s, oo);
2653 seq_printf(s, " }\n");
2654 out:
2655 spin_unlock(&nf->fi_lock);
2656 return 0;
2657 }
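/*
 * Editor's sketch of the resulting "states" line (all values
 * invented):
 *
 * - 0x00000001f32a9e4400000003a1b2c3d4: { type: open, access: rw,
 *   deny: --, superblock: "fd:00:131657", filename: "/export/file",
 *   owner: "open id:" }
 */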
2658
2659 static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
2660 {
2661 struct nfs4_ol_stateid *ols;
2662 struct nfs4_file *nf;
2663 struct nfsd_file *file;
2664 struct nfs4_stateowner *oo;
2665
2666 ols = openlockstateid(st);
2667 oo = ols->st_stateowner;
2668 nf = st->sc_file;
2669 spin_lock(&nf->fi_lock);
2670 file = find_any_file_locked(nf);
2671 if (!file)
2672 goto out;
2673
2674 seq_printf(s, "- ");
2675 nfs4_show_stateid(s, &st->sc_stateid);
2676 seq_printf(s, ": { type: lock, ");
2677
2678 /*
2679 * Note: a lock stateid isn't really the same thing as a lock,
2680 * it's the locking state held by one owner on a file, and there
2681 * may be multiple (or no) lock ranges associated with it.
2682 * (The same is true of open stateids.)
2683 */
2684
2685 nfs4_show_superblock(s, file);
2686 /* XXX: open stateid? */
2687 seq_printf(s, ", ");
2688 nfs4_show_fname(s, file);
2689 seq_printf(s, ", ");
2690 nfs4_show_owner(s, oo);
2691 seq_printf(s, " }\n");
2692 out:
2693 spin_unlock(&nf->fi_lock);
2694 return 0;
2695 }
2696
2697 static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
2698 {
2699 struct nfs4_delegation *ds;
2700 struct nfs4_file *nf;
2701 struct nfsd_file *file;
2702
2703 ds = delegstateid(st);
2704 nf = st->sc_file;
2705 spin_lock(&nf->fi_lock);
2706 file = nf->fi_deleg_file;
2707 if (!file)
2708 goto out;
2709
2710 seq_printf(s, "- ");
2711 nfs4_show_stateid(s, &st->sc_stateid);
2712 seq_printf(s, ": { type: deleg, ");
2713
2714 /* Kinda dead code as long as we only support read delegs: */
2715 seq_printf(s, "access: %s, ",
2716 ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
2717
2718 /* XXX: lease time, whether it's being recalled. */
2719
2720 nfs4_show_superblock(s, file);
2721 seq_printf(s, ", ");
2722 nfs4_show_fname(s, file);
2723 seq_printf(s, " }\n");
2724 out:
2725 spin_unlock(&nf->fi_lock);
2726 return 0;
2727 }
2728
2729 static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
2730 {
2731 struct nfs4_layout_stateid *ls;
2732 struct nfsd_file *file;
2733
2734 ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
2735 file = ls->ls_file;
2736
2737 seq_printf(s, "- ");
2738 nfs4_show_stateid(s, &st->sc_stateid);
2739 seq_printf(s, ": { type: layout, ");
2740
2741 /* XXX: What else would be useful? */
2742
2743 nfs4_show_superblock(s, file);
2744 seq_printf(s, ", ");
2745 nfs4_show_fname(s, file);
2746 seq_printf(s, " }\n");
2747
2748 return 0;
2749 }
2750
2751 static int states_show(struct seq_file *s, void *v)
2752 {
2753 struct nfs4_stid *st = v;
2754
2755 switch (st->sc_type) {
2756 case NFS4_OPEN_STID:
2757 return nfs4_show_open(s, st);
2758 case NFS4_LOCK_STID:
2759 return nfs4_show_lock(s, st);
2760 case NFS4_DELEG_STID:
2761 return nfs4_show_deleg(s, st);
2762 case NFS4_LAYOUT_STID:
2763 return nfs4_show_layout(s, st);
2764 default:
2765 return 0; /* XXX: or SEQ_SKIP? */
2766 }
2767 /* XXX: copy stateids? */
2768 }
2769
2770 static struct seq_operations states_seq_ops = {
2771 .start = states_start,
2772 .next = states_next,
2773 .stop = states_stop,
2774 .show = states_show
2775 };
2776
2777 static int client_states_open(struct inode *inode, struct file *file)
2778 {
2779 struct seq_file *s;
2780 struct nfs4_client *clp;
2781 int ret;
2782
2783 clp = get_nfsdfs_clp(inode);
2784 if (!clp)
2785 return -ENXIO;
2786
2787 ret = seq_open(file, &states_seq_ops);
2788 if (ret)
2789 return ret;
2790 s = file->private_data;
2791 s->private = clp;
2792 return 0;
2793 }
2794
2795 static int client_opens_release(struct inode *inode, struct file *file)
2796 {
2797 struct seq_file *m = file->private_data;
2798 struct nfs4_client *clp = m->private;
2799
2800 /* XXX: alternatively, we could get/drop in seq start/stop */
2801 drop_client(clp);
2802 return seq_release(inode, file);
2803 }
2804
2805 static const struct file_operations client_states_fops = {
2806 .open = client_states_open,
2807 .read = seq_read,
2808 .llseek = seq_lseek,
2809 .release = client_opens_release,
2810 };
2811
2812 /*
2813 * Normally we refuse to destroy clients that are in use, but here the
2814 * administrator is telling us to just do it. We also want to wait
2815 * so the caller has a guarantee that the client's locks are gone by
2816 * the time the write returns:
2817 */
2818 static void force_expire_client(struct nfs4_client *clp)
2819 {
2820 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2821 bool already_expired;
2822
2823 trace_nfsd_clid_admin_expired(&clp->cl_clientid);
2824
2825 spin_lock(&nn->client_lock);
2826 clp->cl_time = 0;
2827 spin_unlock(&nn->client_lock);
2828
2829 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
2830 spin_lock(&nn->client_lock);
2831 already_expired = list_empty(&clp->cl_lru);
2832 if (!already_expired)
2833 unhash_client_locked(clp);
2834 spin_unlock(&nn->client_lock);
2835
2836 if (!already_expired)
2837 expire_client(clp);
2838 else
2839 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
2840 }
2841
2842 static ssize_t client_ctl_write(struct file *file, const char __user *buf,
2843 size_t size, loff_t *pos)
2844 {
2845 char *data;
2846 struct nfs4_client *clp;
2847
2848 data = simple_transaction_get(file, buf, size);
2849 if (IS_ERR(data))
2850 return PTR_ERR(data);
2851 if (size != 7 || 0 != memcmp(data, "expire\n", 7))
2852 return -EINVAL;
2853 clp = get_nfsdfs_clp(file_inode(file));
2854 if (!clp)
2855 return -ENXIO;
2856 force_expire_client(clp);
2857 drop_client(clp);
2858 return 7;
2859 }
2860
2861 static const struct file_operations client_ctl_fops = {
2862 .write = client_ctl_write,
2863 .release = simple_transaction_release,
2864 };
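/*
 * Editor's usage note: with the nfsd filesystem mounted in its usual
 * location, an administrator force-expires a client by writing the
 * literal 7-byte string "expire\n" to its ctl file, e.g. (client
 * directory name invented):
 *
 *	echo expire > /proc/fs/nfsd/clients/42/ctl
 *
 * Any other input is rejected with -EINVAL by client_ctl_write()
 * above.
 */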
2865
2866 static const struct tree_descr client_files[] = {
2867 [0] = {"info", &client_info_fops, S_IRUSR},
2868 [1] = {"states", &client_states_fops, S_IRUSR},
2869 [2] = {"ctl", &client_ctl_fops, S_IWUSR},
2870 [3] = {""},
2871 };
2872
2873 static int
2874 nfsd4_cb_recall_any_done(struct nfsd4_callback *cb,
2875 struct rpc_task *task)
2876 {
2877 trace_nfsd_cb_recall_any_done(cb, task);
2878 switch (task->tk_status) {
2879 case -NFS4ERR_DELAY:
2880 rpc_delay(task, 2 * HZ);
2881 return 0;
2882 default:
2883 return 1;
2884 }
2885 }
2886
2887 static void
2888 nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
2889 {
2890 struct nfs4_client *clp = cb->cb_clp;
2891 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2892
2893 spin_lock(&nn->client_lock);
2894 clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
2895 put_client_renew_locked(clp);
2896 spin_unlock(&nn->client_lock);
2897 }
2898
2899 static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = {
2900 .done = nfsd4_cb_recall_any_done,
2901 .release = nfsd4_cb_recall_any_release,
2902 };
2903
2904 static struct nfs4_client *create_client(struct xdr_netobj name,
2905 struct svc_rqst *rqstp, nfs4_verifier *verf)
2906 {
2907 struct nfs4_client *clp;
2908 struct sockaddr *sa = svc_addr(rqstp);
2909 int ret;
2910 struct net *net = SVC_NET(rqstp);
2911 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2912 struct dentry *dentries[ARRAY_SIZE(client_files)];
2913
2914 clp = alloc_client(name, nn);
2915 if (clp == NULL)
2916 return NULL;
2917
2918 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2919 if (ret) {
2920 free_client(clp);
2921 return NULL;
2922 }
2923 gen_clid(clp, nn);
2924 kref_init(&clp->cl_nfsdfs.cl_ref);
2925 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2926 clp->cl_time = ktime_get_boottime_seconds();
2927 clear_bit(0, &clp->cl_cb_slot_busy);
2928 copy_verf(clp, verf);
2929 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
2930 clp->cl_cb_session = NULL;
2931 clp->net = net;
2932 clp->cl_nfsd_dentry = nfsd_client_mkdir(
2933 nn, &clp->cl_nfsdfs,
2934 clp->cl_clientid.cl_id - nn->clientid_base,
2935 client_files, dentries);
2936 clp->cl_nfsd_info_dentry = dentries[0];
2937 if (!clp->cl_nfsd_dentry) {
2938 free_client(clp);
2939 return NULL;
2940 }
2941 clp->cl_ra = kzalloc(sizeof(*clp->cl_ra), GFP_KERNEL);
2942 if (!clp->cl_ra) {
2943 free_client(clp);
2944 return NULL;
2945 }
2946 clp->cl_ra_time = 0;
2947 nfsd4_init_cb(&clp->cl_ra->ra_cb, clp, &nfsd4_cb_recall_any_ops,
2948 NFSPROC4_CLNT_CB_RECALL_ANY);
2949 return clp;
2950 }
2951
2952 static void
2953 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2954 {
2955 struct rb_node **new = &(root->rb_node), *parent = NULL;
2956 struct nfs4_client *clp;
2957
2958 while (*new) {
2959 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2960 parent = *new;
2961
2962 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2963 new = &((*new)->rb_left);
2964 else
2965 new = &((*new)->rb_right);
2966 }
2967
2968 rb_link_node(&new_clp->cl_namenode, parent, new);
2969 rb_insert_color(&new_clp->cl_namenode, root);
2970 }
2971
2972 static struct nfs4_client *
2973 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2974 {
2975 int cmp;
2976 struct rb_node *node = root->rb_node;
2977 struct nfs4_client *clp;
2978
2979 while (node) {
2980 clp = rb_entry(node, struct nfs4_client, cl_namenode);
2981 cmp = compare_blob(&clp->cl_name, name);
2982 if (cmp > 0)
2983 node = node->rb_left;
2984 else if (cmp < 0)
2985 node = node->rb_right;
2986 else
2987 return clp;
2988 }
2989 return NULL;
2990 }
2991
2992 static void
2993 add_to_unconfirmed(struct nfs4_client *clp)
2994 {
2995 unsigned int idhashval;
2996 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2997
2998 lockdep_assert_held(&nn->client_lock);
2999
3000 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
3001 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
3002 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
3003 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
3004 renew_client_locked(clp);
3005 }
3006
3007 static void
3008 move_to_confirmed(struct nfs4_client *clp)
3009 {
3010 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
3011 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
3012
3013 lockdep_assert_held(&nn->client_lock);
3014
3015 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
3016 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
3017 add_clp_to_name_tree(clp, &nn->conf_name_tree);
3018 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
3019 trace_nfsd_clid_confirmed(&clp->cl_clientid);
3020 renew_client_locked(clp);
3021 }
3022
3023 static struct nfs4_client *
3024 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
3025 {
3026 struct nfs4_client *clp;
3027 unsigned int idhashval = clientid_hashval(clid->cl_id);
3028
3029 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
3030 if (same_clid(&clp->cl_clientid, clid)) {
3031 if ((bool)clp->cl_minorversion != sessions)
3032 return NULL;
3033 renew_client_locked(clp);
3034 return clp;
3035 }
3036 }
3037 return NULL;
3038 }
3039
3040 static struct nfs4_client *
3041 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
3042 {
3043 struct list_head *tbl = nn->conf_id_hashtbl;
3044
3045 lockdep_assert_held(&nn->client_lock);
3046 return find_client_in_id_table(tbl, clid, sessions);
3047 }
3048
3049 static struct nfs4_client *
3050 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
3051 {
3052 struct list_head *tbl = nn->unconf_id_hashtbl;
3053
3054 lockdep_assert_held(&nn->client_lock);
3055 return find_client_in_id_table(tbl, clid, sessions);
3056 }
3057
3058 static bool clp_used_exchangeid(struct nfs4_client *clp)
3059 {
3060 return clp->cl_exchange_flags != 0;
3061 }
3062
3063 static struct nfs4_client *
3064 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
3065 {
3066 lockdep_assert_held(&nn->client_lock);
3067 return find_clp_in_name_tree(name, &nn->conf_name_tree);
3068 }
3069
3070 static struct nfs4_client *
3071 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
3072 {
3073 lockdep_assert_held(&nn->client_lock);
3074 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
3075 }
3076
3077 static void
3078 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
3079 {
3080 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
3081 struct sockaddr *sa = svc_addr(rqstp);
3082 u32 scopeid = rpc_get_scope_id(sa);
3083 unsigned short expected_family;
3084
3085 /* Currently, we only support tcp and tcp6 for the callback channel */
3086 if (se->se_callback_netid_len == 3 &&
3087 !memcmp(se->se_callback_netid_val, "tcp", 3))
3088 expected_family = AF_INET;
3089 else if (se->se_callback_netid_len == 4 &&
3090 !memcmp(se->se_callback_netid_val, "tcp6", 4))
3091 expected_family = AF_INET6;
3092 else
3093 goto out_err;
3094
3095 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
3096 se->se_callback_addr_len,
3097 (struct sockaddr *)&conn->cb_addr,
3098 sizeof(conn->cb_addr));
3099
3100 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
3101 goto out_err;
3102
3103 if (conn->cb_addr.ss_family == AF_INET6)
3104 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
3105
3106 conn->cb_prog = se->se_callback_prog;
3107 conn->cb_ident = se->se_callback_ident;
3108 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
3109 trace_nfsd_cb_args(clp, conn);
3110 return;
3111 out_err:
3112 conn->cb_addr.ss_family = AF_UNSPEC;
3113 conn->cb_addrlen = 0;
3114 trace_nfsd_cb_nodelegs(clp);
3115 return;
3116 }
3117
3118 /*
3119 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
3120 */
3121 static void
3122 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
3123 {
3124 struct xdr_buf *buf = resp->xdr->buf;
3125 struct nfsd4_slot *slot = resp->cstate.slot;
3126 unsigned int base;
3127
3128 dprintk("--> %s slot %p\n", __func__, slot);
3129
3130 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
3131 slot->sl_opcnt = resp->opcnt;
3132 slot->sl_status = resp->cstate.status;
3133 free_svc_cred(&slot->sl_cred);
3134 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
3135
3136 if (!nfsd4_cache_this(resp)) {
3137 slot->sl_flags &= ~NFSD4_SLOT_CACHED;
3138 return;
3139 }
3140 slot->sl_flags |= NFSD4_SLOT_CACHED;
3141
3142 base = resp->cstate.data_offset;
3143 slot->sl_datalen = buf->len - base;
3144 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
3145 WARN(1, "%s: sessions DRC could not cache compound\n",
3146 __func__);
3147 return;
3148 }
3149
3150 /*
3151 * Encode the replay sequence operation from the slot values.
3152 * If cachethis is FALSE, encode the uncached rep error on the next
3153 * operation, which sets resp->p and increments resp->opcnt for
3154 * nfs4svc_encode_compoundres.
3155 *
3156 */
3157 static __be32
3158 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
3159 struct nfsd4_compoundres *resp)
3160 {
3161 struct nfsd4_op *op;
3162 struct nfsd4_slot *slot = resp->cstate.slot;
3163
3164 /* Encode the replayed sequence operation */
3165 op = &args->ops[resp->opcnt - 1];
3166 nfsd4_encode_operation(resp, op);
3167
3168 if (slot->sl_flags & NFSD4_SLOT_CACHED)
3169 return op->status;
3170 if (args->opcnt == 1) {
3171 /*
3172 * The original operation wasn't a solo sequence--we
3173 * always cache those--so this retry must not match the
3174 * original:
3175 */
3176 op->status = nfserr_seq_false_retry;
3177 } else {
3178 op = &args->ops[resp->opcnt++];
3179 op->status = nfserr_retry_uncached_rep;
3180 nfsd4_encode_operation(resp, op);
3181 }
3182 return op->status;
3183 }
3184
3185 /*
3186 * The sequence operation is not cached because we can use the slot and
3187 * session values.
3188 */
3189 static __be32
3190 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
3191 struct nfsd4_sequence *seq)
3192 {
3193 struct nfsd4_slot *slot = resp->cstate.slot;
3194 struct xdr_stream *xdr = resp->xdr;
3195 __be32 *p;
3196 __be32 status;
3197
3198 dprintk("--> %s slot %p\n", __func__, slot);
3199
3200 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
3201 if (status)
3202 return status;
3203
3204 p = xdr_reserve_space(xdr, slot->sl_datalen);
3205 if (!p) {
3206 WARN_ON_ONCE(1);
3207 return nfserr_serverfault;
3208 }
3209 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
3210 xdr_commit_encode(xdr);
3211
3212 resp->opcnt = slot->sl_opcnt;
3213 return slot->sl_status;
3214 }
3215
3216 /*
3217 * Set the exchange_id flags returned by the server.
3218 */
3219 static void
3220 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
3221 {
3222 #ifdef CONFIG_NFSD_PNFS
3223 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
3224 #else
3225 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
3226 #endif
3227
3228 /* Referrals are supported, Migration is not. */
3229 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
3230
3231 /* set the wire flags to return to client. */
3232 clid->flags = new->cl_exchange_flags;
3233 }
3234
3235 static bool client_has_openowners(struct nfs4_client *clp)
3236 {
3237 struct nfs4_openowner *oo;
3238
3239 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
3240 if (!list_empty(&oo->oo_owner.so_stateids))
3241 return true;
3242 }
3243 return false;
3244 }
3245
3246 static bool client_has_state(struct nfs4_client *clp)
3247 {
3248 return client_has_openowners(clp)
3249 #ifdef CONFIG_NFSD_PNFS
3250 || !list_empty(&clp->cl_lo_states)
3251 #endif
3252 || !list_empty(&clp->cl_delegations)
3253 || !list_empty(&clp->cl_sessions)
3254 || !list_empty(&clp->async_copies);
3255 }
3256
3257 static __be32 copy_impl_id(struct nfs4_client *clp,
3258 struct nfsd4_exchange_id *exid)
3259 {
3260 if (!exid->nii_domain.data)
3261 return 0;
3262 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
3263 if (!clp->cl_nii_domain.data)
3264 return nfserr_jukebox;
3265 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
3266 if (!clp->cl_nii_name.data)
3267 return nfserr_jukebox;
3268 clp->cl_nii_time = exid->nii_time;
3269 return 0;
3270 }
3271
3272 __be32
3273 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3274 union nfsd4_op_u *u)
3275 {
3276 struct nfsd4_exchange_id *exid = &u->exchange_id;
3277 struct nfs4_client *conf, *new;
3278 struct nfs4_client *unconf = NULL;
3279 __be32 status;
3280 char addr_str[INET6_ADDRSTRLEN];
3281 nfs4_verifier verf = exid->verifier;
3282 struct sockaddr *sa = svc_addr(rqstp);
3283 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
3284 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3285
3286 rpc_ntop(sa, addr_str, sizeof(addr_str));
3287 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
3288 "ip_addr=%s flags %x, spa_how %u\n",
3289 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
3290 addr_str, exid->flags, exid->spa_how);
3291
3292 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
3293 return nfserr_inval;
3294
3295 new = create_client(exid->clname, rqstp, &verf);
3296 if (new == NULL)
3297 return nfserr_jukebox;
3298 status = copy_impl_id(new, exid);
3299 if (status)
3300 goto out_nolock;
3301
3302 switch (exid->spa_how) {
3303 case SP4_MACH_CRED:
3304 exid->spo_must_enforce[0] = 0;
3305 exid->spo_must_enforce[1] = (
3306 1 << (OP_BIND_CONN_TO_SESSION - 32) |
3307 1 << (OP_EXCHANGE_ID - 32) |
3308 1 << (OP_CREATE_SESSION - 32) |
3309 1 << (OP_DESTROY_SESSION - 32) |
3310 1 << (OP_DESTROY_CLIENTID - 32));
3311
3312 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
3313 1 << (OP_OPEN_DOWNGRADE) |
3314 1 << (OP_LOCKU) |
3315 1 << (OP_DELEGRETURN));
3316
3317 exid->spo_must_allow[1] &= (
3318 1 << (OP_TEST_STATEID - 32) |
3319 1 << (OP_FREE_STATEID - 32));
3320 if (!svc_rqst_integrity_protected(rqstp)) {
3321 status = nfserr_inval;
3322 goto out_nolock;
3323 }
3324 /*
3325 * Sometimes userspace doesn't give us a principal.
3326 * Which is a bug, really. Anyway, we can't enforce
3327 * MACH_CRED in that case, better to give up now:
3328 */
3329 if (!new->cl_cred.cr_principal &&
3330 !new->cl_cred.cr_raw_principal) {
3331 status = nfserr_serverfault;
3332 goto out_nolock;
3333 }
3334 new->cl_mach_cred = true;
3335 break;
3336 case SP4_NONE:
3337 break;
3338 default: /* checked by xdr code */
3339 WARN_ON_ONCE(1);
3340 fallthrough;
3341 case SP4_SSV:
3342 status = nfserr_encr_alg_unsupp;
3343 goto out_nolock;
3344 }
3345
3346 /* Cases below refer to rfc 5661 section 18.35.4: */
3347 spin_lock(&nn->client_lock);
3348 conf = find_confirmed_client_by_name(&exid->clname, nn);
3349 if (conf) {
3350 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
3351 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
3352
3353 if (update) {
3354 if (!clp_used_exchangeid(conf)) { /* buggy client */
3355 status = nfserr_inval;
3356 goto out;
3357 }
3358 if (!nfsd4_mach_creds_match(conf, rqstp)) {
3359 status = nfserr_wrong_cred;
3360 goto out;
3361 }
3362 if (!creds_match) { /* case 9 */
3363 status = nfserr_perm;
3364 goto out;
3365 }
3366 if (!verfs_match) { /* case 8 */
3367 status = nfserr_not_same;
3368 goto out;
3369 }
3370 /* case 6 */
3371 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
3372 trace_nfsd_clid_confirmed_r(conf);
3373 goto out_copy;
3374 }
3375 if (!creds_match) { /* case 3 */
3376 if (client_has_state(conf)) {
3377 status = nfserr_clid_inuse;
3378 trace_nfsd_clid_cred_mismatch(conf, rqstp);
3379 goto out;
3380 }
3381 goto out_new;
3382 }
3383 if (verfs_match) { /* case 2 */
3384 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
3385 trace_nfsd_clid_confirmed_r(conf);
3386 goto out_copy;
3387 }
3388 /* case 5, client reboot */
3389 trace_nfsd_clid_verf_mismatch(conf, rqstp, &verf);
3390 conf = NULL;
3391 goto out_new;
3392 }
3393
3394 if (update) { /* case 7 */
3395 status = nfserr_noent;
3396 goto out;
3397 }
3398
3399 unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
3400 if (unconf) /* case 4, possible retry or client restart */
3401 unhash_client_locked(unconf);
3402
3403 /* case 1, new owner ID */
3404 trace_nfsd_clid_fresh(new);
3405
3406 out_new:
3407 if (conf) {
3408 status = mark_client_expired_locked(conf);
3409 if (status)
3410 goto out;
3411 trace_nfsd_clid_replaced(&conf->cl_clientid);
3412 }
3413 new->cl_minorversion = cstate->minorversion;
3414 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
3415 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
3416
3417 add_to_unconfirmed(new);
3418 swap(new, conf);
3419 out_copy:
3420 exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
3421 exid->clientid.cl_id = conf->cl_clientid.cl_id;
3422
3423 exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
3424 nfsd4_set_ex_flags(conf, exid);
3425
3426 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
3427 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
3428 status = nfs_ok;
3429
3430 out:
3431 spin_unlock(&nn->client_lock);
3432 out_nolock:
3433 if (new)
3434 expire_client(new);
3435 if (unconf) {
3436 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
3437 expire_client(unconf);
3438 }
3439 return status;
3440 }
3441
3442 static __be32
3443 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
3444 {
3445 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
3446 slot_seqid);
3447
3448 /* The slot is in use, and no response has been sent. */
3449 if (slot_inuse) {
3450 if (seqid == slot_seqid)
3451 return nfserr_jukebox;
3452 else
3453 return nfserr_seq_misordered;
3454 }
3455 /* Note unsigned 32-bit arithmetic handles wraparound: */
3456 if (likely(seqid == slot_seqid + 1))
3457 return nfs_ok;
3458 if (seqid == slot_seqid)
3459 return nfserr_replay_cache;
3460 return nfserr_seq_misordered;
3461 }
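/*
 * Editor's illustration of the wraparound note above: if slot_seqid is
 * 0xffffffff, then slot_seqid + 1 wraps to 0 in u32 arithmetic, so a
 * request carrying seqid == 0 is accepted as the next in-order use of
 * the slot rather than rejected as misordered.
 */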
3462
3463 /*
3464 * Cache the create session result into the create session single DRC
3465 * slot cache by saving the xdr structure. sl_seqid has been set.
3466 * Do this for solo or embedded create session operations.
3467 */
3468 static void
3469 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
3470 struct nfsd4_clid_slot *slot, __be32 nfserr)
3471 {
3472 slot->sl_status = nfserr;
3473 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
3474 }
3475
3476 static __be32
3477 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
3478 struct nfsd4_clid_slot *slot)
3479 {
3480 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
3481 return slot->sl_status;
3482 }
3483
3484 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
3485 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
3486 1 + /* minimal tag: a zero-length string, so just the length word */ \
3487 3 + /* version, opcount, opcode */ \
3488 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3489 /* seqid, slotid, highest_slotid, cachethis */ \
3490 4 ) * sizeof(__be32))
3491
3492 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
3493 2 + /* verifier: AUTH_NULL, length 0 */\
3494 1 + /* status */ \
3495 1 + /* minimal tag: a zero-length string, so just the length word */ \
3496 3 + /* opcount, opcode, opstatus*/ \
3497 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3498 /* seqid, slotid, highest_slotid, target_highest_slotid, status_flags */ \
3499 5 ) * sizeof(__be32))
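/*
 * For reference: a sessionid is 16 bytes (RFC 5661), so
 * XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) is 4 and both minima above work
 * out to 16 32-bit words, i.e. 64 bytes each.
 */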
3500
3501 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
3502 {
3503 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
3504
3505 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
3506 return nfserr_toosmall;
3507 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
3508 return nfserr_toosmall;
3509 ca->headerpadsz = 0;
3510 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
3511 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
3512 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
3513 ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
3514 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
3515 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
3516 /*
3517  * Note that decreasing the slot size below the client's request
3518  * may make it difficult for the client to function correctly,
3519  * whereas decreasing the number of slots should only affect
3520  * performance.  When short on memory we therefore prefer to
3521  * decrease the number of slots rather than their size.  Clients
3522  * that request larger slots than they need will get poor results.
3523  * Note that we always allow at least one slot, because our
3524  * accounting is soft and provides no guarantees either way.
3525  */
3526 ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
3527
3528 return nfs_ok;
3529 }
3530
3531 /*
3532 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
3533  * These are based on similar macros in linux/sunrpc/msg_prot.h.
3534 */
3535 #define RPC_MAX_HEADER_WITH_AUTH_SYS \
3536 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
3537
3538 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
3539 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
3540
3541 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
3542 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
3543 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
3544 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
3545 sizeof(__be32))
3546
3547 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
3548 {
3549 ca->headerpadsz = 0;
3550
3551 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
3552 return nfserr_toosmall;
3553 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
3554 return nfserr_toosmall;
3555 ca->maxresp_cached = 0;
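/* Every backchannel COMPOUND carries CB_SEQUENCE plus at least one
 * other operation, so fewer than two ops per compound is unusable. */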
3556 if (ca->maxops < 2)
3557 return nfserr_toosmall;
3558
3559 return nfs_ok;
3560 }
3561
3562 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
3563 {
3564 switch (cbs->flavor) {
3565 case RPC_AUTH_NULL:
3566 case RPC_AUTH_UNIX:
3567 return nfs_ok;
3568 default:
3569 /*
3570 * GSS case: the spec doesn't allow us to return this
3571 * error. But it also doesn't allow us not to support
3572 * GSS.
3573 * I'd rather this fail hard than return some error the
3574 * client might think it can already handle:
3575 */
3576 return nfserr_encr_alg_unsupp;
3577 }
3578 }
3579
3580 __be32
3581 nfsd4_create_session(struct svc_rqst *rqstp,
3582 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3583 {
3584 struct nfsd4_create_session *cr_ses = &u->create_session;
3585 struct sockaddr *sa = svc_addr(rqstp);
3586 struct nfs4_client *conf, *unconf;
3587 struct nfs4_client *old = NULL;
3588 struct nfsd4_session *new;
3589 struct nfsd4_conn *conn;
3590 struct nfsd4_clid_slot *cs_slot = NULL;
3591 __be32 status = 0;
3592 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3593
3594 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
3595 return nfserr_inval;
3596 status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
3597 if (status)
3598 return status;
3599 status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
3600 if (status)
3601 return status;
3602 status = check_backchannel_attrs(&cr_ses->back_channel);
3603 if (status)
3604 goto out_release_drc_mem;
3605 status = nfserr_jukebox;
3606 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
3607 if (!new)
3608 goto out_release_drc_mem;
3609 conn = alloc_conn_from_crses(rqstp, cr_ses);
3610 if (!conn)
3611 goto out_free_session;
3612
3613 spin_lock(&nn->client_lock);
3614 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
3615 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
3616 WARN_ON_ONCE(conf && unconf);
3617
3618 if (conf) {
3619 status = nfserr_wrong_cred;
3620 if (!nfsd4_mach_creds_match(conf, rqstp))
3621 goto out_free_conn;
3622 cs_slot = &conf->cl_cs_slot;
3623 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3624 if (status) {
3625 if (status == nfserr_replay_cache)
3626 status = nfsd4_replay_create_session(cr_ses, cs_slot);
3627 goto out_free_conn;
3628 }
3629 } else if (unconf) {
3630 status = nfserr_clid_inuse;
3631 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
3632 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
3633 trace_nfsd_clid_cred_mismatch(unconf, rqstp);
3634 goto out_free_conn;
3635 }
3636 status = nfserr_wrong_cred;
3637 if (!nfsd4_mach_creds_match(unconf, rqstp))
3638 goto out_free_conn;
3639 cs_slot = &unconf->cl_cs_slot;
3640 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3641 if (status) {
3642 /* an unconfirmed replay returns misordered */
3643 status = nfserr_seq_misordered;
3644 goto out_free_conn;
3645 }
3646 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3647 if (old) {
3648 status = mark_client_expired_locked(old);
3649 if (status) {
3650 old = NULL;
3651 goto out_free_conn;
3652 }
3653 trace_nfsd_clid_replaced(&old->cl_clientid);
3654 }
3655 move_to_confirmed(unconf);
3656 conf = unconf;
3657 } else {
3658 status = nfserr_stale_clientid;
3659 goto out_free_conn;
3660 }
3661 status = nfs_ok;
3662 /* Persistent sessions are not supported */
3663 cr_ses->flags &= ~SESSION4_PERSIST;
3664 /* Upshifting from TCP to RDMA is not supported */
3665 cr_ses->flags &= ~SESSION4_RDMA;
3666
3667 init_session(rqstp, new, conf, cr_ses);
3668 nfsd4_get_session_locked(new);
3669
3670 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
3671 NFS4_MAX_SESSIONID_LEN);
3672 cs_slot->sl_seqid++;
3673 cr_ses->seqid = cs_slot->sl_seqid;
3674
3675 /* cache solo and embedded create sessions under the client_lock */
3676 nfsd4_cache_create_session(cr_ses, cs_slot, status);
3677 spin_unlock(&nn->client_lock);
3678 if (conf == unconf)
3679 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
3680 /* init connection and backchannel */
3681 nfsd4_init_conn(rqstp, conn, new);
3682 nfsd4_put_session(new);
3683 if (old)
3684 expire_client(old);
3685 return status;
3686 out_free_conn:
3687 spin_unlock(&nn->client_lock);
3688 free_conn(conn);
3689 if (old)
3690 expire_client(old);
3691 out_free_session:
3692 __free_session(new);
3693 out_release_drc_mem:
3694 nfsd4_put_drc_mem(&cr_ses->fore_channel);
3695 return status;
3696 }
3697
3698 static __be32 nfsd4_map_bcts_dir(u32 *dir)
3699 {
3700 switch (*dir) {
3701 case NFS4_CDFC4_FORE:
3702 case NFS4_CDFC4_BACK:
3703 return nfs_ok;
3704 case NFS4_CDFC4_FORE_OR_BOTH:
3705 case NFS4_CDFC4_BACK_OR_BOTH:
3706 *dir = NFS4_CDFC4_BOTH;
3707 return nfs_ok;
3708 }
3709 return nfserr_inval;
3710 }
3711
3712 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
3713 struct nfsd4_compound_state *cstate,
3714 union nfsd4_op_u *u)
3715 {
3716 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
3717 struct nfsd4_session *session = cstate->session;
3718 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3719 __be32 status;
3720
3721 status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
3722 if (status)
3723 return status;
3724 spin_lock(&nn->client_lock);
3725 session->se_cb_prog = bc->bc_cb_program;
3726 session->se_cb_sec = bc->bc_cb_sec;
3727 spin_unlock(&nn->client_lock);
3728
3729 nfsd4_probe_callback(session->se_client);
3730
3731 return nfs_ok;
3732 }
3733
3734 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
3735 {
3736 struct nfsd4_conn *c;
3737
3738 list_for_each_entry(c, &s->se_conns, cn_persession) {
3739 if (c->cn_xprt == xpt) {
3740 return c;
3741 }
3742 }
3743 return NULL;
3744 }
3745
3746 static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
3747 struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
3748 {
3749 struct nfs4_client *clp = session->se_client;
3750 struct svc_xprt *xpt = rqst->rq_xprt;
3751 struct nfsd4_conn *c;
3752 __be32 status;
3753
3754 /* Following the last paragraph of RFC 5661 Section 18.34.3: */
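/*
 * That is: a connection already bound compatibly with the request
 * (exactly, or within the *_OR_BOTH cases) yields nfs_ok, an
 * incompatible existing binding yields nfserr_inval, and an unknown
 * connection yields nfserr_noent.
 */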
3755 spin_lock(&clp->cl_lock);
3756 c = __nfsd4_find_conn(xpt, session);
3757 if (!c)
3758 status = nfserr_noent;
3759 else if (req == c->cn_flags)
3760 status = nfs_ok;
3761 else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
3762 c->cn_flags != NFS4_CDFC4_BACK)
3763 status = nfs_ok;
3764 else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
3765 c->cn_flags != NFS4_CDFC4_FORE)
3766 status = nfs_ok;
3767 else
3768 status = nfserr_inval;
3769 spin_unlock(&clp->cl_lock);
3770 if (status == nfs_ok && conn)
3771 *conn = c;
3772 return status;
3773 }
3774
3775 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
3776 struct nfsd4_compound_state *cstate,
3777 union nfsd4_op_u *u)
3778 {
3779 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
3780 __be32 status;
3781 struct nfsd4_conn *conn;
3782 struct nfsd4_session *session;
3783 struct net *net = SVC_NET(rqstp);
3784 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3785
3786 if (!nfsd4_last_compound_op(rqstp))
3787 return nfserr_not_only_op;
3788 spin_lock(&nn->client_lock);
3789 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
3790 spin_unlock(&nn->client_lock);
3791 if (!session)
3792 goto out_no_session;
3793 status = nfserr_wrong_cred;
3794 if (!nfsd4_mach_creds_match(session->se_client, rqstp))
3795 goto out;
3796 status = nfsd4_match_existing_connection(rqstp, session,
3797 bcts->dir, &conn);
3798 if (status == nfs_ok) {
3799 if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
3800 bcts->dir == NFS4_CDFC4_BACK)
3801 conn->cn_flags |= NFS4_CDFC4_BACK;
3802 nfsd4_probe_callback(session->se_client);
3803 goto out;
3804 }
3805 if (status == nfserr_inval)
3806 goto out;
3807 status = nfsd4_map_bcts_dir(&bcts->dir);
3808 if (status)
3809 goto out;
3810 conn = alloc_conn(rqstp, bcts->dir);
3811 status = nfserr_jukebox;
3812 if (!conn)
3813 goto out;
3814 nfsd4_init_conn(rqstp, conn, session);
3815 status = nfs_ok;
3816 out:
3817 nfsd4_put_session(session);
3818 out_no_session:
3819 return status;
3820 }
3821
3822 static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
3823 {
3824 if (!cstate->session)
3825 return false;
3826 return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
3827 }
3828
3829 __be32
3830 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
3831 union nfsd4_op_u *u)
3832 {
3833 struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
3834 struct nfsd4_session *ses;
3835 __be32 status;
3836 int ref_held_by_me = 0;
3837 struct net *net = SVC_NET(r);
3838 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3839
3840 status = nfserr_not_only_op;
3841 if (nfsd4_compound_in_session(cstate, sessionid)) {
3842 if (!nfsd4_last_compound_op(r))
3843 goto out;
3844 ref_held_by_me++;
3845 }
3846 dump_sessionid(__func__, sessionid);
3847 spin_lock(&nn->client_lock);
3848 ses = find_in_sessionid_hashtbl(sessionid, net, &status);
3849 if (!ses)
3850 goto out_client_lock;
3851 status = nfserr_wrong_cred;
3852 if (!nfsd4_mach_creds_match(ses->se_client, r))
3853 goto out_put_session;
3854 status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
3855 if (status)
3856 goto out_put_session;
3857 unhash_session(ses);
3858 spin_unlock(&nn->client_lock);
3859
3860 nfsd4_probe_callback_sync(ses->se_client);
3861
3862 spin_lock(&nn->client_lock);
3863 status = nfs_ok;
3864 out_put_session:
3865 nfsd4_put_session_locked(ses);
3866 out_client_lock:
3867 spin_unlock(&nn->client_lock);
3868 out:
3869 return status;
3870 }
3871
3872 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
3873 {
3874 struct nfs4_client *clp = ses->se_client;
3875 struct nfsd4_conn *c;
3876 __be32 status = nfs_ok;
3877 int ret;
3878
3879 spin_lock(&clp->cl_lock);
3880 c = __nfsd4_find_conn(new->cn_xprt, ses);
3881 if (c)
3882 goto out_free;
3883 status = nfserr_conn_not_bound_to_session;
3884 if (clp->cl_mach_cred)
3885 goto out_free;
3886 __nfsd4_hash_conn(new, ses);
3887 spin_unlock(&clp->cl_lock);
3888 ret = nfsd4_register_conn(new);
3889 if (ret)
3890 /* oops; xprt is already down: */
3891 nfsd4_conn_lost(&new->cn_xpt_user);
3892 return nfs_ok;
3893 out_free:
3894 spin_unlock(&clp->cl_lock);
3895 free_conn(new);
3896 return status;
3897 }
3898
3899 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
3900 {
3901 struct nfsd4_compoundargs *args = rqstp->rq_argp;
3902
3903 return args->opcnt > session->se_fchannel.maxops;
3904 }
3905
3906 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
3907 struct nfsd4_session *session)
3908 {
3909 struct xdr_buf *xb = &rqstp->rq_arg;
3910
3911 return xb->len > session->se_fchannel.maxreq_sz;
3912 }
3913
3914 static bool replay_matches_cache(struct svc_rqst *rqstp,
3915 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
3916 {
3917 struct nfsd4_compoundargs *argp = rqstp->rq_argp;
3918
3919 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
3920 (bool)seq->cachethis)
3921 return false;
3922 /*
3923 * If there's an error then the reply can have fewer ops than
3924 * the call.
3925 */
3926 if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
3927 return false;
3928 /*
3929 * But if we cached a reply with *more* ops than the call you're
3930 * sending us now, then this new call is clearly not really a
3931 * replay of the old one:
3932 */
3933 if (slot->sl_opcnt > argp->opcnt)
3934 return false;
3935 /* This is the only check the spec explicitly calls for: */
3936 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
3937 return false;
3938 /*
3939 * There may be more comparisons we could actually do, but the
3940 * spec doesn't require us to catch every case where the calls
3941 * don't match (that would require caching the call as well as
3942 * the reply), so we don't bother.
3943 */
3944 return true;
3945 }
3946
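/**
 * nfsd4_sequence - implement the NFSv4.1 SEQUENCE operation
 * @rqstp: the RPC transaction being executed
 * @cstate: NFSv4 COMPOUND state
 * @u: SEQUENCE arguments
 *
 * Validates the slot and sequence number, answers retransmissions
 * from the session reply cache, and binds the connection to the
 * session if it is not bound already.
 */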
3947 __be32
3948 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3949 union nfsd4_op_u *u)
3950 {
3951 struct nfsd4_sequence *seq = &u->sequence;
3952 struct nfsd4_compoundres *resp = rqstp->rq_resp;
3953 struct xdr_stream *xdr = resp->xdr;
3954 struct nfsd4_session *session;
3955 struct nfs4_client *clp;
3956 struct nfsd4_slot *slot;
3957 struct nfsd4_conn *conn;
3958 __be32 status;
3959 int buflen;
3960 struct net *net = SVC_NET(rqstp);
3961 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3962
3963 if (resp->opcnt != 1)
3964 return nfserr_sequence_pos;
3965
3966 /*
3967 * Will be either used or freed by nfsd4_sequence_check_conn
3968 * below.
3969 */
3970 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
3971 if (!conn)
3972 return nfserr_jukebox;
3973
3974 spin_lock(&nn->client_lock);
3975 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
3976 if (!session)
3977 goto out_no_session;
3978 clp = session->se_client;
3979
3980 status = nfserr_too_many_ops;
3981 if (nfsd4_session_too_many_ops(rqstp, session))
3982 goto out_put_session;
3983
3984 status = nfserr_req_too_big;
3985 if (nfsd4_request_too_big(rqstp, session))
3986 goto out_put_session;
3987
3988 status = nfserr_badslot;
3989 if (seq->slotid >= session->se_fchannel.maxreqs)
3990 goto out_put_session;
3991
3992 slot = session->se_slots[seq->slotid];
3993 dprintk("%s: slotid %d\n", __func__, seq->slotid);
3994
3995 /* We do not yet negotiate the number of slots, so set maxslots
3996  * to the session maxreqs, which is used to encode both
3997  * sr_highest_slotid and sr_target_slotid. */
3998 seq->maxslots = session->se_fchannel.maxreqs;
3999
4000 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
4001 slot->sl_flags & NFSD4_SLOT_INUSE);
4002 if (status == nfserr_replay_cache) {
4003 status = nfserr_seq_misordered;
4004 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
4005 goto out_put_session;
4006 status = nfserr_seq_false_retry;
4007 if (!replay_matches_cache(rqstp, seq, slot))
4008 goto out_put_session;
4009 cstate->slot = slot;
4010 cstate->session = session;
4011 cstate->clp = clp;
4012 /* Return the cached reply status and set cstate->status
4013 * for nfsd4_proc_compound processing */
4014 status = nfsd4_replay_cache_entry(resp, seq);
4015 cstate->status = nfserr_replay_cache;
4016 goto out;
4017 }
4018 if (status)
4019 goto out_put_session;
4020
4021 status = nfsd4_sequence_check_conn(conn, session);
4022 conn = NULL;
4023 if (status)
4024 goto out_put_session;
4025
4026 buflen = (seq->cachethis) ?
4027 session->se_fchannel.maxresp_cached :
4028 session->se_fchannel.maxresp_sz;
4029 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
4030 nfserr_rep_too_big;
4031 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
4032 goto out_put_session;
4033 svc_reserve(rqstp, buflen);
4034
4035 status = nfs_ok;
4036 /* Success! bump slot seqid */
4037 slot->sl_seqid = seq->seqid;
4038 slot->sl_flags |= NFSD4_SLOT_INUSE;
4039 if (seq->cachethis)
4040 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
4041 else
4042 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
4043
4044 cstate->slot = slot;
4045 cstate->session = session;
4046 cstate->clp = clp;
4047
4048 out:
4049 switch (clp->cl_cb_state) {
4050 case NFSD4_CB_DOWN:
4051 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
4052 break;
4053 case NFSD4_CB_FAULT:
4054 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
4055 break;
4056 default:
4057 seq->status_flags = 0;
4058 }
4059 if (!list_empty(&clp->cl_revoked))
4060 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
4061 out_no_session:
4062 if (conn)
4063 free_conn(conn);
4064 spin_unlock(&nn->client_lock);
4065 return status;
4066 out_put_session:
4067 nfsd4_put_session_locked(session);
4068 goto out_no_session;
4069 }
4070
4071 void
4072 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
4073 {
4074 struct nfsd4_compound_state *cs = &resp->cstate;
4075
4076 if (nfsd4_has_session(cs)) {
4077 if (cs->status != nfserr_replay_cache) {
4078 nfsd4_store_cache_entry(resp);
4079 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
4080 }
4081 /* Drop session reference that was taken in nfsd4_sequence() */
4082 nfsd4_put_session(cs->session);
4083 } else if (cs->clp)
4084 put_client_renew(cs->clp);
4085 }
4086
4087 __be32
4088 nfsd4_destroy_clientid(struct svc_rqst *rqstp,
4089 struct nfsd4_compound_state *cstate,
4090 union nfsd4_op_u *u)
4091 {
4092 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
4093 struct nfs4_client *conf, *unconf;
4094 struct nfs4_client *clp = NULL;
4095 __be32 status = 0;
4096 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4097
4098 spin_lock(&nn->client_lock);
4099 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
4100 conf = find_confirmed_client(&dc->clientid, true, nn);
4101 WARN_ON_ONCE(conf && unconf);
4102
4103 if (conf) {
4104 if (client_has_state(conf)) {
4105 status = nfserr_clientid_busy;
4106 goto out;
4107 }
4108 status = mark_client_expired_locked(conf);
4109 if (status)
4110 goto out;
4111 clp = conf;
4112 } else if (unconf)
4113 clp = unconf;
4114 else {
4115 status = nfserr_stale_clientid;
4116 goto out;
4117 }
4118 if (!nfsd4_mach_creds_match(clp, rqstp)) {
4119 clp = NULL;
4120 status = nfserr_wrong_cred;
4121 goto out;
4122 }
4123 trace_nfsd_clid_destroyed(&clp->cl_clientid);
4124 unhash_client_locked(clp);
4125 out:
4126 spin_unlock(&nn->client_lock);
4127 if (clp)
4128 expire_client(clp);
4129 return status;
4130 }
4131
4132 __be32
4133 nfsd4_reclaim_complete(struct svc_rqst *rqstp,
4134 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
4135 {
4136 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
4137 struct nfs4_client *clp = cstate->clp;
4138 __be32 status = 0;
4139
4140 if (rc->rca_one_fs) {
4141 if (!cstate->current_fh.fh_dentry)
4142 return nfserr_nofilehandle;
4143 /*
4144 * We don't take advantage of the rca_one_fs case.
4145 * That's OK, it's optional, we can safely ignore it.
4146 */
4147 return nfs_ok;
4148 }
4149
4150 status = nfserr_complete_already;
4151 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
4152 goto out;
4153
4154 status = nfserr_stale_clientid;
4155 if (is_client_expired(clp))
4156 /*
4157  * The following error isn't really legal.
4158  * But we only get here if the client has just explicitly
4159  * destroyed its own clientid.  Surely it no longer cares
4160  * what error it gets back on an operation for the dead
4161  * client.
4162  */
4163 goto out;
4164
4165 status = nfs_ok;
4166 trace_nfsd_clid_reclaim_complete(&clp->cl_clientid);
4167 nfsd4_client_record_create(clp);
4168 inc_reclaim_complete(clp);
4169 out:
4170 return status;
4171 }
4172
4173 __be32
4174 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4175 union nfsd4_op_u *u)
4176 {
4177 struct nfsd4_setclientid *setclid = &u->setclientid;
4178 struct xdr_netobj clname = setclid->se_name;
4179 nfs4_verifier clverifier = setclid->se_verf;
4180 struct nfs4_client *conf, *new;
4181 struct nfs4_client *unconf = NULL;
4182 __be32 status;
4183 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4184
4185 new = create_client(clname, rqstp, &clverifier);
4186 if (new == NULL)
4187 return nfserr_jukebox;
4188 spin_lock(&nn->client_lock);
4189 conf = find_confirmed_client_by_name(&clname, nn);
4190 if (conf && client_has_state(conf)) {
4191 status = nfserr_clid_inuse;
4192 if (clp_used_exchangeid(conf))
4193 goto out;
4194 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
4195 trace_nfsd_clid_cred_mismatch(conf, rqstp);
4196 goto out;
4197 }
4198 }
4199 unconf = find_unconfirmed_client_by_name(&clname, nn);
4200 if (unconf)
4201 unhash_client_locked(unconf);
4202 if (conf) {
4203 if (same_verf(&conf->cl_verifier, &clverifier)) {
4204 copy_clid(new, conf);
4205 gen_confirm(new, nn);
4206 } else
4207 trace_nfsd_clid_verf_mismatch(conf, rqstp,
4208 &clverifier);
4209 } else
4210 trace_nfsd_clid_fresh(new);
4211 new->cl_minorversion = 0;
4212 gen_callback(new, setclid, rqstp);
4213 add_to_unconfirmed(new);
4214 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
4215 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
4216 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
4217 new = NULL;
4218 status = nfs_ok;
4219 out:
4220 spin_unlock(&nn->client_lock);
4221 if (new)
4222 free_client(new);
4223 if (unconf) {
4224 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
4225 expire_client(unconf);
4226 }
4227 return status;
4228 }
4229
4230 __be32
4231 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
4232 struct nfsd4_compound_state *cstate,
4233 union nfsd4_op_u *u)
4234 {
4235 struct nfsd4_setclientid_confirm *setclientid_confirm =
4236 &u->setclientid_confirm;
4237 struct nfs4_client *conf, *unconf;
4238 struct nfs4_client *old = NULL;
4239 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
4240 clientid_t * clid = &setclientid_confirm->sc_clientid;
4241 __be32 status;
4242 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4243
4244 if (STALE_CLIENTID(clid, nn))
4245 return nfserr_stale_clientid;
4246
4247 spin_lock(&nn->client_lock);
4248 conf = find_confirmed_client(clid, false, nn);
4249 unconf = find_unconfirmed_client(clid, false, nn);
4250 /*
4251 * We try hard to give out unique clientid's, so if we get an
4252 * attempt to confirm the same clientid with a different cred,
4253 * the client may be buggy; this should never happen.
4254 *
4255 * Nevertheless, RFC 7530 recommends INUSE for this case:
4256 */
4257 status = nfserr_clid_inuse;
4258 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
4259 trace_nfsd_clid_cred_mismatch(unconf, rqstp);
4260 goto out;
4261 }
4262 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
4263 trace_nfsd_clid_cred_mismatch(conf, rqstp);
4264 goto out;
4265 }
4266 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
4267 if (conf && same_verf(&confirm, &conf->cl_confirm)) {
4268 status = nfs_ok;
4269 } else
4270 status = nfserr_stale_clientid;
4271 goto out;
4272 }
4273 status = nfs_ok;
4274 if (conf) {
4275 old = unconf;
4276 unhash_client_locked(old);
4277 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
4278 } else {
4279 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
4280 if (old) {
4281 status = nfserr_clid_inuse;
4282 if (client_has_state(old)
4283 && !same_creds(&unconf->cl_cred,
4284 &old->cl_cred)) {
4285 old = NULL;
4286 goto out;
4287 }
4288 status = mark_client_expired_locked(old);
4289 if (status) {
4290 old = NULL;
4291 goto out;
4292 }
4293 trace_nfsd_clid_replaced(&old->cl_clientid);
4294 }
4295 move_to_confirmed(unconf);
4296 conf = unconf;
4297 }
4298 get_client_locked(conf);
4299 spin_unlock(&nn->client_lock);
4300 if (conf == unconf)
4301 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
4302 nfsd4_probe_callback(conf);
4303 spin_lock(&nn->client_lock);
4304 put_client_renew_locked(conf);
4305 out:
4306 spin_unlock(&nn->client_lock);
4307 if (old)
4308 expire_client(old);
4309 return status;
4310 }
4311
4312 static struct nfs4_file *nfsd4_alloc_file(void)
4313 {
4314 return kmem_cache_alloc(file_slab, GFP_KERNEL);
4315 }
4316
4317 /* OPEN Share state helper functions */
4318
4319 static void nfsd4_file_init(const struct svc_fh *fh, struct nfs4_file *fp)
4320 {
4321 refcount_set(&fp->fi_ref, 1);
4322 spin_lock_init(&fp->fi_lock);
4323 INIT_LIST_HEAD(&fp->fi_stateids);
4324 INIT_LIST_HEAD(&fp->fi_delegations);
4325 INIT_LIST_HEAD(&fp->fi_clnt_odstate);
4326 fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle);
4327 fp->fi_deleg_file = NULL;
4328 fp->fi_had_conflict = false;
4329 fp->fi_share_deny = 0;
4330 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
4331 memset(fp->fi_access, 0, sizeof(fp->fi_access));
4332 fp->fi_aliased = false;
4333 fp->fi_inode = d_inode(fh->fh_dentry);
4334 #ifdef CONFIG_NFSD_PNFS
4335 INIT_LIST_HEAD(&fp->fi_lo_states);
4336 atomic_set(&fp->fi_lo_recalls, 0);
4337 #endif
4338 }
4339
4340 void
4341 nfsd4_free_slabs(void)
4342 {
4343 kmem_cache_destroy(client_slab);
4344 kmem_cache_destroy(openowner_slab);
4345 kmem_cache_destroy(lockowner_slab);
4346 kmem_cache_destroy(file_slab);
4347 kmem_cache_destroy(stateid_slab);
4348 kmem_cache_destroy(deleg_slab);
4349 kmem_cache_destroy(odstate_slab);
4350 }
4351
4352 int
4353 nfsd4_init_slabs(void)
4354 {
4355 client_slab = kmem_cache_create("nfsd4_clients",
4356 sizeof(struct nfs4_client), 0, 0, NULL);
4357 if (client_slab == NULL)
4358 goto out;
4359 openowner_slab = kmem_cache_create("nfsd4_openowners",
4360 sizeof(struct nfs4_openowner), 0, 0, NULL);
4361 if (openowner_slab == NULL)
4362 goto out_free_client_slab;
4363 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
4364 sizeof(struct nfs4_lockowner), 0, 0, NULL);
4365 if (lockowner_slab == NULL)
4366 goto out_free_openowner_slab;
4367 file_slab = kmem_cache_create("nfsd4_files",
4368 sizeof(struct nfs4_file), 0, 0, NULL);
4369 if (file_slab == NULL)
4370 goto out_free_lockowner_slab;
4371 stateid_slab = kmem_cache_create("nfsd4_stateids",
4372 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
4373 if (stateid_slab == NULL)
4374 goto out_free_file_slab;
4375 deleg_slab = kmem_cache_create("nfsd4_delegations",
4376 sizeof(struct nfs4_delegation), 0, 0, NULL);
4377 if (deleg_slab == NULL)
4378 goto out_free_stateid_slab;
4379 odstate_slab = kmem_cache_create("nfsd4_odstate",
4380 sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
4381 if (odstate_slab == NULL)
4382 goto out_free_deleg_slab;
4383 return 0;
4384
4385 out_free_deleg_slab:
4386 kmem_cache_destroy(deleg_slab);
4387 out_free_stateid_slab:
4388 kmem_cache_destroy(stateid_slab);
4389 out_free_file_slab:
4390 kmem_cache_destroy(file_slab);
4391 out_free_lockowner_slab:
4392 kmem_cache_destroy(lockowner_slab);
4393 out_free_openowner_slab:
4394 kmem_cache_destroy(openowner_slab);
4395 out_free_client_slab:
4396 kmem_cache_destroy(client_slab);
4397 out:
4398 return -ENOMEM;
4399 }
4400
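/*
 * The state shrinker does no direct reclaim: the count callback kicks
 * nfsd_shrinker_work so courtesy clients and delegations are reaped
 * asynchronously on laundry_wq, and the scan callback therefore
 * reports SHRINK_STOP.
 */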
4401 static unsigned long
4402 nfsd4_state_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
4403 {
4404 int count;
4405 struct nfsd_net *nn = shrink->private_data;
4406
4407 count = atomic_read(&nn->nfsd_courtesy_clients);
4408 if (!count)
4409 count = atomic_long_read(&num_delegations);
4410 if (count)
4411 queue_work(laundry_wq, &nn->nfsd_shrinker_work);
4412 return (unsigned long)count;
4413 }
4414
4415 static unsigned long
4416 nfsd4_state_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
4417 {
4418 return SHRINK_STOP;
4419 }
4420
4421 void
4422 nfsd4_init_leases_net(struct nfsd_net *nn)
4423 {
4424 struct sysinfo si;
4425 u64 max_clients;
4426
4427 nn->nfsd4_lease = 90; /* default lease time */
4428 nn->nfsd4_grace = 90;
4429 nn->somebody_reclaimed = false;
4430 nn->track_reclaim_completes = false;
4431 nn->clverifier_counter = get_random_u32();
4432 nn->clientid_base = get_random_u32();
4433 nn->clientid_counter = nn->clientid_base + 1;
4434 nn->s2s_cp_cl_id = nn->clientid_counter++;
4435
4436 atomic_set(&nn->nfs4_client_count, 0);
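/*
 * Scale the client limit with installed RAM: for example, an 8GB
 * host allows 8 * NFS4_CLIENTS_PER_GB clients, and hosts with less
 * than 1GB still get NFS4_CLIENTS_PER_GB as a floor.
 */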
4437 si_meminfo(&si);
4438 max_clients = (u64)si.totalram * si.mem_unit / (1024 * 1024 * 1024);
4439 max_clients *= NFS4_CLIENTS_PER_GB;
4440 nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB);
4441
4442 atomic_set(&nn->nfsd_courtesy_clients, 0);
4443 }
4444
4445 static void init_nfs4_replay(struct nfs4_replay *rp)
4446 {
4447 rp->rp_status = nfserr_serverfault;
4448 rp->rp_buflen = 0;
4449 rp->rp_buf = rp->rp_ibuf;
4450 mutex_init(&rp->rp_mutex);
4451 }
4452
4453 static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
4454 struct nfs4_stateowner *so)
4455 {
4456 if (!nfsd4_has_session(cstate)) {
4457 mutex_lock(&so->so_replay.rp_mutex);
4458 cstate->replay_owner = nfs4_get_stateowner(so);
4459 }
4460 }
4461
4462 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
4463 {
4464 struct nfs4_stateowner *so = cstate->replay_owner;
4465
4466 if (so != NULL) {
4467 cstate->replay_owner = NULL;
4468 mutex_unlock(&so->so_replay.rp_mutex);
4469 nfs4_put_stateowner(so);
4470 }
4471 }
4472
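/*
 * Common allocator for open-owners and lock-owners: duplicates the
 * client-supplied owner name and initializes the replay state used
 * for NFSv4.0 seqid replay handling.
 */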
4473 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
4474 {
4475 struct nfs4_stateowner *sop;
4476
4477 sop = kmem_cache_alloc(slab, GFP_KERNEL);
4478 if (!sop)
4479 return NULL;
4480
4481 xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
4482 if (!sop->so_owner.data) {
4483 kmem_cache_free(slab, sop);
4484 return NULL;
4485 }
4486
4487 INIT_LIST_HEAD(&sop->so_stateids);
4488 sop->so_client = clp;
4489 init_nfs4_replay(&sop->so_replay);
4490 atomic_set(&sop->so_count, 1);
4491 return sop;
4492 }
4493
4494 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
4495 {
4496 lockdep_assert_held(&clp->cl_lock);
4497
4498 list_add(&oo->oo_owner.so_strhash,
4499 &clp->cl_ownerstr_hashtbl[strhashval]);
4500 list_add(&oo->oo_perclient, &clp->cl_openowners);
4501 }
4502
4503 static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
4504 {
4505 unhash_openowner_locked(openowner(so));
4506 }
4507
4508 static void nfs4_free_openowner(struct nfs4_stateowner *so)
4509 {
4510 struct nfs4_openowner *oo = openowner(so);
4511
4512 kmem_cache_free(openowner_slab, oo);
4513 }
4514
4515 static const struct nfs4_stateowner_operations openowner_ops = {
4516 .so_unhash = nfs4_unhash_openowner,
4517 .so_free = nfs4_free_openowner,
4518 };
4519
4520 static struct nfs4_ol_stateid *
4521 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4522 {
4523 struct nfs4_ol_stateid *local, *ret = NULL;
4524 struct nfs4_openowner *oo = open->op_openowner;
4525
4526 lockdep_assert_held(&fp->fi_lock);
4527
4528 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
4529 /* ignore lock owners */
4530 if (local->st_stateowner->so_is_open_owner == 0)
4531 continue;
4532 if (local->st_stateowner != &oo->oo_owner)
4533 continue;
4534 if (local->st_stid.sc_type == NFS4_OPEN_STID) {
4535 ret = local;
4536 refcount_inc(&ret->st_stid.sc_count);
4537 break;
4538 }
4539 }
4540 return ret;
4541 }
4542
4543 static __be32
4544 nfsd4_verify_open_stid(struct nfs4_stid *s)
4545 {
4546 __be32 ret = nfs_ok;
4547
4548 switch (s->sc_type) {
4549 default:
4550 break;
4551 case 0:
4552 case NFS4_CLOSED_STID:
4553 case NFS4_CLOSED_DELEG_STID:
4554 ret = nfserr_bad_stateid;
4555 break;
4556 case NFS4_REVOKED_DELEG_STID:
4557 ret = nfserr_deleg_revoked;
4558 }
4559 return ret;
4560 }
4561
4562 /* Lock the stateid st_mutex, and deal with races with CLOSE */
4563 static __be32
4564 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
4565 {
4566 __be32 ret;
4567
4568 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
4569 ret = nfsd4_verify_open_stid(&stp->st_stid);
4570 if (ret != nfs_ok)
4571 mutex_unlock(&stp->st_mutex);
4572 return ret;
4573 }
4574
4575 static struct nfs4_ol_stateid *
4576 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4577 {
4578 struct nfs4_ol_stateid *stp;
4579 for (;;) {
4580 spin_lock(&fp->fi_lock);
4581 stp = nfsd4_find_existing_open(fp, open);
4582 spin_unlock(&fp->fi_lock);
4583 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
4584 break;
4585 nfs4_put_stid(&stp->st_stid);
4586 }
4587 return stp;
4588 }
4589
4590 static struct nfs4_openowner *
4591 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
4592 struct nfsd4_compound_state *cstate)
4593 {
4594 struct nfs4_client *clp = cstate->clp;
4595 struct nfs4_openowner *oo, *ret;
4596
4597 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
4598 if (!oo)
4599 return NULL;
4600 oo->oo_owner.so_ops = &openowner_ops;
4601 oo->oo_owner.so_is_open_owner = 1;
4602 oo->oo_owner.so_seqid = open->op_seqid;
4603 oo->oo_flags = 0;
4604 if (nfsd4_has_session(cstate))
4605 oo->oo_flags |= NFS4_OO_CONFIRMED;
4606 oo->oo_time = 0;
4607 oo->oo_last_closed_stid = NULL;
4608 INIT_LIST_HEAD(&oo->oo_close_lru);
4609 spin_lock(&clp->cl_lock);
4610 ret = find_openstateowner_str_locked(strhashval, open, clp);
4611 if (ret == NULL) {
4612 hash_openowner(oo, clp, strhashval);
4613 ret = oo;
4614 } else
4615 nfs4_free_stateowner(&oo->oo_owner);
4616
4617 spin_unlock(&clp->cl_lock);
4618 return ret;
4619 }
4620
4621 static struct nfs4_ol_stateid *
4622 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
4623 {
4624
4625 struct nfs4_openowner *oo = open->op_openowner;
4626 struct nfs4_ol_stateid *retstp = NULL;
4627 struct nfs4_ol_stateid *stp;
4628
4629 stp = open->op_stp;
4630 /* Initialize and take st_mutex outside the spinlocks; a mutex cannot be acquired while holding a spinlock */
4631 mutex_init(&stp->st_mutex);
4632 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
4633
4634 retry:
4635 spin_lock(&oo->oo_owner.so_client->cl_lock);
4636 spin_lock(&fp->fi_lock);
4637
4638 retstp = nfsd4_find_existing_open(fp, open);
4639 if (retstp)
4640 goto out_unlock;
4641
4642 open->op_stp = NULL;
4643 refcount_inc(&stp->st_stid.sc_count);
4644 stp->st_stid.sc_type = NFS4_OPEN_STID;
4645 INIT_LIST_HEAD(&stp->st_locks);
4646 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
4647 get_nfs4_file(fp);
4648 stp->st_stid.sc_file = fp;
4649 stp->st_access_bmap = 0;
4650 stp->st_deny_bmap = 0;
4651 stp->st_openstp = NULL;
4652 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
4653 list_add(&stp->st_perfile, &fp->fi_stateids);
4654
4655 out_unlock:
4656 spin_unlock(&fp->fi_lock);
4657 spin_unlock(&oo->oo_owner.so_client->cl_lock);
4658 if (retstp) {
4659 /* Handle races with CLOSE */
4660 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
4661 nfs4_put_stid(&retstp->st_stid);
4662 goto retry;
4663 }
4664 /* To keep mutex tracking happy */
4665 mutex_unlock(&stp->st_mutex);
4666 stp = retstp;
4667 }
4668 return stp;
4669 }
4670
4671 /*
4672 * In the 4.0 case we need to keep the owners around a little while to handle
4673 * CLOSE replay. We still do need to release any file access that is held by
4674 * them before returning however.
4675 */
4676 static void
4677 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
4678 {
4679 struct nfs4_ol_stateid *last;
4680 struct nfs4_openowner *oo = openowner(s->st_stateowner);
4681 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
4682 nfsd_net_id);
4683
4684 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
4685
4686 /*
4687 * We know that we hold one reference via nfsd4_close, and another
4688 * "persistent" reference for the client. If the refcount is higher
4689 * than 2, then there are still calls in progress that are using this
4690 * stateid. We can't put the sc_file reference until they are finished.
4691 * Wait for the refcount to drop to 2. Since it has been unhashed,
4692 * there should be no danger of the refcount going back up again at
4693 * this point.
4694 */
4695 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
4696
4697 release_all_access(s);
4698 if (s->st_stid.sc_file) {
4699 put_nfs4_file(s->st_stid.sc_file);
4700 s->st_stid.sc_file = NULL;
4701 }
4702
4703 spin_lock(&nn->client_lock);
4704 last = oo->oo_last_closed_stid;
4705 oo->oo_last_closed_stid = s;
4706 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
4707 oo->oo_time = ktime_get_boottime_seconds();
4708 spin_unlock(&nn->client_lock);
4709 if (last)
4710 nfs4_put_stid(&last->st_stid);
4711 }
4712
4713 static noinline_for_stack struct nfs4_file *
4714 nfsd4_file_hash_lookup(const struct svc_fh *fhp)
4715 {
4716 struct inode *inode = d_inode(fhp->fh_dentry);
4717 struct rhlist_head *tmp, *list;
4718 struct nfs4_file *fi;
4719
4720 rcu_read_lock();
4721 list = rhltable_lookup(&nfs4_file_rhltable, &inode,
4722 nfs4_file_rhash_params);
4723 rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
4724 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
4725 if (refcount_inc_not_zero(&fi->fi_ref)) {
4726 rcu_read_unlock();
4727 return fi;
4728 }
4729 }
4730 }
4731 rcu_read_unlock();
4732 return NULL;
4733 }
4734
4735 /*
4736 * On hash insertion, identify entries with the same inode but
4737 * distinct filehandles. They will all be on the list returned
4738 * by rhltable_lookup().
4739 *
4740 * inode->i_lock prevents racing insertions from adding an entry
4741 * for the same inode/fhp pair twice.
4742 */
4743 static noinline_for_stack struct nfs4_file *
4744 nfsd4_file_hash_insert(struct nfs4_file *new, const struct svc_fh *fhp)
4745 {
4746 struct inode *inode = d_inode(fhp->fh_dentry);
4747 struct rhlist_head *tmp, *list;
4748 struct nfs4_file *ret = NULL;
4749 bool alias_found = false;
4750 struct nfs4_file *fi;
4751 int err;
4752
4753 rcu_read_lock();
4754 spin_lock(&inode->i_lock);
4755
4756 list = rhltable_lookup(&nfs4_file_rhltable, &inode,
4757 nfs4_file_rhash_params);
4758 rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
4759 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
4760 if (refcount_inc_not_zero(&fi->fi_ref))
4761 ret = fi;
4762 } else
4763 fi->fi_aliased = alias_found = true;
4764 }
4765 if (ret)
4766 goto out_unlock;
4767
4768 nfsd4_file_init(fhp, new);
4769 err = rhltable_insert(&nfs4_file_rhltable, &new->fi_rlist,
4770 nfs4_file_rhash_params);
4771 if (err)
4772 goto out_unlock;
4773
4774 new->fi_aliased = alias_found;
4775 ret = new;
4776
4777 out_unlock:
4778 spin_unlock(&inode->i_lock);
4779 rcu_read_unlock();
4780 return ret;
4781 }
4782
4783 static noinline_for_stack void nfsd4_file_hash_remove(struct nfs4_file *fi)
4784 {
4785 rhltable_remove(&nfs4_file_rhltable, &fi->fi_rlist,
4786 nfs4_file_rhash_params);
4787 }
4788
4789 /*
4790  * Called to check for share-deny conflicts when a READ arrives with
4791  * the all-zero stateid, or a WRITE with the all-zero or all-ones stateid
4792 */
4793 static __be32
4794 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
4795 {
4796 struct nfs4_file *fp;
4797 __be32 ret = nfs_ok;
4798
4799 fp = nfsd4_file_hash_lookup(current_fh);
4800 if (!fp)
4801 return ret;
4802
4803 /* Check for conflicting share reservations */
4804 spin_lock(&fp->fi_lock);
4805 if (fp->fi_share_deny & deny_type)
4806 ret = nfserr_locked;
4807 spin_unlock(&fp->fi_lock);
4808 put_nfs4_file(fp);
4809 return ret;
4810 }
4811
4812 static bool nfsd4_deleg_present(const struct inode *inode)
4813 {
4814 struct file_lock_context *ctx = locks_inode_context(inode);
4815
4816 return ctx && !list_empty_careful(&ctx->flc_lease);
4817 }
4818
4819 /**
4820 * nfsd_wait_for_delegreturn - wait for delegations to be returned
4821 * @rqstp: the RPC transaction being executed
4822 * @inode: in-core inode of the file being waited for
4823 *
4824 * The timeout prevents deadlock if all nfsd threads happen to be
4825 * tied up waiting for returning delegations.
4826 *
4827 * Return values:
4828 * %true: delegation was returned
4829 * %false: timed out waiting for delegreturn
4830 */
4831 bool nfsd_wait_for_delegreturn(struct svc_rqst *rqstp, struct inode *inode)
4832 {
4833 long __maybe_unused timeo;
4834
4835 timeo = wait_var_event_timeout(inode, !nfsd4_deleg_present(inode),
4836 NFSD_DELEGRETURN_TIMEOUT);
4837 trace_nfsd_delegret_wakeup(rqstp, inode, timeo);
4838 return timeo > 0;
4839 }
4840
4841 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
4842 {
4843 struct nfs4_delegation *dp = cb_to_delegation(cb);
4844 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
4845 nfsd_net_id);
4846
4847 block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
4848
4849 /*
4850 * We can't do this in nfsd_break_deleg_cb because it is
4851 * already holding inode->i_lock.
4852 *
4853 * If the dl_time != 0, then we know that it has already been
4854 * queued for a lease break. Don't queue it again.
4855 */
4856 spin_lock(&state_lock);
4857 if (delegation_hashed(dp) && dp->dl_time == 0) {
4858 dp->dl_time = ktime_get_boottime_seconds();
4859 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
4860 }
4861 spin_unlock(&state_lock);
4862 }
4863
4864 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
4865 struct rpc_task *task)
4866 {
4867 struct nfs4_delegation *dp = cb_to_delegation(cb);
4868
4869 trace_nfsd_cb_recall_done(&dp->dl_stid.sc_stateid, task);
4870
4871 if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID ||
4872 dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID)
4873 return 1;
4874
4875 switch (task->tk_status) {
4876 case 0:
4877 return 1;
4878 case -NFS4ERR_DELAY:
4879 rpc_delay(task, 2 * HZ);
4880 return 0;
4881 case -EBADHANDLE:
4882 case -NFS4ERR_BAD_STATEID:
4883 /*
4884 * Race: client probably got cb_recall before open reply
4885 * granting delegation.
4886 */
4887 if (dp->dl_retries--) {
4888 rpc_delay(task, 2 * HZ);
4889 return 0;
4890 }
4891 fallthrough;
4892 default:
4893 return 1;
4894 }
4895 }
4896
4897 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
4898 {
4899 struct nfs4_delegation *dp = cb_to_delegation(cb);
4900
4901 nfs4_put_stid(&dp->dl_stid);
4902 }
4903
4904 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
4905 .prepare = nfsd4_cb_recall_prepare,
4906 .done = nfsd4_cb_recall_done,
4907 .release = nfsd4_cb_recall_release,
4908 };
4909
4910 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
4911 {
4912 /*
4913 * We're assuming the state code never drops its reference
4914 * without first removing the lease. Since we're in this lease
4915 * callback (and since the lease code is serialized by the
4916 * flc_lock) we know the server hasn't removed the lease yet, and
4917 * we know it's safe to take a reference.
4918 */
4919 refcount_inc(&dp->dl_stid.sc_count);
4920 WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall));
4921 }
4922
4923 /* Called from break_lease() with flc_lock held. */
4924 static bool
4925 nfsd_break_deleg_cb(struct file_lock *fl)
4926 {
4927 struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
4928 struct nfs4_file *fp = dp->dl_stid.sc_file;
4929 struct nfs4_client *clp = dp->dl_stid.sc_client;
4930 struct nfsd_net *nn;
4931
4932 trace_nfsd_cb_recall(&dp->dl_stid);
4933
4934 dp->dl_recalled = true;
4935 atomic_inc(&clp->cl_delegs_in_recall);
4936 if (try_to_expire_client(clp)) {
4937 nn = net_generic(clp->net, nfsd_net_id);
4938 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
4939 }
4940
4941 /*
4942 * We don't want the locks code to timeout the lease for us;
4943 * we'll remove it ourself if a delegation isn't returned
4944 * in time:
4945 */
4946 fl->fl_break_time = 0;
4947
4948 fp->fi_had_conflict = true;
4949 nfsd_break_one_deleg(dp);
4950 return false;
4951 }
4952
4953 /**
4954 * nfsd_breaker_owns_lease - Check if lease conflict was resolved
4955 * @fl: Lock state to check
4956 *
4957 * Return values:
4958 * %true: Lease conflict was resolved
4959 * %false: Lease conflict was not resolved.
4960 */
4961 static bool nfsd_breaker_owns_lease(struct file_lock *fl)
4962 {
4963 struct nfs4_delegation *dl = fl->fl_owner;
4964 struct svc_rqst *rqst;
4965 struct nfs4_client *clp;
4966
4967 if (!i_am_nfsd())
4968 return false;
4969 rqst = kthread_data(current);
4970 /* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
4971 if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
4972 return false;
4973 clp = *(rqst->rq_lease_breaker);
4974 return dl->dl_stid.sc_client == clp;
4975 }
4976
4977 static int
4978 nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
4979 struct list_head *dispose)
4980 {
4981 struct nfs4_delegation *dp = (struct nfs4_delegation *)onlist->fl_owner;
4982 struct nfs4_client *clp = dp->dl_stid.sc_client;
4983
4984 if (arg & F_UNLCK) {
4985 if (dp->dl_recalled)
4986 atomic_dec(&clp->cl_delegs_in_recall);
4987 return lease_modify(onlist, arg, dispose);
4988 } else
4989 return -EAGAIN;
4990 }
4991
4992 static const struct lock_manager_operations nfsd_lease_mng_ops = {
4993 .lm_breaker_owns_lease = nfsd_breaker_owns_lease,
4994 .lm_break = nfsd_break_deleg_cb,
4995 .lm_change = nfsd_change_deleg_cb,
4996 };
4997
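/*
 * NFSv4.0 seqid discipline: so_seqid is the next seqid expected from
 * the owner; an incoming seqid of so_seqid - 1 is a retransmission of
 * the previous request (nfserr_replay_me), a matching seqid is the
 * next request in order, and anything else is nfserr_bad_seqid.
 * Sessions (NFSv4.1+) make this check unnecessary.
 */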
4998 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
4999 {
5000 if (nfsd4_has_session(cstate))
5001 return nfs_ok;
5002 if (seqid == so->so_seqid - 1)
5003 return nfserr_replay_me;
5004 if (seqid == so->so_seqid)
5005 return nfs_ok;
5006 return nfserr_bad_seqid;
5007 }
5008
5009 static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions,
5010 struct nfsd_net *nn)
5011 {
5012 struct nfs4_client *found;
5013
5014 spin_lock(&nn->client_lock);
5015 found = find_confirmed_client(clid, sessions, nn);
5016 if (found)
5017 atomic_inc(&found->cl_rpc_users);
5018 spin_unlock(&nn->client_lock);
5019 return found;
5020 }
5021
5022 static __be32 set_client(clientid_t *clid,
5023 struct nfsd4_compound_state *cstate,
5024 struct nfsd_net *nn)
5025 {
5026 if (cstate->clp) {
5027 if (!same_clid(&cstate->clp->cl_clientid, clid))
5028 return nfserr_stale_clientid;
5029 return nfs_ok;
5030 }
5031 if (STALE_CLIENTID(clid, nn))
5032 return nfserr_stale_clientid;
5033 /*
5034 * We're in the 4.0 case (otherwise the SEQUENCE op would have
5035 * set cstate->clp), so session = false:
5036 */
5037 cstate->clp = lookup_clientid(clid, false, nn);
5038 if (!cstate->clp)
5039 return nfserr_expired;
5040 return nfs_ok;
5041 }
5042
5043 __be32
5044 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
5045 struct nfsd4_open *open, struct nfsd_net *nn)
5046 {
5047 clientid_t *clientid = &open->op_clientid;
5048 struct nfs4_client *clp = NULL;
5049 unsigned int strhashval;
5050 struct nfs4_openowner *oo = NULL;
5051 __be32 status;
5052
5053 /*
5054 * In case we need it later, after we've already created the
5055 * file and don't want to risk a further failure:
5056 */
5057 open->op_file = nfsd4_alloc_file();
5058 if (open->op_file == NULL)
5059 return nfserr_jukebox;
5060
5061 status = set_client(clientid, cstate, nn);
5062 if (status)
5063 return status;
5064 clp = cstate->clp;
5065
5066 strhashval = ownerstr_hashval(&open->op_owner);
5067 oo = find_openstateowner_str(strhashval, open, clp);
5068 open->op_openowner = oo;
5069 if (!oo) {
5070 goto new_owner;
5071 }
5072 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
5073 /* Replace unconfirmed owners without checking for replay. */
5074 release_openowner(oo);
5075 open->op_openowner = NULL;
5076 goto new_owner;
5077 }
5078 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
5079 if (status)
5080 return status;
5081 goto alloc_stateid;
5082 new_owner:
5083 oo = alloc_init_open_stateowner(strhashval, open, cstate);
5084 if (oo == NULL)
5085 return nfserr_jukebox;
5086 open->op_openowner = oo;
5087 alloc_stateid:
5088 open->op_stp = nfs4_alloc_open_stateid(clp);
5089 if (!open->op_stp)
5090 return nfserr_jukebox;
5091
5092 if (nfsd4_has_session(cstate) &&
5093 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
5094 open->op_odstate = alloc_clnt_odstate(clp);
5095 if (!open->op_odstate)
5096 return nfserr_jukebox;
5097 }
5098
5099 return nfs_ok;
5100 }
5101
5102 static inline __be32
5103 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
5104 {
5105 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
5106 return nfserr_openmode;
5107 else
5108 return nfs_ok;
5109 }
5110
5111 static int share_access_to_flags(u32 share_access)
5112 {
5113 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
5114 }
5115
5116 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
5117 {
5118 struct nfs4_stid *ret;
5119
5120 ret = find_stateid_by_type(cl, s,
5121 NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
5122 if (!ret)
5123 return NULL;
5124 return delegstateid(ret);
5125 }
5126
5127 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
5128 {
5129 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
5130 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
5131 }
5132
5133 static __be32
5134 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
5135 struct nfs4_delegation **dp)
5136 {
5137 int flags;
5138 __be32 status = nfserr_bad_stateid;
5139 struct nfs4_delegation *deleg;
5140
5141 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
5142 if (deleg == NULL)
5143 goto out;
5144 if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
5145 nfs4_put_stid(&deleg->dl_stid);
5146 if (cl->cl_minorversion)
5147 status = nfserr_deleg_revoked;
5148 goto out;
5149 }
5150 flags = share_access_to_flags(open->op_share_access);
5151 status = nfs4_check_delegmode(deleg, flags);
5152 if (status) {
5153 nfs4_put_stid(&deleg->dl_stid);
5154 goto out;
5155 }
5156 *dp = deleg;
5157 out:
5158 if (!nfsd4_is_deleg_cur(open))
5159 return nfs_ok;
5160 if (status)
5161 return status;
5162 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5163 return nfs_ok;
5164 }
5165
5166 static inline int nfs4_access_to_access(u32 nfs4_access)
5167 {
5168 int flags = 0;
5169
5170 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
5171 flags |= NFSD_MAY_READ;
5172 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
5173 flags |= NFSD_MAY_WRITE;
5174 return flags;
5175 }
5176
5177 static inline __be32
5178 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
5179 struct nfsd4_open *open)
5180 {
5181 struct iattr iattr = {
5182 .ia_valid = ATTR_SIZE,
5183 .ia_size = 0,
5184 };
5185 struct nfsd_attrs attrs = {
5186 .na_iattr = &iattr,
5187 };
5188 if (!open->op_truncate)
5189 return 0;
5190 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
5191 return nfserr_inval;
5192 return nfsd_setattr(rqstp, fh, &attrs, 0, (time64_t)0);
5193 }
5194
5195 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
5196 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
5197 struct nfsd4_open *open, bool new_stp)
5198 {
5199 struct nfsd_file *nf = NULL;
5200 __be32 status;
5201 int oflag = nfs4_access_to_omode(open->op_share_access);
5202 int access = nfs4_access_to_access(open->op_share_access);
5203 unsigned char old_access_bmap, old_deny_bmap;
5204
5205 spin_lock(&fp->fi_lock);
5206
5207 /*
5208 * Are we trying to set a deny mode that would conflict with
5209 * current access?
5210 */
5211 status = nfs4_file_check_deny(fp, open->op_share_deny);
5212 if (status != nfs_ok) {
5213 if (status != nfserr_share_denied) {
5214 spin_unlock(&fp->fi_lock);
5215 goto out;
5216 }
5217 if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
5218 stp, open->op_share_deny, false))
5219 status = nfserr_jukebox;
5220 spin_unlock(&fp->fi_lock);
5221 goto out;
5222 }
5223
5224 /* set access to the file */
5225 status = nfs4_file_get_access(fp, open->op_share_access);
5226 if (status != nfs_ok) {
5227 if (status != nfserr_share_denied) {
5228 spin_unlock(&fp->fi_lock);
5229 goto out;
5230 }
5231 if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
5232 stp, open->op_share_access, true))
5233 status = nfserr_jukebox;
5234 spin_unlock(&fp->fi_lock);
5235 goto out;
5236 }
5237
5238 /* Set access bits in stateid */
5239 old_access_bmap = stp->st_access_bmap;
5240 set_access(open->op_share_access, stp);
5241
5242 /* Set new deny mask */
5243 old_deny_bmap = stp->st_deny_bmap;
5244 set_deny(open->op_share_deny, stp);
5245 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
5246
5247 if (!fp->fi_fds[oflag]) {
5248 spin_unlock(&fp->fi_lock);
5249
5250 status = nfsd_file_acquire_opened(rqstp, cur_fh, access,
5251 open->op_filp, &nf);
5252 if (status != nfs_ok)
5253 goto out_put_access;
5254
5255 spin_lock(&fp->fi_lock);
5256 if (!fp->fi_fds[oflag]) {
5257 fp->fi_fds[oflag] = nf;
5258 nf = NULL;
5259 }
5260 }
5261 spin_unlock(&fp->fi_lock);
5262 if (nf)
5263 nfsd_file_put(nf);
5264
5265 status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
5266 access));
5267 if (status)
5268 goto out_put_access;
5269
5270 status = nfsd4_truncate(rqstp, cur_fh, open);
5271 if (status)
5272 goto out_put_access;
5273 out:
5274 return status;
5275 out_put_access:
5276 stp->st_access_bmap = old_access_bmap;
5277 nfs4_file_put_access(fp, open->op_share_access);
5278 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
5279 goto out;
5280 }
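
/*
 * Locking note for the above: fi_lock is a spinlock, so it must be
 * dropped around nfsd_file_acquire_opened(), which can block. Another
 * thread may install fi_fds[oflag] in that window, so the slot is
 * re-checked under the lock and the loser drops its extra reference
 * with nfsd_file_put().
 */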
5281
5282 static __be32
5283 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp,
5284 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
5285 struct nfsd4_open *open)
5286 {
5287 __be32 status;
5288 unsigned char old_deny_bmap = stp->st_deny_bmap;
5289
5290 if (!test_access(open->op_share_access, stp))
5291 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open, false);
5292
5293 /* test and set deny mode */
5294 spin_lock(&fp->fi_lock);
5295 status = nfs4_file_check_deny(fp, open->op_share_deny);
5296 switch (status) {
5297 case nfs_ok:
5298 set_deny(open->op_share_deny, stp);
5299 fp->fi_share_deny |=
5300 (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
5301 break;
5302 case nfserr_share_denied:
5303 if (nfs4_resolve_deny_conflicts_locked(fp, false,
5304 stp, open->op_share_deny, false))
5305 status = nfserr_jukebox;
5306 break;
5307 }
5308 spin_unlock(&fp->fi_lock);
5309
5310 if (status != nfs_ok)
5311 return status;
5312
5313 status = nfsd4_truncate(rqstp, cur_fh, open);
5314 if (status != nfs_ok)
5315 reset_union_bmap_deny(old_deny_bmap, stp);
5316 return status;
5317 }
5318
5319 /* Should we give out recallable state?: */
5320 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
5321 {
5322 if (clp->cl_cb_state == NFSD4_CB_UP)
5323 return true;
5324 /*
5325 * In the sessions case, since we don't have to establish a
5326 * separate connection for callbacks, we assume it's OK
5327 * until we hear otherwise:
5328 */
5329 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
5330 }
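
/*
 * For NFSv4.0 clients, by contrast, only NFSD4_CB_UP is good enough:
 * their callback traffic uses a separate connection that the server
 * must have verified before handing out recallable state.
 */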
5331
5332 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
5333 int flag)
5334 {
5335 struct file_lock *fl;
5336
5337 fl = locks_alloc_lock();
5338 if (!fl)
5339 return NULL;
5340 fl->fl_lmops = &nfsd_lease_mng_ops;
5341 fl->fl_flags = FL_DELEG;
5342 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
5343 fl->fl_end = OFFSET_MAX;
5344 fl->fl_owner = (fl_owner_t)dp;
5345 fl->fl_pid = current->tgid;
5346 fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
5347 return fl;
5348 }
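
/*
 * The lease built above is intended to cover the whole file: fl_end is
 * set to OFFSET_MAX, while fl_start is assumed to remain 0 from
 * locks_alloc_lock()'s zero-initialized allocation.
 */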
5349
5350 static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
5351 struct nfs4_file *fp)
5352 {
5353 struct nfs4_ol_stateid *st;
5354 struct file *f = fp->fi_deleg_file->nf_file;
5355 struct inode *ino = file_inode(f);
5356 int writes;
5357
5358 writes = atomic_read(&ino->i_writecount);
5359 if (!writes)
5360 return 0;
5361 /*
5362 * There could be multiple filehandles (hence multiple
5363 * nfs4_files) referencing this file, but that's not too
5364 * common; let's just give up in that case rather than
5365 * trying to go look up all the clients using that other
5366 * nfs4_file as well:
5367 */
5368 if (fp->fi_aliased)
5369 return -EAGAIN;
5370 /*
5371 * If there's a close in progress, make sure that we see it
5372 * clear any fi_fds[] entries before we see it decrement
5373 * i_writecount:
5374 */
5375 smp_mb__after_atomic();
5376
5377 if (fp->fi_fds[O_WRONLY])
5378 writes--;
5379 if (fp->fi_fds[O_RDWR])
5380 writes--;
5381 if (writes > 0)
5382 return -EAGAIN; /* There may be non-NFSv4 writers */
5383 /*
5384 * It's possible there are non-NFSv4 write opens in progress,
5385 * but if they haven't incremented i_writecount yet then they
5386 * also haven't called break lease yet; so, they'll break this
5387 * lease soon enough. So, all that's left to check for is NFSv4
5388 * opens:
5389 */
5390 spin_lock(&fp->fi_lock);
5391 list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
5392 if (st->st_openstp == NULL /* it's an open */ &&
5393 access_permit_write(st) &&
5394 st->st_stid.sc_client != clp) {
5395 spin_unlock(&fp->fi_lock);
5396 return -EAGAIN;
5397 }
5398 }
5399 spin_unlock(&fp->fi_lock);
5400 /*
5401 * There's a small chance that we could be racing with another
5402 * NFSv4 open. However, any open that hasn't added itself to
5403 * the fi_stateids list also hasn't called break_lease yet; so,
5404 * they'll break this lease soon enough.
5405 */
5406 return 0;
5407 }
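
/*
 * Worked example of the i_writecount accounting above: one local
 * (non-NFS) writer plus nfsd's own O_WRONLY and O_RDWR opens gives
 * i_writecount == 3; subtracting one per populated fi_fds[] slot
 * leaves 1 > 0, so the delegation attempt is refused with -EAGAIN.
 */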
5408
5409 /*
5410 * It's possible that, between opening the dentry and setting the delegation,
5411 * the file has been renamed or unlinked. Redo the lookup to verify that this
5412 * hasn't happened.
5413 */
5414 static int
5415 nfsd4_verify_deleg_dentry(struct nfsd4_open *open, struct nfs4_file *fp,
5416 struct svc_fh *parent)
5417 {
5418 struct svc_export *exp;
5419 struct dentry *child;
5420 __be32 err;
5421
5422 err = nfsd_lookup_dentry(open->op_rqstp, parent,
5423 open->op_fname, open->op_fnamelen,
5424 &exp, &child);
5425
5426 if (err)
5427 return -EAGAIN;
5428
5429 exp_put(exp);
5430 dput(child);
5431 if (child != file_dentry(fp->fi_deleg_file->nf_file))
5432 return -EAGAIN;
5433
5434 return 0;
5435 }
5436
5437 /*
5438 * We avoid breaking delegations held by a client due to its own activity, but
5439 * clearing setuid/setgid bits on a write is an implicit activity and the client
5440 * may not notice, and may continue using the old mode. Avoid giving out a delegation
5441 * on setuid/setgid files when the client is requesting an open for write.
5442 */
5443 static int
5444 nfsd4_verify_setuid_write(struct nfsd4_open *open, struct nfsd_file *nf)
5445 {
5446 struct inode *inode = file_inode(nf->nf_file);
5447
5448 if ((open->op_share_access & NFS4_SHARE_ACCESS_WRITE) &&
5449 (inode->i_mode & (S_ISUID|S_ISGID)))
5450 return -EAGAIN;
5451 return 0;
5452 }
5453
5454 static struct nfs4_delegation *
5455 nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
5456 struct svc_fh *parent)
5457 {
5458 int status = 0;
5459 struct nfs4_client *clp = stp->st_stid.sc_client;
5460 struct nfs4_file *fp = stp->st_stid.sc_file;
5461 struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate;
5462 struct nfs4_delegation *dp;
5463 struct nfsd_file *nf = NULL;
5464 struct file_lock *fl;
5465 u32 dl_type;
5466
5467 /*
5468 * The fi_had_conflict and nfs4_delegation_exists checks
5469 * here are just optimizations; we'll need to recheck them at
5470 * the end:
5471 */
5472 if (fp->fi_had_conflict)
5473 return ERR_PTR(-EAGAIN);
5474
5475 /*
5476 * Try for a write delegation first. RFC8881 section 10.4 says:
5477 *
5478 * "An OPEN_DELEGATE_WRITE delegation allows the client to handle,
5479 * on its own, all opens."
5480 *
5481 * Furthermore the client can use a write delegation for most READ
5482 * operations as well, so we require an O_RDWR file here.
5483 *
5484 * Offer a write delegation in the case of a BOTH open, and ensure
5485 * we get the O_RDWR descriptor.
5486 */
5487 if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) == NFS4_SHARE_ACCESS_BOTH) {
5488 nf = find_rw_file(fp);
5489 dl_type = NFS4_OPEN_DELEGATE_WRITE;
5490 }
5491
5492 /*
5493 * If the file is being opened O_RDONLY or we couldn't get an O_RDWR
5494 * file for some reason, then try for a read delegation instead.
5495 */
5496 if (!nf && (open->op_share_access & NFS4_SHARE_ACCESS_READ)) {
5497 nf = find_readable_file(fp);
5498 dl_type = NFS4_OPEN_DELEGATE_READ;
5499 }
5500
5501 if (!nf)
5502 return ERR_PTR(-EAGAIN);
5503
5504 spin_lock(&state_lock);
5505 spin_lock(&fp->fi_lock);
5506 if (nfs4_delegation_exists(clp, fp))
5507 status = -EAGAIN;
5508 else if (nfsd4_verify_setuid_write(open, nf))
5509 status = -EAGAIN;
5510 else if (!fp->fi_deleg_file) {
5511 fp->fi_deleg_file = nf;
5512 /* increment early to prevent fi_deleg_file from being
5513 * cleared */
5514 fp->fi_delegees = 1;
5515 nf = NULL;
5516 } else
5517 fp->fi_delegees++;
5518 spin_unlock(&fp->fi_lock);
5519 spin_unlock(&state_lock);
5520 if (nf)
5521 nfsd_file_put(nf);
5522 if (status)
5523 return ERR_PTR(status);
5524
5525 status = -ENOMEM;
5526 dp = alloc_init_deleg(clp, fp, odstate, dl_type);
5527 if (!dp)
5528 goto out_delegees;
5529
5530 fl = nfs4_alloc_init_lease(dp, dl_type);
5531 if (!fl)
5532 goto out_clnt_odstate;
5533
5534 status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
5535 if (fl)
5536 locks_free_lock(fl);
5537 if (status)
5538 goto out_clnt_odstate;
5539
5540 if (parent) {
5541 status = nfsd4_verify_deleg_dentry(open, fp, parent);
5542 if (status)
5543 goto out_unlock;
5544 }
5545
5546 status = nfsd4_check_conflicting_opens(clp, fp);
5547 if (status)
5548 goto out_unlock;
5549
5550 /*
5551 * Now that the deleg is set, check again to ensure that nothing
5552 * raced in and changed the mode while we weren't looking.
5553 */
5554 status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file);
5555 if (status)
5556 goto out_unlock;
5557
5558 status = -EAGAIN;
5559 if (fp->fi_had_conflict)
5560 goto out_unlock;
5561
5562 spin_lock(&state_lock);
5563 spin_lock(&fp->fi_lock);
5564 status = hash_delegation_locked(dp, fp);
5565 spin_unlock(&fp->fi_lock);
5566 spin_unlock(&state_lock);
5567
5568 if (status)
5569 goto out_unlock;
5570
5571 return dp;
5572 out_unlock:
5573 vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
5574 out_clnt_odstate:
5575 put_clnt_odstate(dp->dl_clnt_odstate);
5576 nfs4_put_stid(&dp->dl_stid);
5577 out_delegees:
5578 put_deleg_file(fp);
5579 return ERR_PTR(status);
5580 }
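
/*
 * The ordering above is what makes the re-checks meaningful: the lease
 * is installed first, so any open or mode change that races in after
 * that point breaks the lease and sets fi_had_conflict. Passing the
 * final checks therefore means no conflict slipped in unnoticed before
 * hash_delegation_locked() published the delegation.
 */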
5581
5582 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
5583 {
5584 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5585 if (status == -EAGAIN)
5586 open->op_why_no_deleg = WND4_CONTENTION;
5587 else {
5588 open->op_why_no_deleg = WND4_RESOURCE;
5589 switch (open->op_deleg_want) {
5590 case NFS4_SHARE_WANT_READ_DELEG:
5591 case NFS4_SHARE_WANT_WRITE_DELEG:
5592 case NFS4_SHARE_WANT_ANY_DELEG:
5593 break;
5594 case NFS4_SHARE_WANT_CANCEL:
5595 open->op_why_no_deleg = WND4_CANCELLED;
5596 break;
5597 case NFS4_SHARE_WANT_NO_DELEG:
5598 WARN_ON_ONCE(1);
5599 }
5600 }
5601 }
5602
5603 /*
5604 * The Linux NFS server does not offer write delegations to NFSv4.0
5605 * clients in order to avoid conflicts between write delegations and
5606 * GETATTRs requesting CHANGE or SIZE attributes.
5607 *
5608 * With NFSv4.1 and later minorversions, the SEQUENCE operation that
5609 * begins each COMPOUND contains a client ID. Delegation recall can
5610 * be avoided when the server recognizes that the client sending the
5611 * GETATTR also holds the write delegation it conflicts with.
5612 *
5613 * However, the NFSv4.0 protocol does not enable a server to
5614 * determine that a GETATTR originated from the client holding the
5615 * conflicting delegation versus coming from some other client. Per
5616 * RFC 7530 Section 16.7.5, the server must recall or send a
5617 * CB_GETATTR even when the GETATTR originates from the client that
5618 * holds the conflicting delegation.
5619 *
5620 * An NFSv4.0 client can trigger a pathological situation if it
5621 * always sends a DELEGRETURN preceded by a conflicting GETATTR in
5622 * the same COMPOUND. COMPOUND execution will always stop at the
5623 * GETATTR and the DELEGRETURN will never get executed. The server
5624 * eventually revokes the delegation, which can result in loss of
5625 * open or lock state.
5626 */
5627 static void
5628 nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
5629 struct svc_fh *currentfh)
5630 {
5631 struct nfs4_delegation *dp;
5632 struct nfs4_openowner *oo = openowner(stp->st_stateowner);
5633 struct nfs4_client *clp = stp->st_stid.sc_client;
5634 struct svc_fh *parent = NULL;
5635 int cb_up;
5636 int status = 0;
5637
5638 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
5639 open->op_recall = false;
5640 switch (open->op_claim_type) {
5641 case NFS4_OPEN_CLAIM_PREVIOUS:
5642 if (!cb_up)
5643 open->op_recall = true;
5644 break;
5645 case NFS4_OPEN_CLAIM_NULL:
5646 parent = currentfh;
5647 fallthrough;
5648 case NFS4_OPEN_CLAIM_FH:
5649 /*
5650 * Let's not give out any delegations till everyone's
5651 * had the chance to reclaim theirs, *and* until
5652 * NLM locks have all been reclaimed:
5653 */
5654 if (locks_in_grace(clp->net))
5655 goto out_no_deleg;
5656 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
5657 goto out_no_deleg;
5658 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE &&
5659 !clp->cl_minorversion)
5660 goto out_no_deleg;
5661 break;
5662 default:
5663 goto out_no_deleg;
5664 }
5665 dp = nfs4_set_delegation(open, stp, parent);
5666 if (IS_ERR(dp))
5667 goto out_no_deleg;
5668
5669 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
5670
5671 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) {
5672 open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE;
5673 trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid);
5674 } else {
5675 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
5676 trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
5677 }
5678 nfs4_put_stid(&dp->dl_stid);
5679 return;
5680 out_no_deleg:
5681 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
5682 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
5683 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
5684 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
5685 open->op_recall = true;
5686 }
5687
5688 /* 4.1 client asking for a delegation? */
5689 if (open->op_deleg_want)
5690 nfsd4_open_deleg_none_ext(open, status);
5691 return;
5692 }
5693
5694 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
5695 struct nfs4_delegation *dp)
5696 {
5697 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
5698 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5699 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5700 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
5701 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
5702 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5703 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5704 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
5705 }
5706 /* Otherwise the client must be confused wanting a delegation
5707 * it already has, therefore we don't return
5708 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
5709 */
5710 }
5711
5712 /**
5713 * nfsd4_process_open2 - finish open processing
5714 * @rqstp: the RPC transaction being executed
5715 * @current_fh: NFSv4 COMPOUND's current filehandle
5716 * @open: OPEN arguments
5717 *
5718 * If successful, (1) truncate the file if open->op_truncate was
5719 * set, (2) set open->op_stateid, (3) set open->op_delegation.
5720 *
5721 * Returns %nfs_ok on success; otherwise an nfs4stat value in
5722 * network byte order is returned.
5723 */
5724 __be32
5725 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
5726 {
5727 struct nfsd4_compoundres *resp = rqstp->rq_resp;
5728 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
5729 struct nfs4_file *fp = NULL;
5730 struct nfs4_ol_stateid *stp = NULL;
5731 struct nfs4_delegation *dp = NULL;
5732 __be32 status;
5733 bool new_stp = false;
5734
5735 /*
5736 * Lookup file; if found, lookup stateid and check open request,
5737 * and check for delegations in the process of being recalled.
5738 * If not found, create the nfs4_file struct.
5739 */
5740 fp = nfsd4_file_hash_insert(open->op_file, current_fh);
5741 if (unlikely(!fp))
5742 return nfserr_jukebox;
5743 if (fp != open->op_file) {
5744 status = nfs4_check_deleg(cl, open, &dp);
5745 if (status)
5746 goto out;
5747 stp = nfsd4_find_and_lock_existing_open(fp, open);
5748 } else {
5749 open->op_file = NULL;
5750 status = nfserr_bad_stateid;
5751 if (nfsd4_is_deleg_cur(open))
5752 goto out;
5753 }
5754
5755 if (!stp) {
5756 stp = init_open_stateid(fp, open);
5757 if (!open->op_stp)
5758 new_stp = true;
5759 }
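
	/*
	 * init_open_stateid() either returns an already-hashed stateid or
	 * consumes open->op_stp to create a new one, leaving op_stp NULL;
	 * a NULL op_stp afterwards is therefore the "newly created" signal
	 * tracked in new_stp.
	 */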
5760
5761 /*
5762 * OPEN the file, or upgrade an existing OPEN.
5763 * If truncate fails, the OPEN fails.
5764 *
5765 * stp is already locked.
5766 */
5767 if (!new_stp) {
5768 /* Stateid was found, this is an OPEN upgrade */
5769 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
5770 if (status) {
5771 mutex_unlock(&stp->st_mutex);
5772 goto out;
5773 }
5774 } else {
5775 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open, true);
5776 if (status) {
5777 stp->st_stid.sc_type = NFS4_CLOSED_STID;
5778 release_open_stateid(stp);
5779 mutex_unlock(&stp->st_mutex);
5780 goto out;
5781 }
5782
5783 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
5784 open->op_odstate);
5785 if (stp->st_clnt_odstate == open->op_odstate)
5786 open->op_odstate = NULL;
5787 }
5788
5789 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
5790 mutex_unlock(&stp->st_mutex);
5791
5792 if (nfsd4_has_session(&resp->cstate)) {
5793 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
5794 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5795 open->op_why_no_deleg = WND4_NOT_WANTED;
5796 goto nodeleg;
5797 }
5798 }
5799
5800 /*
5801 * Attempt to hand out a delegation. No error return, because the
5802 * OPEN succeeds even if we fail.
5803 */
5804 nfs4_open_delegation(open, stp, &resp->cstate.current_fh);
5805 nodeleg:
5806 status = nfs_ok;
5807 trace_nfsd_open(&stp->st_stid.sc_stateid);
5808 out:
5809 /* 4.1 client trying to upgrade/downgrade delegation? */
5810 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
5811 open->op_deleg_want)
5812 nfsd4_deleg_xgrade_none_ext(open, dp);
5813
5814 if (fp)
5815 put_nfs4_file(fp);
5816 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
5817 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5818 /*
5819 * To finish the open response, we just need to set the rflags.
5820 */
5821 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
5822 if (nfsd4_has_session(&resp->cstate))
5823 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
5824 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
5825 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
5826
5827 if (dp)
5828 nfs4_put_stid(&dp->dl_stid);
5829 if (stp)
5830 nfs4_put_stid(&stp->st_stid);
5831
5832 return status;
5833 }
5834
5835 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
5836 struct nfsd4_open *open)
5837 {
5838 if (open->op_openowner) {
5839 struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
5840
5841 nfsd4_cstate_assign_replay(cstate, so);
5842 nfs4_put_stateowner(so);
5843 }
5844 if (open->op_file)
5845 kmem_cache_free(file_slab, open->op_file);
5846 if (open->op_stp)
5847 nfs4_put_stid(&open->op_stp->st_stid);
5848 if (open->op_odstate)
5849 kmem_cache_free(odstate_slab, open->op_odstate);
5850 }
5851
5852 __be32
5853 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5854 union nfsd4_op_u *u)
5855 {
5856 clientid_t *clid = &u->renew;
5857 struct nfs4_client *clp;
5858 __be32 status;
5859 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5860
5861 trace_nfsd_clid_renew(clid);
5862 status = set_client(clid, cstate, nn);
5863 if (status)
5864 return status;
5865 clp = cstate->clp;
5866 if (!list_empty(&clp->cl_delegations)
5867 && clp->cl_cb_state != NFSD4_CB_UP)
5868 return nfserr_cb_path_down;
5869 return nfs_ok;
5870 }
5871
5872 void
5873 nfsd4_end_grace(struct nfsd_net *nn)
5874 {
5875 /* do nothing if grace period already ended */
5876 if (nn->grace_ended)
5877 return;
5878
5879 trace_nfsd_grace_complete(nn);
5880 nn->grace_ended = true;
5881 /*
5882 * If the server goes down again right now, an NFSv4
5883 * client will still be allowed to reclaim after it comes back up,
5884 * even if it hasn't yet had a chance to reclaim state this time.
5885 *
5886 */
5887 nfsd4_record_grace_done(nn);
5888 /*
5889 * At this point, NFSv4 clients can still reclaim. But if the
5890 * server crashes, any that have not yet reclaimed will be out
5891 * of luck on the next boot.
5892 *
5893 * (NFSv4.1+ clients are considered to have reclaimed once they
5894 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
5895 * have reclaimed after their first OPEN.)
5896 */
5897 locks_end_grace(&nn->nfsd4_manager);
5898 /*
5899 * At this point, and once lockd and/or any other containers
5900 * exit their grace period, further reclaims will fail and
5901 * regular locking can resume.
5902 */
5903 }
5904
5905 /*
5906 * If we've waited a lease period but there are still clients trying to
5907 * reclaim, wait a little longer to give them a chance to finish.
5908 */
5909 static bool clients_still_reclaiming(struct nfsd_net *nn)
5910 {
5911 time64_t double_grace_period_end = nn->boot_time +
5912 2 * nn->nfsd4_lease;
5913
5914 if (nn->track_reclaim_completes &&
5915 atomic_read(&nn->nr_reclaim_complete) ==
5916 nn->reclaim_str_hashtbl_size)
5917 return false;
5918 if (!nn->somebody_reclaimed)
5919 return false;
5920 nn->somebody_reclaimed = false;
5921 /*
5922 * If we've given them *two* lease times to reclaim, and they're
5923 * still not done, give up:
5924 */
5925 if (ktime_get_boottime_seconds() > double_grace_period_end)
5926 return false;
5927 return true;
5928 }
5929
5930 struct laundry_time {
5931 time64_t cutoff;
5932 time64_t new_timeo;
5933 };
5934
5935 static bool state_expired(struct laundry_time *lt, time64_t last_refresh)
5936 {
5937 time64_t time_remaining;
5938
5939 if (last_refresh < lt->cutoff)
5940 return true;
5941 time_remaining = last_refresh - lt->cutoff;
5942 lt->new_timeo = min(lt->new_timeo, time_remaining);
5943 return false;
5944 }
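
/*
 * Worked example: with a 90s lease, cutoff is "now - 90s". A client
 * that last renewed 30s ago has last_refresh = now - 30s > cutoff, so
 * it survives, and new_timeo shrinks toward the 60s remaining until it
 * would expire; nfs4_laundromat() reschedules itself on that basis.
 */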
5945
5946 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
5947 void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
5948 {
5949 spin_lock_init(&nn->nfsd_ssc_lock);
5950 INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list);
5951 init_waitqueue_head(&nn->nfsd_ssc_waitq);
5952 }
5953 EXPORT_SYMBOL_GPL(nfsd4_ssc_init_umount_work);
5954
5955 /*
5956 * This is called when nfsd is being shut down, after all inter_ssc
5957 * cleanup has been done, to destroy the ssc delayed unmount list.
5958 */
5959 static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn)
5960 {
5961 struct nfsd4_ssc_umount_item *ni = NULL;
5962 struct nfsd4_ssc_umount_item *tmp;
5963
5964 spin_lock(&nn->nfsd_ssc_lock);
5965 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
5966 list_del(&ni->nsui_list);
5967 spin_unlock(&nn->nfsd_ssc_lock);
5968 mntput(ni->nsui_vfsmount);
5969 kfree(ni);
5970 spin_lock(&nn->nfsd_ssc_lock);
5971 }
5972 spin_unlock(&nn->nfsd_ssc_lock);
5973 }
5974
5975 static void nfsd4_ssc_expire_umount(struct nfsd_net *nn)
5976 {
5977 bool do_wakeup = false;
5978 struct nfsd4_ssc_umount_item *ni = NULL;
5979 struct nfsd4_ssc_umount_item *tmp;
5980
5981 spin_lock(&nn->nfsd_ssc_lock);
5982 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
5983 if (time_after(jiffies, ni->nsui_expire)) {
5984 if (refcount_read(&ni->nsui_refcnt) > 1)
5985 continue;
5986
5987 /* mark as being unmounted */
5988 ni->nsui_busy = true;
5989 spin_unlock(&nn->nfsd_ssc_lock);
5990 mntput(ni->nsui_vfsmount);
5991 spin_lock(&nn->nfsd_ssc_lock);
5992
5993 /* waiters need to start from the beginning of the list */
5994 list_del(&ni->nsui_list);
5995 kfree(ni);
5996
5997 /* wakeup ssc_connect waiters */
5998 do_wakeup = true;
5999 continue;
6000 }
6001 break;
6002 }
6003 if (do_wakeup)
6004 wake_up_all(&nn->nfsd_ssc_waitq);
6005 spin_unlock(&nn->nfsd_ssc_lock);
6006 }
6007 #endif
6008
6009 /* Check if any lock belonging to this lockowner has any blockers */
6010 static bool
6011 nfs4_lockowner_has_blockers(struct nfs4_lockowner *lo)
6012 {
6013 struct file_lock_context *ctx;
6014 struct nfs4_ol_stateid *stp;
6015 struct nfs4_file *nf;
6016
6017 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
6018 nf = stp->st_stid.sc_file;
6019 ctx = locks_inode_context(nf->fi_inode);
6020 if (!ctx)
6021 continue;
6022 if (locks_owner_has_blockers(ctx, lo))
6023 return true;
6024 }
6025 return false;
6026 }
6027
6028 static bool
6029 nfs4_anylock_blockers(struct nfs4_client *clp)
6030 {
6031 int i;
6032 struct nfs4_stateowner *so;
6033 struct nfs4_lockowner *lo;
6034
6035 if (atomic_read(&clp->cl_delegs_in_recall))
6036 return true;
6037 spin_lock(&clp->cl_lock);
6038 for (i = 0; i < OWNER_HASH_SIZE; i++) {
6039 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[i],
6040 so_strhash) {
6041 if (so->so_is_open_owner)
6042 continue;
6043 lo = lockowner(so);
6044 if (nfs4_lockowner_has_blockers(lo)) {
6045 spin_unlock(&clp->cl_lock);
6046 return true;
6047 }
6048 }
6049 }
6050 spin_unlock(&clp->cl_lock);
6051 return false;
6052 }
6053
6054 static void
6055 nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
6056 struct laundry_time *lt)
6057 {
6058 unsigned int maxreap, reapcnt = 0;
6059 struct list_head *pos, *next;
6060 struct nfs4_client *clp;
6061
6062 maxreap = (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) ?
6063 NFSD_CLIENT_MAX_TRIM_PER_RUN : 0;
6064 INIT_LIST_HEAD(reaplist);
6065 spin_lock(&nn->client_lock);
6066 list_for_each_safe(pos, next, &nn->client_lru) {
6067 clp = list_entry(pos, struct nfs4_client, cl_lru);
6068 if (clp->cl_state == NFSD4_EXPIRABLE)
6069 goto exp_client;
6070 if (!state_expired(lt, clp->cl_time))
6071 break;
6072 if (!atomic_read(&clp->cl_rpc_users)) {
6073 if (clp->cl_state == NFSD4_ACTIVE)
6074 atomic_inc(&nn->nfsd_courtesy_clients);
6075 clp->cl_state = NFSD4_COURTESY;
6076 }
6077 if (!client_has_state(clp))
6078 goto exp_client;
6079 if (!nfs4_anylock_blockers(clp))
6080 if (reapcnt >= maxreap)
6081 continue;
6082 exp_client:
6083 if (!mark_client_expired_locked(clp)) {
6084 list_add(&clp->cl_lru, reaplist);
6085 reapcnt++;
6086 }
6087 }
6088 spin_unlock(&nn->client_lock);
6089 }
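
/*
 * Policy summary for the loop above: a lease-expired client with no
 * state is expired immediately; an idle one with state is first
 * demoted to a COURTESY client. Courtesy clients whose locks block
 * other lockers are always expired, while harmless ones are trimmed
 * only up to NFSD_CLIENT_MAX_TRIM_PER_RUN per pass, and only once the
 * client count reaches nfs4_max_clients.
 */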
6090
6091 static void
6092 nfs4_get_courtesy_client_reaplist(struct nfsd_net *nn,
6093 struct list_head *reaplist)
6094 {
6095 unsigned int maxreap = 0, reapcnt = 0;
6096 struct list_head *pos, *next;
6097 struct nfs4_client *clp;
6098
6099 maxreap = NFSD_CLIENT_MAX_TRIM_PER_RUN;
6100 INIT_LIST_HEAD(reaplist);
6101
6102 spin_lock(&nn->client_lock);
6103 list_for_each_safe(pos, next, &nn->client_lru) {
6104 clp = list_entry(pos, struct nfs4_client, cl_lru);
6105 if (clp->cl_state == NFSD4_ACTIVE)
6106 break;
6107 if (reapcnt >= maxreap)
6108 break;
6109 if (!mark_client_expired_locked(clp)) {
6110 list_add(&clp->cl_lru, reaplist);
6111 reapcnt++;
6112 }
6113 }
6114 spin_unlock(&nn->client_lock);
6115 }
6116
6117 static void
6118 nfs4_process_client_reaplist(struct list_head *reaplist)
6119 {
6120 struct list_head *pos, *next;
6121 struct nfs4_client *clp;
6122
6123 list_for_each_safe(pos, next, reaplist) {
6124 clp = list_entry(pos, struct nfs4_client, cl_lru);
6125 trace_nfsd_clid_purged(&clp->cl_clientid);
6126 list_del_init(&clp->cl_lru);
6127 expire_client(clp);
6128 }
6129 }
6130
6131 static time64_t
6132 nfs4_laundromat(struct nfsd_net *nn)
6133 {
6134 struct nfs4_openowner *oo;
6135 struct nfs4_delegation *dp;
6136 struct nfs4_ol_stateid *stp;
6137 struct nfsd4_blocked_lock *nbl;
6138 struct list_head *pos, *next, reaplist;
6139 struct laundry_time lt = {
6140 .cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease,
6141 .new_timeo = nn->nfsd4_lease
6142 };
6143 struct nfs4_cpntf_state *cps;
6144 copy_stateid_t *cps_t;
6145 int i;
6146
6147 if (clients_still_reclaiming(nn)) {
6148 lt.new_timeo = 0;
6149 goto out;
6150 }
6151 nfsd4_end_grace(nn);
6152
6153 spin_lock(&nn->s2s_cp_lock);
6154 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
6155 cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
6156 if (cps->cp_stateid.cs_type == NFS4_COPYNOTIFY_STID &&
6157 state_expired(&lt, cps->cpntf_time))
6158 _free_cpntf_state_locked(nn, cps);
6159 }
6160 spin_unlock(&nn->s2s_cp_lock);
6161 nfs4_get_client_reaplist(nn, &reaplist, &lt);
6162 nfs4_process_client_reaplist(&reaplist);
6163
6164 spin_lock(&state_lock);
6165 list_for_each_safe(pos, next, &nn->del_recall_lru) {
6166 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
6167 if (!state_expired(&lt, dp->dl_time))
6168 break;
6169 WARN_ON(!unhash_delegation_locked(dp));
6170 list_add(&dp->dl_recall_lru, &reaplist);
6171 }
6172 spin_unlock(&state_lock);
6173 while (!list_empty(&reaplist)) {
6174 dp = list_first_entry(&reaplist, struct nfs4_delegation,
6175 dl_recall_lru);
6176 list_del_init(&dp->dl_recall_lru);
6177 revoke_delegation(dp);
6178 }
6179
6180 spin_lock(&nn->client_lock);
6181 while (!list_empty(&nn->close_lru)) {
6182 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
6183 oo_close_lru);
6184 if (!state_expired(&lt, oo->oo_time))
6185 break;
6186 list_del_init(&oo->oo_close_lru);
6187 stp = oo->oo_last_closed_stid;
6188 oo->oo_last_closed_stid = NULL;
6189 spin_unlock(&nn->client_lock);
6190 nfs4_put_stid(&stp->st_stid);
6191 spin_lock(&nn->client_lock);
6192 }
6193 spin_unlock(&nn->client_lock);
6194
6195 /*
6196 * It's possible for a client to try and acquire an already held lock
6197 * that is being held for a long time, and then lose interest in it.
6198 * So, we clean out any un-revisited request after a lease period
6199 * under the assumption that the client is no longer interested.
6200 *
6201 * RFC5661, sec. 9.6 states that the client must not rely on getting
6202 * notifications and must continue to poll for locks, even when the
6203 * server supports them. Thus this shouldn't lead to clients blocking
6204 * indefinitely once the lock does become free.
6205 */
6206 BUG_ON(!list_empty(&reaplist));
6207 spin_lock(&nn->blocked_locks_lock);
6208 while (!list_empty(&nn->blocked_locks_lru)) {
6209 nbl = list_first_entry(&nn->blocked_locks_lru,
6210 struct nfsd4_blocked_lock, nbl_lru);
6211 if (!state_expired(&lt, nbl->nbl_time))
6212 break;
6213 list_move(&nbl->nbl_lru, &reaplist);
6214 list_del_init(&nbl->nbl_list);
6215 }
6216 spin_unlock(&nn->blocked_locks_lock);
6217
6218 while (!list_empty(&reaplist)) {
6219 nbl = list_first_entry(&reaplist,
6220 struct nfsd4_blocked_lock, nbl_lru);
6221 list_del_init(&nbl->nbl_lru);
6222 free_blocked_lock(nbl);
6223 }
6224 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
6225 /* service the server-to-server copy delayed unmount list */
6226 nfsd4_ssc_expire_umount(nn);
6227 #endif
6228 out:
6229 return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
6230 }
6231
6232 static void laundromat_main(struct work_struct *);
6233
6234 static void
6235 laundromat_main(struct work_struct *laundry)
6236 {
6237 time64_t t;
6238 struct delayed_work *dwork = to_delayed_work(laundry);
6239 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
6240 laundromat_work);
6241
6242 t = nfs4_laundromat(nn);
6243 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
6244 }
6245
6246 static void
6247 courtesy_client_reaper(struct nfsd_net *nn)
6248 {
6249 struct list_head reaplist;
6250
6251 nfs4_get_courtesy_client_reaplist(nn, &reaplist);
6252 nfs4_process_client_reaplist(&reaplist);
6253 }
6254
6255 static void
6256 deleg_reaper(struct nfsd_net *nn)
6257 {
6258 struct list_head *pos, *next;
6259 struct nfs4_client *clp;
6260 struct list_head cblist;
6261
6262 INIT_LIST_HEAD(&cblist);
6263 spin_lock(&nn->client_lock);
6264 list_for_each_safe(pos, next, &nn->client_lru) {
6265 clp = list_entry(pos, struct nfs4_client, cl_lru);
6266 if (clp->cl_state != NFSD4_ACTIVE ||
6267 list_empty(&clp->cl_delegations) ||
6268 atomic_read(&clp->cl_delegs_in_recall) ||
6269 test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags) ||
6270 (ktime_get_boottime_seconds() -
6271 clp->cl_ra_time < 5)) {
6272 continue;
6273 }
6274 list_add(&clp->cl_ra_cblist, &cblist);
6275
6276 /* release in nfsd4_cb_recall_any_release */
6277 atomic_inc(&clp->cl_rpc_users);
6278 set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
6279 clp->cl_ra_time = ktime_get_boottime_seconds();
6280 }
6281 spin_unlock(&nn->client_lock);
6282
6283 while (!list_empty(&cblist)) {
6284 clp = list_first_entry(&cblist, struct nfs4_client,
6285 cl_ra_cblist);
6286 list_del_init(&clp->cl_ra_cblist);
6287 clp->cl_ra->ra_keep = 0;
6288 clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG);
6289 trace_nfsd_cb_recall_any(clp->cl_ra);
6290 nfsd4_run_cb(&clp->cl_ra->ra_cb);
6291 }
6292 }
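
/*
 * deleg_reaper() trims delegations cooperatively: rather than revoking
 * them, it sends CB_RECALL_ANY (with ra_keep == 0 and the
 * RCA4_TYPE_MASK_RDATA_DLG bit set) to each eligible client, i.e. one
 * that is active, holds delegations, has none in recall, and was not
 * asked within the last 5 seconds.
 */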
6293
6294 static void
6295 nfsd4_state_shrinker_worker(struct work_struct *work)
6296 {
6297 struct nfsd_net *nn = container_of(work, struct nfsd_net,
6298 nfsd_shrinker_work);
6299
6300 courtesy_client_reaper(nn);
6301 deleg_reaper(nn);
6302 }
6303
6304 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
6305 {
6306 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
6307 return nfserr_bad_stateid;
6308 return nfs_ok;
6309 }
6310
6311 static
6312 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
6313 {
6314 __be32 status = nfserr_openmode;
6315
6316 /* For lock stateids, we test the parent open, not the lock: */
6317 if (stp->st_openstp)
6318 stp = stp->st_openstp;
6319 if ((flags & WR_STATE) && !access_permit_write(stp))
6320 goto out;
6321 if ((flags & RD_STATE) && !access_permit_read(stp))
6322 goto out;
6323 status = nfs_ok;
6324 out:
6325 return status;
6326 }
6327
6328 static inline __be32
6329 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
6330 {
6331 if (ONE_STATEID(stateid) && (flags & RD_STATE))
6332 return nfs_ok;
6333 else if (opens_in_grace(net)) {
6334 /* The answer in the remaining cases depends on the existence of
6335 * conflicting state; so we must wait out the grace period. */
6336 return nfserr_grace;
6337 } else if (flags & WR_STATE)
6338 return nfs4_share_conflict(current_fh,
6339 NFS4_SHARE_DENY_WRITE);
6340 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
6341 return nfs4_share_conflict(current_fh,
6342 NFS4_SHARE_DENY_READ);
6343 }
6344
6345 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
6346 {
6347 /*
6348 * When sessions are used the stateid generation number is ignored
6349 * when it is zero.
6350 */
6351 if (has_session && in->si_generation == 0)
6352 return nfs_ok;
6353
6354 if (in->si_generation == ref->si_generation)
6355 return nfs_ok;
6356
6357 /* If the client sends us a stateid from the future, it's buggy: */
6358 if (nfsd4_stateid_generation_after(in, ref))
6359 return nfserr_bad_stateid;
6360 /*
6361 * However, we could see a stateid from the past, even from a
6362 * non-buggy client. For example, if the client sends a lock
6363 * while some IO is outstanding, the lock may bump si_generation
6364 * while the IO is still in flight. The client could avoid that
6365 * situation by waiting for responses on all the IO requests,
6366 * but better performance may result in retrying IO that
6367 * receives an old_stateid error if requests are rarely
6368 * reordered in flight:
6369 */
6370 return nfserr_old_stateid;
6371 }
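
/*
 * Example: a client presenting generation 3 against the server's 5 is
 * merely behind (a reply may have been reordered) and gets the
 * retryable nfserr_old_stateid; presenting 7 against 5 claims state
 * transitions that never happened and gets nfserr_bad_stateid.
 */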
6372
6373 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
6374 {
6375 __be32 ret;
6376
6377 spin_lock(&s->sc_lock);
6378 ret = nfsd4_verify_open_stid(s);
6379 if (ret == nfs_ok)
6380 ret = check_stateid_generation(in, &s->sc_stateid, has_session);
6381 spin_unlock(&s->sc_lock);
6382 return ret;
6383 }
6384
6385 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
6386 {
6387 if (ols->st_stateowner->so_is_open_owner &&
6388 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
6389 return nfserr_bad_stateid;
6390 return nfs_ok;
6391 }
6392
6393 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
6394 {
6395 struct nfs4_stid *s;
6396 __be32 status = nfserr_bad_stateid;
6397
6398 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
6399 CLOSE_STATEID(stateid))
6400 return status;
6401 spin_lock(&cl->cl_lock);
6402 s = find_stateid_locked(cl, stateid);
6403 if (!s)
6404 goto out_unlock;
6405 status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
6406 if (status)
6407 goto out_unlock;
6408 switch (s->sc_type) {
6409 case NFS4_DELEG_STID:
6410 status = nfs_ok;
6411 break;
6412 case NFS4_REVOKED_DELEG_STID:
6413 status = nfserr_deleg_revoked;
6414 break;
6415 case NFS4_OPEN_STID:
6416 case NFS4_LOCK_STID:
6417 status = nfsd4_check_openowner_confirmed(openlockstateid(s));
6418 break;
6419 default:
6420 printk("unknown stateid type %x\n", s->sc_type);
6421 fallthrough;
6422 case NFS4_CLOSED_STID:
6423 case NFS4_CLOSED_DELEG_STID:
6424 status = nfserr_bad_stateid;
6425 }
6426 out_unlock:
6427 spin_unlock(&cl->cl_lock);
6428 return status;
6429 }
6430
6431 __be32
6432 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
6433 stateid_t *stateid, unsigned char typemask,
6434 struct nfs4_stid **s, struct nfsd_net *nn)
6435 {
6436 __be32 status;
6437 struct nfs4_stid *stid;
6438 bool return_revoked = false;
6439
6440 /*
6441 * Only return revoked delegations if explicitly asked for;
6442 * otherwise we report revoked or bad_stateid status.
6443 */
6444 if (typemask & NFS4_REVOKED_DELEG_STID)
6445 return_revoked = true;
6446 else if (typemask & NFS4_DELEG_STID)
6447 typemask |= NFS4_REVOKED_DELEG_STID;
6448
6449 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
6450 CLOSE_STATEID(stateid))
6451 return nfserr_bad_stateid;
6452 status = set_client(&stateid->si_opaque.so_clid, cstate, nn);
6453 if (status == nfserr_stale_clientid) {
6454 if (cstate->session)
6455 return nfserr_bad_stateid;
6456 return nfserr_stale_stateid;
6457 }
6458 if (status)
6459 return status;
6460 stid = find_stateid_by_type(cstate->clp, stateid, typemask);
6461 if (!stid)
6462 return nfserr_bad_stateid;
6463 if ((stid->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
6464 nfs4_put_stid(stid);
6465 if (cstate->minorversion)
6466 return nfserr_deleg_revoked;
6467 return nfserr_bad_stateid;
6468 }
6469 *s = stid;
6470 return nfs_ok;
6471 }
6472
6473 static struct nfsd_file *
6474 nfs4_find_file(struct nfs4_stid *s, int flags)
6475 {
6476 struct nfsd_file *ret = NULL;
6477
6478 if (!s)
6479 return NULL;
6480
6481 switch (s->sc_type) {
6482 case NFS4_DELEG_STID:
6483 spin_lock(&s->sc_file->fi_lock);
6484 ret = nfsd_file_get(s->sc_file->fi_deleg_file);
6485 spin_unlock(&s->sc_file->fi_lock);
6486 break;
6487 case NFS4_OPEN_STID:
6488 case NFS4_LOCK_STID:
6489 if (flags & RD_STATE)
6490 ret = find_readable_file(s->sc_file);
6491 else
6492 ret = find_writeable_file(s->sc_file);
6493 }
6494
6495 return ret;
6496 }
6497
6498 static __be32
6499 nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
6500 {
6501 __be32 status;
6502
6503 status = nfsd4_check_openowner_confirmed(ols);
6504 if (status)
6505 return status;
6506 return nfs4_check_openmode(ols, flags);
6507 }
6508
6509 static __be32
6510 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
6511 struct nfsd_file **nfp, int flags)
6512 {
6513 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
6514 struct nfsd_file *nf;
6515 __be32 status;
6516
6517 nf = nfs4_find_file(s, flags);
6518 if (nf) {
6519 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
6520 acc | NFSD_MAY_OWNER_OVERRIDE);
6521 if (status) {
6522 nfsd_file_put(nf);
6523 goto out;
6524 }
6525 } else {
6526 status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
6527 if (status)
6528 return status;
6529 }
6530 *nfp = nf;
6531 out:
6532 return status;
6533 }
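
/*
 * In the first branch above the stateid already vouches for the open,
 * so only a lightweight permission check (with NFSD_MAY_OWNER_OVERRIDE)
 * is applied to the cached nfsd_file; with no file attached to the
 * stateid, nfsd_file_acquire() performs a full open with its own
 * access checks.
 */
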
6534 static void
6535 _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
6536 {
6537 WARN_ON_ONCE(cps->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID);
6538 if (!refcount_dec_and_test(&cps->cp_stateid.cs_count))
6539 return;
6540 list_del(&cps->cp_list);
6541 idr_remove(&nn->s2s_cp_stateids,
6542 cps->cp_stateid.cs_stid.si_opaque.so_id);
6543 kfree(cps);
6544 }
6545 /*
6546 * A READ from an inter-server-to-server COPY will have a
6547 * copy stateid. Look up the copy notify stateid from the
6548 * idr structure and take a reference on it.
6549 */
6550 __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
6551 struct nfs4_client *clp,
6552 struct nfs4_cpntf_state **cps)
6553 {
6554 copy_stateid_t *cps_t;
6555 struct nfs4_cpntf_state *state = NULL;
6556
6557 if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
6558 return nfserr_bad_stateid;
6559 spin_lock(&nn->s2s_cp_lock);
6560 cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
6561 if (cps_t) {
6562 state = container_of(cps_t, struct nfs4_cpntf_state,
6563 cp_stateid);
6564 if (state->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID) {
6565 state = NULL;
6566 goto unlock;
6567 }
6568 if (!clp)
6569 refcount_inc(&state->cp_stateid.cs_count);
6570 else
6571 _free_cpntf_state_locked(nn, state);
6572 }
6573 unlock:
6574 spin_unlock(&nn->s2s_cp_lock);
6575 if (!state)
6576 return nfserr_bad_stateid;
6577 if (!clp)
6578 *cps = state;
6579 return 0;
6580 }
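
/*
 * manage_cpntf_state() is dual-purpose: with a NULL @clp it looks up
 * the copy-notify stateid and takes a reference for the caller (via
 * *cps); with a non-NULL @clp it instead drops a reference, freeing
 * the state once the count reaches zero.
 */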
6581
6582 static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
6583 struct nfs4_stid **stid)
6584 {
6585 __be32 status;
6586 struct nfs4_cpntf_state *cps = NULL;
6587 struct nfs4_client *found;
6588
6589 status = manage_cpntf_state(nn, st, NULL, &cps);
6590 if (status)
6591 return status;
6592
6593 cps->cpntf_time = ktime_get_boottime_seconds();
6594
6595 status = nfserr_expired;
6596 found = lookup_clientid(&cps->cp_p_clid, true, nn);
6597 if (!found)
6598 goto out;
6599
6600 *stid = find_stateid_by_type(found, &cps->cp_p_stateid,
6601 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID);
6602 if (*stid)
6603 status = nfs_ok;
6604 else
6605 status = nfserr_bad_stateid;
6606
6607 put_client_renew(found);
6608 out:
6609 nfs4_put_cpntf_state(nn, cps);
6610 return status;
6611 }
6612
6613 void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
6614 {
6615 spin_lock(&nn->s2s_cp_lock);
6616 _free_cpntf_state_locked(nn, cps);
6617 spin_unlock(&nn->s2s_cp_lock);
6618 }
6619
6620 /**
6621 * nfs4_preprocess_stateid_op - find and prep stateid for an operation
6622 * @rqstp: incoming request from client
6623 * @cstate: current compound state
6624 * @fhp: filehandle associated with requested stateid
6625 * @stateid: stateid (provided by client)
6626 * @flags: flags describing type of operation to be done
6627 * @nfp: optional nfsd_file return pointer (may be NULL)
6628 * @cstid: optional returned nfs4_stid pointer (may be NULL)
6629 *
6630 * Given info from the client, look up an nfs4_stid for the operation. On
6631 * success, it returns a reference to the nfs4_stid and/or the nfsd_file
6632 * associated with it.
6633 */
6634 __be32
6635 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
6636 struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
6637 stateid_t *stateid, int flags, struct nfsd_file **nfp,
6638 struct nfs4_stid **cstid)
6639 {
6640 struct net *net = SVC_NET(rqstp);
6641 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6642 struct nfs4_stid *s = NULL;
6643 __be32 status;
6644
6645 if (nfp)
6646 *nfp = NULL;
6647
6648 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
6649 if (cstid)
6650 status = nfserr_bad_stateid;
6651 else
6652 status = check_special_stateids(net, fhp, stateid,
6653 flags);
6654 goto done;
6655 }
6656
6657 status = nfsd4_lookup_stateid(cstate, stateid,
6658 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
6659 &s, nn);
6660 if (status == nfserr_bad_stateid)
6661 status = find_cpntf_state(nn, stateid, &s);
6662 if (status)
6663 return status;
6664 status = nfsd4_stid_check_stateid_generation(stateid, s,
6665 nfsd4_has_session(cstate));
6666 if (status)
6667 goto out;
6668
6669 switch (s->sc_type) {
6670 case NFS4_DELEG_STID:
6671 status = nfs4_check_delegmode(delegstateid(s), flags);
6672 break;
6673 case NFS4_OPEN_STID:
6674 case NFS4_LOCK_STID:
6675 status = nfs4_check_olstateid(openlockstateid(s), flags);
6676 break;
6677 default:
6678 status = nfserr_bad_stateid;
6679 break;
6680 }
6681 if (status)
6682 goto out;
6683 status = nfs4_check_fh(fhp, s);
6684
6685 done:
6686 if (status == nfs_ok && nfp)
6687 status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
6688 out:
6689 if (s) {
6690 if (!status && cstid)
6691 *cstid = s;
6692 else
6693 nfs4_put_stid(s);
6694 }
6695 return status;
6696 }
6697
6698 /*
6699 * Test if the stateid is valid
6700 */
6701 __be32
6702 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6703 union nfsd4_op_u *u)
6704 {
6705 struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
6706 struct nfsd4_test_stateid_id *stateid;
6707 struct nfs4_client *cl = cstate->clp;
6708
6709 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
6710 stateid->ts_id_status =
6711 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
6712
6713 return nfs_ok;
6714 }
6715
6716 static __be32
6717 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
6718 {
6719 struct nfs4_ol_stateid *stp = openlockstateid(s);
6720 __be32 ret;
6721
6722 ret = nfsd4_lock_ol_stateid(stp);
6723 if (ret)
6724 goto out_put_stid;
6725
6726 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
6727 if (ret)
6728 goto out;
6729
6730 ret = nfserr_locks_held;
6731 if (check_for_locks(stp->st_stid.sc_file,
6732 lockowner(stp->st_stateowner)))
6733 goto out;
6734
6735 release_lock_stateid(stp);
6736 ret = nfs_ok;
6737
6738 out:
6739 mutex_unlock(&stp->st_mutex);
6740 out_put_stid:
6741 nfs4_put_stid(s);
6742 return ret;
6743 }
6744
6745 __be32
6746 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6747 union nfsd4_op_u *u)
6748 {
6749 struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
6750 stateid_t *stateid = &free_stateid->fr_stateid;
6751 struct nfs4_stid *s;
6752 struct nfs4_delegation *dp;
6753 struct nfs4_client *cl = cstate->clp;
6754 __be32 ret = nfserr_bad_stateid;
6755
6756 spin_lock(&cl->cl_lock);
6757 s = find_stateid_locked(cl, stateid);
6758 if (!s)
6759 goto out_unlock;
6760 spin_lock(&s->sc_lock);
6761 switch (s->sc_type) {
6762 case NFS4_DELEG_STID:
6763 ret = nfserr_locks_held;
6764 break;
6765 case NFS4_OPEN_STID:
6766 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
6767 if (ret)
6768 break;
6769 ret = nfserr_locks_held;
6770 break;
6771 case NFS4_LOCK_STID:
6772 spin_unlock(&s->sc_lock);
6773 refcount_inc(&s->sc_count);
6774 spin_unlock(&cl->cl_lock);
6775 ret = nfsd4_free_lock_stateid(stateid, s);
6776 goto out;
6777 case NFS4_REVOKED_DELEG_STID:
6778 spin_unlock(&s->sc_lock);
6779 dp = delegstateid(s);
6780 list_del_init(&dp->dl_recall_lru);
6781 spin_unlock(&cl->cl_lock);
6782 nfs4_put_stid(s);
6783 ret = nfs_ok;
6784 goto out;
6785 /* Default falls through and returns nfserr_bad_stateid */
6786 }
6787 spin_unlock(&s->sc_lock);
6788 out_unlock:
6789 spin_unlock(&cl->cl_lock);
6790 out:
6791 return ret;
6792 }
6793
6794 static inline int
6795 setlkflg(int type)
6796 {
6797 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
6798 RD_STATE : WR_STATE;
6799 }
6800
6801 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
6802 {
6803 struct svc_fh *current_fh = &cstate->current_fh;
6804 struct nfs4_stateowner *sop = stp->st_stateowner;
6805 __be32 status;
6806
6807 status = nfsd4_check_seqid(cstate, sop, seqid);
6808 if (status)
6809 return status;
6810 status = nfsd4_lock_ol_stateid(stp);
6811 if (status != nfs_ok)
6812 return status;
6813 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
6814 if (status == nfs_ok)
6815 status = nfs4_check_fh(current_fh, &stp->st_stid);
6816 if (status != nfs_ok)
6817 mutex_unlock(&stp->st_mutex);
6818 return status;
6819 }
6820
6821 /**
6822 * nfs4_preprocess_seqid_op - find and prep an ol_stateid for a seqid-morphing op
6823 * @cstate: compound state
6824 * @seqid: seqid (provided by client)
6825 * @stateid: stateid (provided by client)
6826 * @typemask: mask of allowable types for this operation
6827 * @stpp: return pointer for the stateid found
6828 * @nn: net namespace for request
6829 *
6830 * Given a stateid+seqid from a client, look up an nfs4_ol_stateid and
6831 * return it in @stpp. On an nfs_ok return, the returned stateid will
6832 * have its st_mutex locked.
6833 */
6834 static __be32
6835 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6836 stateid_t *stateid, char typemask,
6837 struct nfs4_ol_stateid **stpp,
6838 struct nfsd_net *nn)
6839 {
6840 __be32 status;
6841 struct nfs4_stid *s;
6842 struct nfs4_ol_stateid *stp = NULL;
6843
6844 trace_nfsd_preprocess(seqid, stateid);
6845
6846 *stpp = NULL;
6847 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
6848 if (status)
6849 return status;
6850 stp = openlockstateid(s);
6851 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
6852
6853 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
6854 if (!status)
6855 *stpp = stp;
6856 else
6857 nfs4_put_stid(&stp->st_stid);
6858 return status;
6859 }
6860
6861 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6862 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
6863 {
6864 __be32 status;
6865 struct nfs4_openowner *oo;
6866 struct nfs4_ol_stateid *stp;
6867
6868 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
6869 NFS4_OPEN_STID, &stp, nn);
6870 if (status)
6871 return status;
6872 oo = openowner(stp->st_stateowner);
6873 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
6874 mutex_unlock(&stp->st_mutex);
6875 nfs4_put_stid(&stp->st_stid);
6876 return nfserr_bad_stateid;
6877 }
6878 *stpp = stp;
6879 return nfs_ok;
6880 }
6881
6882 __be32
6883 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6884 union nfsd4_op_u *u)
6885 {
6886 struct nfsd4_open_confirm *oc = &u->open_confirm;
6887 __be32 status;
6888 struct nfs4_openowner *oo;
6889 struct nfs4_ol_stateid *stp;
6890 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6891
6892 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
6893 cstate->current_fh.fh_dentry);
6894
6895 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
6896 if (status)
6897 return status;
6898
6899 status = nfs4_preprocess_seqid_op(cstate,
6900 oc->oc_seqid, &oc->oc_req_stateid,
6901 NFS4_OPEN_STID, &stp, nn);
6902 if (status)
6903 goto out;
6904 oo = openowner(stp->st_stateowner);
6905 status = nfserr_bad_stateid;
6906 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
6907 mutex_unlock(&stp->st_mutex);
6908 goto put_stateid;
6909 }
6910 oo->oo_flags |= NFS4_OO_CONFIRMED;
6911 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
6912 mutex_unlock(&stp->st_mutex);
6913 trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
6914 nfsd4_client_record_create(oo->oo_owner.so_client);
6915 status = nfs_ok;
6916 put_stateid:
6917 nfs4_put_stid(&stp->st_stid);
6918 out:
6919 nfsd4_bump_seqid(cstate, status);
6920 return status;
6921 }
6922
6923 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
6924 {
6925 if (!test_access(access, stp))
6926 return;
6927 nfs4_file_put_access(stp->st_stid.sc_file, access);
6928 clear_access(access, stp);
6929 }
6930
6931 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
6932 {
6933 switch (to_access) {
6934 case NFS4_SHARE_ACCESS_READ:
6935 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
6936 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6937 break;
6938 case NFS4_SHARE_ACCESS_WRITE:
6939 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
6940 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6941 break;
6942 case NFS4_SHARE_ACCESS_BOTH:
6943 break;
6944 default:
6945 WARN_ON_ONCE(1);
6946 }
6947 }
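
/*
 * Illustrative example (not in the original source): an open stateid
 * holding WRITE and BOTH access bits that is downgraded to
 * NFS4_SHARE_ACCESS_READ has those bits cleared and the nfs4_file
 * access references they pin released; only the READ access, which the
 * caller has already verified is set, remains.
 */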
6948
6949 __be32
6950 nfsd4_open_downgrade(struct svc_rqst *rqstp,
6951 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
6952 {
6953 struct nfsd4_open_downgrade *od = &u->open_downgrade;
6954 __be32 status;
6955 struct nfs4_ol_stateid *stp;
6956 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6957
6958 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
6959 cstate->current_fh.fh_dentry);
6960
6961 /* We don't yet support WANT bits: */
6962 if (od->od_deleg_want)
6963 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
6964 od->od_deleg_want);
6965
6966 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
6967 &od->od_stateid, &stp, nn);
6968 if (status)
6969 goto out;
6970 status = nfserr_inval;
6971 if (!test_access(od->od_share_access, stp)) {
6972 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
6973 stp->st_access_bmap, od->od_share_access);
6974 goto put_stateid;
6975 }
6976 if (!test_deny(od->od_share_deny, stp)) {
6977 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
6978 stp->st_deny_bmap, od->od_share_deny);
6979 goto put_stateid;
6980 }
6981 nfs4_stateid_downgrade(stp, od->od_share_access);
6982 reset_union_bmap_deny(od->od_share_deny, stp);
6983 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
6984 status = nfs_ok;
6985 put_stateid:
6986 mutex_unlock(&stp->st_mutex);
6987 nfs4_put_stid(&stp->st_stid);
6988 out:
6989 nfsd4_bump_seqid(cstate, status);
6990 return status;
6991 }
6992
6993 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
6994 {
6995 struct nfs4_client *clp = s->st_stid.sc_client;
6996 bool unhashed;
6997 LIST_HEAD(reaplist);
6998 struct nfs4_ol_stateid *stp;
6999
7000 spin_lock(&clp->cl_lock);
7001 unhashed = unhash_open_stateid(s, &reaplist);
7002
7003 if (clp->cl_minorversion) {
7004 if (unhashed)
7005 put_ol_stateid_locked(s, &reaplist);
7006 spin_unlock(&clp->cl_lock);
7007 list_for_each_entry(stp, &reaplist, st_locks)
7008 nfs4_free_cpntf_statelist(clp->net, &stp->st_stid);
7009 free_ol_stateid_reaplist(&reaplist);
7010 } else {
7011 spin_unlock(&clp->cl_lock);
7012 free_ol_stateid_reaplist(&reaplist);
7013 if (unhashed)
7014 move_to_close_lru(s, clp->net);
7015 }
7016 }
7017
7018 /*
7019 * nfs4_unlock_state() called after encode
7020 */
7021 __be32
7022 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7023 union nfsd4_op_u *u)
7024 {
7025 struct nfsd4_close *close = &u->close;
7026 __be32 status;
7027 struct nfs4_ol_stateid *stp;
7028 struct net *net = SVC_NET(rqstp);
7029 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7030
7031 dprintk("NFSD: nfsd4_close on file %pd\n",
7032 cstate->current_fh.fh_dentry);
7033
7034 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
7035 &close->cl_stateid,
7036 NFS4_OPEN_STID|NFS4_CLOSED_STID,
7037 &stp, nn);
7038 nfsd4_bump_seqid(cstate, status);
7039 if (status)
7040 goto out;
7041
7042 stp->st_stid.sc_type = NFS4_CLOSED_STID;
7043
7044 /*
7045 * Technically we don't _really_ have to increment or copy it, since
7046 * it should just be gone after this operation and we clobber the
7047 * copied value below, but we continue to do so here just to ensure
7048 * that racing ops see that there was a state change.
7049 */
7050 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
7051
7052 nfsd4_close_open_stateid(stp);
7053 mutex_unlock(&stp->st_mutex);
7054
7055 /* v4.1+ suggests that we send a special stateid in here, since the
7056 * clients should just ignore this anyway. Since this is not useful
7057 * for v4.0 clients either, we set it to the special close_stateid
7058 * universally.
7059 *
7060 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
7061 */
7062 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
7063
7064 /* put reference from nfs4_preprocess_seqid_op */
7065 nfs4_put_stid(&stp->st_stid);
7066 out:
7067 return status;
7068 }
7069
7070 __be32
7071 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7072 union nfsd4_op_u *u)
7073 {
7074 struct nfsd4_delegreturn *dr = &u->delegreturn;
7075 struct nfs4_delegation *dp;
7076 stateid_t *stateid = &dr->dr_stateid;
7077 struct nfs4_stid *s;
7078 __be32 status;
7079 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7080
7081 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
7082 return status;
7083
7084 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
7085 if (status)
7086 goto out;
7087 dp = delegstateid(s);
7088 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
7089 if (status)
7090 goto put_stateid;
7091
7092 trace_nfsd_deleg_return(stateid);
7093 wake_up_var(d_inode(cstate->current_fh.fh_dentry));
7094 destroy_delegation(dp);
7095 put_stateid:
7096 nfs4_put_stid(&dp->dl_stid);
7097 out:
7098 return status;
7099 }
7100
7101 /* last octet in a range */
7102 static inline u64
7103 last_byte_offset(u64 start, u64 len)
7104 {
7105 u64 end;
7106
7107 WARN_ON_ONCE(!len);
7108 end = start + len;
7109 return end > start ? end - 1 : NFS4_MAX_UINT64;
7110 }
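
/*
 * Worked examples (illustrative, not in the original source):
 *   last_byte_offset(100, 10) == 109 (bytes 100..109 inclusive);
 *   last_byte_offset(5, NFS4_MAX_UINT64) wraps, so end <= start and
 *   the result is NFS4_MAX_UINT64, NFSv4's "every byte to end of file".
 */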
7111
7112 /*
7113 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
7114 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
7115 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
7116 * locking, this prevents us from being completely protocol-compliant. The
7117 * real solution to this problem is to start using unsigned file offsets in
7118 * the VFS, but this is a very deep change!
7119 */
7120 static inline void
7121 nfs4_transform_lock_offset(struct file_lock *lock)
7122 {
7123 if (lock->fl_start < 0)
7124 lock->fl_start = OFFSET_MAX;
7125 if (lock->fl_end < 0)
7126 lock->fl_end = OFFSET_MAX;
7127 }
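
/*
 * Example of the clamping above (illustrative): a lock range reaching
 * offset 2^63 yields fl_start or fl_end == 0x8000000000000000, which
 * is negative when read as a signed loff_t; the transform clamps it to
 * OFFSET_MAX, collapsing the out-of-range portion onto the largest
 * representable offset rather than passing a negative value to the VFS.
 */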
7128
7129 static fl_owner_t
7130 nfsd4_lm_get_owner(fl_owner_t owner)
7131 {
7132 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
7133
7134 nfs4_get_stateowner(&lo->lo_owner);
7135 return owner;
7136 }
7137
7138 static void
7139 nfsd4_lm_put_owner(fl_owner_t owner)
7140 {
7141 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
7142
7143 if (lo)
7144 nfs4_put_stateowner(&lo->lo_owner);
7145 }
7146
7147 /* return true if the client owning this lock is expirable */
7148 static bool
7149 nfsd4_lm_lock_expirable(struct file_lock *cfl)
7150 {
7151 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)cfl->fl_owner;
7152 struct nfs4_client *clp = lo->lo_owner.so_client;
7153 struct nfsd_net *nn;
7154
7155 if (try_to_expire_client(clp)) {
7156 nn = net_generic(clp->net, nfsd_net_id);
7157 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
7158 return true;
7159 }
7160 return false;
7161 }
7162
7163 /* schedule laundromat to run immediately and wait for it to complete */
7164 static void
7165 nfsd4_lm_expire_lock(void)
7166 {
7167 flush_workqueue(laundry_wq);
7168 }
7169
7170 static void
7171 nfsd4_lm_notify(struct file_lock *fl)
7172 {
7173 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
7174 struct net *net = lo->lo_owner.so_client->net;
7175 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7176 struct nfsd4_blocked_lock *nbl = container_of(fl,
7177 struct nfsd4_blocked_lock, nbl_lock);
7178 bool queue = false;
7179
7180 /* An empty list means that something else is going to be using it */
7181 spin_lock(&nn->blocked_locks_lock);
7182 if (!list_empty(&nbl->nbl_list)) {
7183 list_del_init(&nbl->nbl_list);
7184 list_del_init(&nbl->nbl_lru);
7185 queue = true;
7186 }
7187 spin_unlock(&nn->blocked_locks_lock);
7188
7189 if (queue) {
7190 trace_nfsd_cb_notify_lock(lo, nbl);
7191 nfsd4_run_cb(&nbl->nbl_cb);
7192 }
7193 }
7194
7195 static const struct lock_manager_operations nfsd_posix_mng_ops = {
7196 .lm_mod_owner = THIS_MODULE,
7197 .lm_notify = nfsd4_lm_notify,
7198 .lm_get_owner = nfsd4_lm_get_owner,
7199 .lm_put_owner = nfsd4_lm_put_owner,
7200 .lm_lock_expirable = nfsd4_lm_lock_expirable,
7201 .lm_expire_lock = nfsd4_lm_expire_lock,
7202 };
7203
7204 static inline void
7205 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
7206 {
7207 struct nfs4_lockowner *lo;
7208
7209 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
7210 lo = (struct nfs4_lockowner *) fl->fl_owner;
7211 xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
7212 GFP_KERNEL);
7213 if (!deny->ld_owner.data)
7214 /* We just don't care that much */
7215 goto nevermind;
7216 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
7217 } else {
7218 nevermind:
7219 deny->ld_owner.len = 0;
7220 deny->ld_owner.data = NULL;
7221 deny->ld_clientid.cl_boot = 0;
7222 deny->ld_clientid.cl_id = 0;
7223 }
7224 deny->ld_start = fl->fl_start;
7225 deny->ld_length = NFS4_MAX_UINT64;
7226 if (fl->fl_end != NFS4_MAX_UINT64)
7227 deny->ld_length = fl->fl_end - fl->fl_start + 1;
7228 deny->ld_type = NFS4_READ_LT;
7229 if (fl->fl_type != F_RDLCK)
7230 deny->ld_type = NFS4_WRITE_LT;
7231 }
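
/*
 * Length encoding example (illustrative): a conflicting lock covering
 * bytes 100..109 (fl_start == 100, fl_end == 109) is reported back as
 * ld_start = 100, ld_length = 10; only when fl_end is the sentinel
 * NFS4_MAX_UINT64 does ld_length stay at NFS4_MAX_UINT64, the
 * protocol's "rest of the file" value.
 */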
7232
7233 static struct nfs4_lockowner *
7234 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
7235 {
7236 unsigned int strhashval = ownerstr_hashval(owner);
7237 struct nfs4_stateowner *so;
7238
7239 lockdep_assert_held(&clp->cl_lock);
7240
7241 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
7242 so_strhash) {
7243 if (so->so_is_open_owner)
7244 continue;
7245 if (same_owner_str(so, owner))
7246 return lockowner(nfs4_get_stateowner(so));
7247 }
7248 return NULL;
7249 }
7250
7251 static struct nfs4_lockowner *
7252 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
7253 {
7254 struct nfs4_lockowner *lo;
7255
7256 spin_lock(&clp->cl_lock);
7257 lo = find_lockowner_str_locked(clp, owner);
7258 spin_unlock(&clp->cl_lock);
7259 return lo;
7260 }
7261
7262 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
7263 {
7264 unhash_lockowner_locked(lockowner(sop));
7265 }
7266
7267 static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
7268 {
7269 struct nfs4_lockowner *lo = lockowner(sop);
7270
7271 kmem_cache_free(lockowner_slab, lo);
7272 }
7273
7274 static const struct nfs4_stateowner_operations lockowner_ops = {
7275 .so_unhash = nfs4_unhash_lockowner,
7276 .so_free = nfs4_free_lockowner,
7277 };
7278
7279 /*
7280 * Alloc a lock owner structure.
7281 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
7282 * occurred.
7283 *
7284 * strhashval = ownerstr_hashval
7285 */
7286 static struct nfs4_lockowner *
7287 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
7288 struct nfs4_ol_stateid *open_stp,
7289 struct nfsd4_lock *lock)
7290 {
7291 struct nfs4_lockowner *lo, *ret;
7292
7293 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
7294 if (!lo)
7295 return NULL;
7296 INIT_LIST_HEAD(&lo->lo_blocked);
7297 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
7298 lo->lo_owner.so_is_open_owner = 0;
7299 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
7300 lo->lo_owner.so_ops = &lockowner_ops;
7301 spin_lock(&clp->cl_lock);
7302 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
7303 if (ret == NULL) {
7304 list_add(&lo->lo_owner.so_strhash,
7305 &clp->cl_ownerstr_hashtbl[strhashval]);
7306 ret = lo;
7307 } else
7308 nfs4_free_stateowner(&lo->lo_owner);
7309
7310 spin_unlock(&clp->cl_lock);
7311 return ret;
7312 }
7313
7314 static struct nfs4_ol_stateid *
7315 find_lock_stateid(const struct nfs4_lockowner *lo,
7316 const struct nfs4_ol_stateid *ost)
7317 {
7318 struct nfs4_ol_stateid *lst;
7319
7320 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
7321
7322 /* If ost is not hashed, ost->st_locks will not be valid */
7323 if (!nfs4_ol_stateid_unhashed(ost))
7324 list_for_each_entry(lst, &ost->st_locks, st_locks) {
7325 if (lst->st_stateowner == &lo->lo_owner) {
7326 refcount_inc(&lst->st_stid.sc_count);
7327 return lst;
7328 }
7329 }
7330 return NULL;
7331 }
7332
7333 static struct nfs4_ol_stateid *
7334 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
7335 struct nfs4_file *fp, struct inode *inode,
7336 struct nfs4_ol_stateid *open_stp)
7337 {
7338 struct nfs4_client *clp = lo->lo_owner.so_client;
7339 struct nfs4_ol_stateid *retstp;
7340
7341 mutex_init(&stp->st_mutex);
7342 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
7343 retry:
7344 spin_lock(&clp->cl_lock);
7345 if (nfs4_ol_stateid_unhashed(open_stp))
7346 goto out_close;
7347 retstp = find_lock_stateid(lo, open_stp);
7348 if (retstp)
7349 goto out_found;
7350 refcount_inc(&stp->st_stid.sc_count);
7351 stp->st_stid.sc_type = NFS4_LOCK_STID;
7352 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
7353 get_nfs4_file(fp);
7354 stp->st_stid.sc_file = fp;
7355 stp->st_access_bmap = 0;
7356 stp->st_deny_bmap = open_stp->st_deny_bmap;
7357 stp->st_openstp = open_stp;
7358 spin_lock(&fp->fi_lock);
7359 list_add(&stp->st_locks, &open_stp->st_locks);
7360 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
7361 list_add(&stp->st_perfile, &fp->fi_stateids);
7362 spin_unlock(&fp->fi_lock);
7363 spin_unlock(&clp->cl_lock);
7364 return stp;
7365 out_found:
7366 spin_unlock(&clp->cl_lock);
7367 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
7368 nfs4_put_stid(&retstp->st_stid);
7369 goto retry;
7370 }
7371 /* To keep mutex tracking happy */
7372 mutex_unlock(&stp->st_mutex);
7373 return retstp;
7374 out_close:
7375 spin_unlock(&clp->cl_lock);
7376 mutex_unlock(&stp->st_mutex);
7377 return NULL;
7378 }
7379
7380 static struct nfs4_ol_stateid *
7381 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
7382 struct inode *inode, struct nfs4_ol_stateid *ost,
7383 bool *new)
7384 {
7385 struct nfs4_stid *ns = NULL;
7386 struct nfs4_ol_stateid *lst;
7387 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
7388 struct nfs4_client *clp = oo->oo_owner.so_client;
7389
7390 *new = false;
7391 spin_lock(&clp->cl_lock);
7392 lst = find_lock_stateid(lo, ost);
7393 spin_unlock(&clp->cl_lock);
7394 if (lst != NULL) {
7395 if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
7396 goto out;
7397 nfs4_put_stid(&lst->st_stid);
7398 }
7399 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
7400 if (ns == NULL)
7401 return NULL;
7402
7403 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
7404 if (lst == openlockstateid(ns))
7405 *new = true;
7406 else
7407 nfs4_put_stid(ns);
7408 out:
7409 return lst;
7410 }
7411
7412 static int
7413 check_lock_length(u64 offset, u64 length)
7414 {
7415 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
7416 (length > ~offset)));
7417 }
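
/*
 * Sketch of the overflow check (illustrative): ~offset equals
 * NFS4_MAX_UINT64 - offset, the largest length whose end does not wrap
 * past 2^64 - 1. E.g. for offset = 0xfffffffffffffff8 (~offset == 7),
 * lengths 1..7 and the special "to EOF" length NFS4_MAX_UINT64 are
 * accepted; length 0 or length 8 make check_lock_length() return
 * nonzero, and the caller rejects the request with NFS4ERR_INVAL.
 */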
7418
7419 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
7420 {
7421 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
7422
7423 lockdep_assert_held(&fp->fi_lock);
7424
7425 if (test_access(access, lock_stp))
7426 return;
7427 __nfs4_file_get_access(fp, access);
7428 set_access(access, lock_stp);
7429 }
7430
7431 static __be32
7432 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
7433 struct nfs4_ol_stateid *ost,
7434 struct nfsd4_lock *lock,
7435 struct nfs4_ol_stateid **plst, bool *new)
7436 {
7437 __be32 status;
7438 struct nfs4_file *fi = ost->st_stid.sc_file;
7439 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
7440 struct nfs4_client *cl = oo->oo_owner.so_client;
7441 struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
7442 struct nfs4_lockowner *lo;
7443 struct nfs4_ol_stateid *lst;
7444 unsigned int strhashval;
7445
7446 lo = find_lockowner_str(cl, &lock->lk_new_owner);
7447 if (!lo) {
7448 strhashval = ownerstr_hashval(&lock->lk_new_owner);
7449 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
7450 if (lo == NULL)
7451 return nfserr_jukebox;
7452 } else {
7453 /* with an existing lockowner, seqids must be the same */
7454 status = nfserr_bad_seqid;
7455 if (!cstate->minorversion &&
7456 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
7457 goto out;
7458 }
7459
7460 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
7461 if (lst == NULL) {
7462 status = nfserr_jukebox;
7463 goto out;
7464 }
7465
7466 status = nfs_ok;
7467 *plst = lst;
7468 out:
7469 nfs4_put_stateowner(&lo->lo_owner);
7470 return status;
7471 }
7472
7473 /*
7474 * LOCK operation
7475 */
7476 __be32
7477 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7478 union nfsd4_op_u *u)
7479 {
7480 struct nfsd4_lock *lock = &u->lock;
7481 struct nfs4_openowner *open_sop = NULL;
7482 struct nfs4_lockowner *lock_sop = NULL;
7483 struct nfs4_ol_stateid *lock_stp = NULL;
7484 struct nfs4_ol_stateid *open_stp = NULL;
7485 struct nfs4_file *fp;
7486 struct nfsd_file *nf = NULL;
7487 struct nfsd4_blocked_lock *nbl = NULL;
7488 struct file_lock *file_lock = NULL;
7489 struct file_lock *conflock = NULL;
7490 struct super_block *sb;
7491 __be32 status = 0;
7492 int lkflg;
7493 int err;
7494 bool new = false;
7495 unsigned char fl_type;
7496 unsigned int fl_flags = FL_POSIX;
7497 struct net *net = SVC_NET(rqstp);
7498 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7499
7500 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
7501 (long long) lock->lk_offset,
7502 (long long) lock->lk_length);
7503
7504 if (check_lock_length(lock->lk_offset, lock->lk_length))
7505 return nfserr_inval;
7506
7507 if ((status = fh_verify(rqstp, &cstate->current_fh,
7508 S_IFREG, NFSD_MAY_LOCK))) {
7509 dprintk("NFSD: nfsd4_lock: permission denied!\n");
7510 return status;
7511 }
7512 sb = cstate->current_fh.fh_dentry->d_sb;
7513
7514 if (lock->lk_is_new) {
7515 if (nfsd4_has_session(cstate))
7516 /* See rfc 5661 18.10.3: given clientid is ignored: */
7517 memcpy(&lock->lk_new_clientid,
7518 &cstate->clp->cl_clientid,
7519 sizeof(clientid_t));
7520
7521 /* validate and update open stateid and open seqid */
7522 status = nfs4_preprocess_confirmed_seqid_op(cstate,
7523 lock->lk_new_open_seqid,
7524 &lock->lk_new_open_stateid,
7525 &open_stp, nn);
7526 if (status)
7527 goto out;
7528 mutex_unlock(&open_stp->st_mutex);
7529 open_sop = openowner(open_stp->st_stateowner);
7530 status = nfserr_bad_stateid;
7531 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
7532 &lock->lk_new_clientid))
7533 goto out;
7534 status = lookup_or_create_lock_state(cstate, open_stp, lock,
7535 &lock_stp, &new);
7536 } else {
7537 status = nfs4_preprocess_seqid_op(cstate,
7538 lock->lk_old_lock_seqid,
7539 &lock->lk_old_lock_stateid,
7540 NFS4_LOCK_STID, &lock_stp, nn);
7541 }
7542 if (status)
7543 goto out;
7544 lock_sop = lockowner(lock_stp->st_stateowner);
7545
7546 lkflg = setlkflg(lock->lk_type);
7547 status = nfs4_check_openmode(lock_stp, lkflg);
7548 if (status)
7549 goto out;
7550
7551 status = nfserr_grace;
7552 if (locks_in_grace(net) && !lock->lk_reclaim)
7553 goto out;
7554 status = nfserr_no_grace;
7555 if (!locks_in_grace(net) && lock->lk_reclaim)
7556 goto out;
7557
7558 if (lock->lk_reclaim)
7559 fl_flags |= FL_RECLAIM;
7560
7561 fp = lock_stp->st_stid.sc_file;
7562 switch (lock->lk_type) {
7563 case NFS4_READW_LT:
7564 if (nfsd4_has_session(cstate) ||
7565 exportfs_lock_op_is_async(sb->s_export_op))
7566 fl_flags |= FL_SLEEP;
7567 fallthrough;
7568 case NFS4_READ_LT:
7569 spin_lock(&fp->fi_lock);
7570 nf = find_readable_file_locked(fp);
7571 if (nf)
7572 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
7573 spin_unlock(&fp->fi_lock);
7574 fl_type = F_RDLCK;
7575 break;
7576 case NFS4_WRITEW_LT:
7577 if (nfsd4_has_session(cstate) ||
7578 exportfs_lock_op_is_async(sb->s_export_op))
7579 fl_flags |= FL_SLEEP;
7580 fallthrough;
7581 case NFS4_WRITE_LT:
7582 spin_lock(&fp->fi_lock);
7583 nf = find_writeable_file_locked(fp);
7584 if (nf)
7585 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
7586 spin_unlock(&fp->fi_lock);
7587 fl_type = F_WRLCK;
7588 break;
7589 default:
7590 status = nfserr_inval;
7591 goto out;
7592 }
7593
7594 if (!nf) {
7595 status = nfserr_openmode;
7596 goto out;
7597 }
7598
7599 /*
7600 * Most filesystems with their own ->lock operations will block
7601 * the nfsd thread waiting to acquire the lock. That leads to
7602 * deadlocks (we don't want every nfsd thread tied up waiting
7603 * for file locks), so don't attempt blocking lock notifications
7604 * on those filesystems:
7605 */
7606 if (!exportfs_lock_op_is_async(sb->s_export_op))
7607 fl_flags &= ~FL_SLEEP;
7608
7609 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
7610 if (!nbl) {
7611 dprintk("NFSD: %s: unable to allocate block!\n", __func__);
7612 status = nfserr_jukebox;
7613 goto out;
7614 }
7615
7616 file_lock = &nbl->nbl_lock;
7617 file_lock->fl_type = fl_type;
7618 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
7619 file_lock->fl_pid = current->tgid;
7620 file_lock->fl_file = nf->nf_file;
7621 file_lock->fl_flags = fl_flags;
7622 file_lock->fl_lmops = &nfsd_posix_mng_ops;
7623 file_lock->fl_start = lock->lk_offset;
7624 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
7625 nfs4_transform_lock_offset(file_lock);
7626
7627 conflock = locks_alloc_lock();
7628 if (!conflock) {
7629 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7630 status = nfserr_jukebox;
7631 goto out;
7632 }
7633
7634 if (fl_flags & FL_SLEEP) {
7635 nbl->nbl_time = ktime_get_boottime_seconds();
7636 spin_lock(&nn->blocked_locks_lock);
7637 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
7638 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
7639 kref_get(&nbl->nbl_kref);
7640 spin_unlock(&nn->blocked_locks_lock);
7641 }
7642
7643 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
7644 switch (err) {
7645 case 0: /* success! */
7646 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
7647 status = 0;
7648 if (lock->lk_reclaim)
7649 nn->somebody_reclaimed = true;
7650 break;
7651 case FILE_LOCK_DEFERRED:
7652 kref_put(&nbl->nbl_kref, free_nbl);
7653 nbl = NULL;
7654 fallthrough;
7655 case -EAGAIN: /* conflock holds conflicting lock */
7656 status = nfserr_denied;
7657 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
7658 nfs4_set_lock_denied(conflock, &lock->lk_denied);
7659 break;
7660 case -EDEADLK:
7661 status = nfserr_deadlock;
7662 break;
7663 default:
7664 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n", err);
7665 status = nfserrno(err);
7666 break;
7667 }
7668 out:
7669 if (nbl) {
7670 /* dequeue it if we queued it before */
7671 if (fl_flags & FL_SLEEP) {
7672 spin_lock(&nn->blocked_locks_lock);
7673 if (!list_empty(&nbl->nbl_list) &&
7674 !list_empty(&nbl->nbl_lru)) {
7675 list_del_init(&nbl->nbl_list);
7676 list_del_init(&nbl->nbl_lru);
7677 kref_put(&nbl->nbl_kref, free_nbl);
7678 }
7679 /* nbl can use one of its list heads to be linked to a reaplist */
7680 spin_unlock(&nn->blocked_locks_lock);
7681 }
7682 free_blocked_lock(nbl);
7683 }
7684 if (nf)
7685 nfsd_file_put(nf);
7686 if (lock_stp) {
7687 /* Bump seqid manually if the 4.0 replay owner is openowner */
7688 if (cstate->replay_owner &&
7689 cstate->replay_owner != &lock_sop->lo_owner &&
7690 seqid_mutating_err(ntohl(status)))
7691 lock_sop->lo_owner.so_seqid++;
7692
7693 /*
7694 * If this is a new, never-before-used stateid, and we are
7695 * returning an error, then just go ahead and release it.
7696 */
7697 if (status && new)
7698 release_lock_stateid(lock_stp);
7699
7700 mutex_unlock(&lock_stp->st_mutex);
7701
7702 nfs4_put_stid(&lock_stp->st_stid);
7703 }
7704 if (open_stp)
7705 nfs4_put_stid(&open_stp->st_stid);
7706 nfsd4_bump_seqid(cstate, status);
7707 if (conflock)
7708 locks_free_lock(conflock);
7709 return status;
7710 }
7711
7712 void nfsd4_lock_release(union nfsd4_op_u *u)
7713 {
7714 struct nfsd4_lock *lock = &u->lock;
7715 struct nfsd4_lock_denied *deny = &lock->lk_denied;
7716
7717 kfree(deny->ld_owner.data);
7718 }
7719
7720 /*
7721 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
7722 * so we do a temporary open here just to get an open file to pass to
7723 * vfs_test_lock.
7724 */
7725 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
7726 {
7727 struct nfsd_file *nf;
7728 struct inode *inode;
7729 __be32 err;
7730
7731 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
7732 if (err)
7733 return err;
7734 inode = fhp->fh_dentry->d_inode;
7735 inode_lock(inode); /* to block new leases till after test_lock: */
7736 err = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
7737 if (err)
7738 goto out;
7739 lock->fl_file = nf->nf_file;
7740 err = nfserrno(vfs_test_lock(nf->nf_file, lock));
7741 lock->fl_file = NULL;
7742 out:
7743 inode_unlock(inode);
7744 nfsd_file_put(nf);
7745 return err;
7746 }
7747
7748 /*
7749 * LOCKT operation
7750 */
7751 __be32
7752 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7753 union nfsd4_op_u *u)
7754 {
7755 struct nfsd4_lockt *lockt = &u->lockt;
7756 struct file_lock *file_lock = NULL;
7757 struct nfs4_lockowner *lo = NULL;
7758 __be32 status;
7759 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7760
7761 if (locks_in_grace(SVC_NET(rqstp)))
7762 return nfserr_grace;
7763
7764 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
7765 return nfserr_inval;
7766
7767 if (!nfsd4_has_session(cstate)) {
7768 status = set_client(&lockt->lt_clientid, cstate, nn);
7769 if (status)
7770 goto out;
7771 }
7772
7773 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
7774 goto out;
7775
7776 file_lock = locks_alloc_lock();
7777 if (!file_lock) {
7778 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7779 status = nfserr_jukebox;
7780 goto out;
7781 }
7782
7783 switch (lockt->lt_type) {
7784 case NFS4_READ_LT:
7785 case NFS4_READW_LT:
7786 file_lock->fl_type = F_RDLCK;
7787 break;
7788 case NFS4_WRITE_LT:
7789 case NFS4_WRITEW_LT:
7790 file_lock->fl_type = F_WRLCK;
7791 break;
7792 default:
7793 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
7794 status = nfserr_inval;
7795 goto out;
7796 }
7797
7798 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
7799 if (lo)
7800 file_lock->fl_owner = (fl_owner_t)lo;
7801 file_lock->fl_pid = current->tgid;
7802 file_lock->fl_flags = FL_POSIX;
7803
7804 file_lock->fl_start = lockt->lt_offset;
7805 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
7806
7807 nfs4_transform_lock_offset(file_lock);
7808
7809 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
7810 if (status)
7811 goto out;
7812
7813 if (file_lock->fl_type != F_UNLCK) {
7814 status = nfserr_denied;
7815 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
7816 }
7817 out:
7818 if (lo)
7819 nfs4_put_stateowner(&lo->lo_owner);
7820 if (file_lock)
7821 locks_free_lock(file_lock);
7822 return status;
7823 }
7824
7825 void nfsd4_lockt_release(union nfsd4_op_u *u)
7826 {
7827 struct nfsd4_lockt *lockt = &u->lockt;
7828 struct nfsd4_lock_denied *deny = &lockt->lt_denied;
7829
7830 kfree(deny->ld_owner.data);
7831 }
7832
7833 __be32
7834 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7835 union nfsd4_op_u *u)
7836 {
7837 struct nfsd4_locku *locku = &u->locku;
7838 struct nfs4_ol_stateid *stp;
7839 struct nfsd_file *nf = NULL;
7840 struct file_lock *file_lock = NULL;
7841 __be32 status;
7842 int err;
7843 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7844
7845 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
7846 (long long) locku->lu_offset,
7847 (long long) locku->lu_length);
7848
7849 if (check_lock_length(locku->lu_offset, locku->lu_length))
7850 return nfserr_inval;
7851
7852 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
7853 &locku->lu_stateid, NFS4_LOCK_STID,
7854 &stp, nn);
7855 if (status)
7856 goto out;
7857 nf = find_any_file(stp->st_stid.sc_file);
7858 if (!nf) {
7859 status = nfserr_lock_range;
7860 goto put_stateid;
7861 }
7862 file_lock = locks_alloc_lock();
7863 if (!file_lock) {
7864 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7865 status = nfserr_jukebox;
7866 goto put_file;
7867 }
7868
7869 file_lock->fl_type = F_UNLCK;
7870 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
7871 file_lock->fl_pid = current->tgid;
7872 file_lock->fl_file = nf->nf_file;
7873 file_lock->fl_flags = FL_POSIX;
7874 file_lock->fl_lmops = &nfsd_posix_mng_ops;
7875 file_lock->fl_start = locku->lu_offset;
7876
7877 file_lock->fl_end = last_byte_offset(locku->lu_offset,
7878 locku->lu_length);
7879 nfs4_transform_lock_offset(file_lock);
7880
7881 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
7882 if (err) {
7883 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
7884 goto out_nfserr;
7885 }
7886 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
7887 put_file:
7888 nfsd_file_put(nf);
7889 put_stateid:
7890 mutex_unlock(&stp->st_mutex);
7891 nfs4_put_stid(&stp->st_stid);
7892 out:
7893 nfsd4_bump_seqid(cstate, status);
7894 if (file_lock)
7895 locks_free_lock(file_lock);
7896 return status;
7897
7898 out_nfserr:
7899 status = nfserrno(err);
7900 goto put_file;
7901 }
7902
7903 /*
7904 * returns
7905 * true: locks held by lockowner
7906 * false: no locks held by lockowner
7907 */
7908 static bool
7909 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
7910 {
7911 struct file_lock *fl;
7912 int status = false;
7913 struct nfsd_file *nf;
7914 struct inode *inode;
7915 struct file_lock_context *flctx;
7916
7917 spin_lock(&fp->fi_lock);
7918 nf = find_any_file_locked(fp);
7919 if (!nf) {
7920 /* Any valid lock stateid should have some sort of access */
7921 WARN_ON_ONCE(1);
7922 goto out;
7923 }
7924
7925 inode = file_inode(nf->nf_file);
7926 flctx = locks_inode_context(inode);
7927
7928 if (flctx && !list_empty_careful(&flctx->flc_posix)) {
7929 spin_lock(&flctx->flc_lock);
7930 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
7931 if (fl->fl_owner == (fl_owner_t)lowner) {
7932 status = true;
7933 break;
7934 }
7935 }
7936 spin_unlock(&flctx->flc_lock);
7937 }
7938 out:
7939 spin_unlock(&fp->fi_lock);
7940 return status;
7941 }
7942
7943 /**
7944 * nfsd4_release_lockowner - process NFSv4.0 RELEASE_LOCKOWNER operations
7945 * @rqstp: RPC transaction
7946 * @cstate: NFSv4 COMPOUND state
7947 * @u: RELEASE_LOCKOWNER arguments
7948 *
7949 * Check if there are any locks still held and, if not, free the
7950 * lockowner and any lock state that is owned.
7951 *
7952 * Return values:
7953 * %nfs_ok: lockowner released or not found
7954 * %nfserr_locks_held: lockowner still in use
7955 * %nfserr_stale_clientid: clientid no longer active
7956 * %nfserr_expired: clientid not recognized
7957 */
7958 __be32
7959 nfsd4_release_lockowner(struct svc_rqst *rqstp,
7960 struct nfsd4_compound_state *cstate,
7961 union nfsd4_op_u *u)
7962 {
7963 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
7964 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7965 clientid_t *clid = &rlockowner->rl_clientid;
7966 struct nfs4_ol_stateid *stp;
7967 struct nfs4_lockowner *lo;
7968 struct nfs4_client *clp;
7969 LIST_HEAD(reaplist);
7970 __be32 status;
7971
7972 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
7973 clid->cl_boot, clid->cl_id);
7974
7975 status = set_client(clid, cstate, nn);
7976 if (status)
7977 return status;
7978 clp = cstate->clp;
7979
7980 spin_lock(&clp->cl_lock);
7981 lo = find_lockowner_str_locked(clp, &rlockowner->rl_owner);
7982 if (!lo) {
7983 spin_unlock(&clp->cl_lock);
7984 return nfs_ok;
7985 }
7986
7987 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
7988 if (check_for_locks(stp->st_stid.sc_file, lo)) {
7989 spin_unlock(&clp->cl_lock);
7990 nfs4_put_stateowner(&lo->lo_owner);
7991 return nfserr_locks_held;
7992 }
7993 }
7994 unhash_lockowner_locked(lo);
7995 while (!list_empty(&lo->lo_owner.so_stateids)) {
7996 stp = list_first_entry(&lo->lo_owner.so_stateids,
7997 struct nfs4_ol_stateid,
7998 st_perstateowner);
7999 WARN_ON(!unhash_lock_stateid(stp));
8000 put_ol_stateid_locked(stp, &reaplist);
8001 }
8002 spin_unlock(&clp->cl_lock);
8003
8004 free_ol_stateid_reaplist(&reaplist);
8005 remove_blocked_locks(lo);
8006 nfs4_put_stateowner(&lo->lo_owner);
8007 return nfs_ok;
8008 }
8009
8010 static inline struct nfs4_client_reclaim *
8011 alloc_reclaim(void)
8012 {
8013 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
8014 }
8015
8016 bool
8017 nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
8018 {
8019 struct nfs4_client_reclaim *crp;
8020
8021 crp = nfsd4_find_reclaim_client(name, nn);
8022 return (crp && crp->cr_clp);
8023 }
8024
8025 /*
8026 * failure => all reset bets are off, nfserr_no_grace...
8027 *
8028 * The caller is responsible for freeing name.data if NULL is returned (it
8029 * will be freed in nfs4_remove_reclaim_record in the normal case).
8030 */
8031 struct nfs4_client_reclaim *
8032 nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
8033 struct nfsd_net *nn)
8034 {
8035 unsigned int strhashval;
8036 struct nfs4_client_reclaim *crp;
8037
8038 crp = alloc_reclaim();
8039 if (crp) {
8040 strhashval = clientstr_hashval(name);
8041 INIT_LIST_HEAD(&crp->cr_strhash);
8042 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
8043 crp->cr_name.data = name.data;
8044 crp->cr_name.len = name.len;
8045 crp->cr_princhash.data = princhash.data;
8046 crp->cr_princhash.len = princhash.len;
8047 crp->cr_clp = NULL;
8048 nn->reclaim_str_hashtbl_size++;
8049 }
8050 return crp;
8051 }
8052
8053 void
8054 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
8055 {
8056 list_del(&crp->cr_strhash);
8057 kfree(crp->cr_name.data);
8058 kfree(crp->cr_princhash.data);
8059 kfree(crp);
8060 nn->reclaim_str_hashtbl_size--;
8061 }
8062
8063 void
8064 nfs4_release_reclaim(struct nfsd_net *nn)
8065 {
8066 struct nfs4_client_reclaim *crp = NULL;
8067 int i;
8068
8069 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8070 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
8071 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
8072 struct nfs4_client_reclaim, cr_strhash);
8073 nfs4_remove_reclaim_record(crp, nn);
8074 }
8075 }
8076 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
8077 }
8078
8079 /*
8080 * called from OPEN, CLAIM_PREVIOUS with a new clientid.
 */
8081 struct nfs4_client_reclaim *
8082 nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
8083 {
8084 unsigned int strhashval;
8085 struct nfs4_client_reclaim *crp = NULL;
8086
8087 strhashval = clientstr_hashval(name);
8088 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
8089 if (compare_blob(&crp->cr_name, &name) == 0) {
8090 return crp;
8091 }
8092 }
8093 return NULL;
8094 }
8095
8096 __be32
8097 nfs4_check_open_reclaim(struct nfs4_client *clp)
8098 {
8099 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
8100 return nfserr_no_grace;
8101
8102 if (nfsd4_client_record_check(clp))
8103 return nfserr_reclaim_bad;
8104
8105 return nfs_ok;
8106 }
8107
8108 /*
8109 * Since the lifetime of a delegation isn't limited to that of an open, a
8110 * client may quite reasonably hang on to a delegation as long as it has
8111 * the inode cached. This becomes an obvious problem the first time a
8112 * client's inode cache approaches the size of the server's total memory.
8113 *
8114 * For now we avoid this problem by imposing a hard limit on the number
8115 * of delegations, which varies according to the server's memory size.
8116 */
8117 static void
8118 set_max_delegations(void)
8119 {
8120 /*
8121 * Allow at most 4 delegations per megabyte of RAM. Quick
8122 * estimates suggest that in the worst case (where every delegation
8123 * is for a different inode), a delegation could take about 1.5K,
8124 * giving a worst case usage of about 0.6% of memory.
8125 */
8126 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
8127 }
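
/*
 * Worked arithmetic (illustrative, assuming 4 KiB pages so
 * PAGE_SHIFT == 12): one megabyte is 2^(20 - PAGE_SHIFT) = 256 pages,
 * and 4 = 2^2 delegations are allowed per megabyte, so
 *   max_delegations = pages >> (20 - 2 - PAGE_SHIFT) = pages >> 6,
 * i.e. one delegation per 64 pages (256 KiB) of reclaimable memory.
 * At ~1.5K per delegation that is roughly 6K per MiB, about 0.6%.
 */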
8128
8129 static int nfs4_state_create_net(struct net *net)
8130 {
8131 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8132 int i;
8133
8134 nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
8135 sizeof(struct list_head),
8136 GFP_KERNEL);
8137 if (!nn->conf_id_hashtbl)
8138 goto err;
8139 nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
8140 sizeof(struct list_head),
8141 GFP_KERNEL);
8142 if (!nn->unconf_id_hashtbl)
8143 goto err_unconf_id;
8144 nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
8145 sizeof(struct list_head),
8146 GFP_KERNEL);
8147 if (!nn->sessionid_hashtbl)
8148 goto err_sessionid;
8149
8150 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8151 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
8152 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
8153 }
8154 for (i = 0; i < SESSION_HASH_SIZE; i++)
8155 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
8156 nn->conf_name_tree = RB_ROOT;
8157 nn->unconf_name_tree = RB_ROOT;
8158 nn->boot_time = ktime_get_real_seconds();
8159 nn->grace_ended = false;
8160 nn->nfsd4_manager.block_opens = true;
8161 INIT_LIST_HEAD(&nn->nfsd4_manager.list);
8162 INIT_LIST_HEAD(&nn->client_lru);
8163 INIT_LIST_HEAD(&nn->close_lru);
8164 INIT_LIST_HEAD(&nn->del_recall_lru);
8165 spin_lock_init(&nn->client_lock);
8166 spin_lock_init(&nn->s2s_cp_lock);
8167 idr_init(&nn->s2s_cp_stateids);
8168
8169 spin_lock_init(&nn->blocked_locks_lock);
8170 INIT_LIST_HEAD(&nn->blocked_locks_lru);
8171
8172 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
8173 INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker);
8174 get_net(net);
8175
8176 nn->nfsd_client_shrinker = shrinker_alloc(0, "nfsd-client");
8177 if (!nn->nfsd_client_shrinker)
8178 goto err_shrinker;
8179
8180 nn->nfsd_client_shrinker->scan_objects = nfsd4_state_shrinker_scan;
8181 nn->nfsd_client_shrinker->count_objects = nfsd4_state_shrinker_count;
8182 nn->nfsd_client_shrinker->private_data = nn;
8183
8184 shrinker_register(nn->nfsd_client_shrinker);
8185
8186 return 0;
8187
8188 err_shrinker:
8189 put_net(net);
8190 kfree(nn->sessionid_hashtbl);
8191 err_sessionid:
8192 kfree(nn->unconf_id_hashtbl);
8193 err_unconf_id:
8194 kfree(nn->conf_id_hashtbl);
8195 err:
8196 return -ENOMEM;
8197 }
8198
8199 static void
8200 nfs4_state_destroy_net(struct net *net)
8201 {
8202 int i;
8203 struct nfs4_client *clp = NULL;
8204 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8205
8206 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8207 while (!list_empty(&nn->conf_id_hashtbl[i])) {
8208 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
8209 destroy_client(clp);
8210 }
8211 }
8212
8213 WARN_ON(!list_empty(&nn->blocked_locks_lru));
8214
8215 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8216 while (!list_empty(&nn->unconf_id_hashtbl[i])) {
8217 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
8218 destroy_client(clp);
8219 }
8220 }
8221
8222 kfree(nn->sessionid_hashtbl);
8223 kfree(nn->unconf_id_hashtbl);
8224 kfree(nn->conf_id_hashtbl);
8225 put_net(net);
8226 }
8227
8228 int
8229 nfs4_state_start_net(struct net *net)
8230 {
8231 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8232 int ret;
8233
8234 ret = nfs4_state_create_net(net);
8235 if (ret)
8236 return ret;
8237 locks_start_grace(net, &nn->nfsd4_manager);
8238 nfsd4_client_tracking_init(net);
8239 if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
8240 goto skip_grace;
8241 printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
8242 nn->nfsd4_grace, net->ns.inum);
8243 trace_nfsd_grace_start(nn);
8244 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
8245 return 0;
8246
8247 skip_grace:
8248 printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
8249 net->ns.inum);
8250 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
8251 nfsd4_end_grace(nn);
8252 return 0;
8253 }
8254
8255 /* initialization to perform when the nfsd service is started: */
8256
8257 int
8258 nfs4_state_start(void)
8259 {
8260 int ret;
8261
8262 ret = rhltable_init(&nfs4_file_rhltable, &nfs4_file_rhash_params);
8263 if (ret)
8264 return ret;
8265
8266 ret = nfsd4_create_callback_queue();
8267 if (ret) {
8268 rhltable_destroy(&nfs4_file_rhltable);
8269 return ret;
8270 }
8271
8272 set_max_delegations();
8273 return 0;
8274 }
8275
8276 void
8277 nfs4_state_shutdown_net(struct net *net)
8278 {
8279 struct nfs4_delegation *dp = NULL;
8280 struct list_head *pos, *next, reaplist;
8281 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8282
8283 shrinker_free(nn->nfsd_client_shrinker);
8284 cancel_work(&nn->nfsd_shrinker_work);
8285 cancel_delayed_work_sync(&nn->laundromat_work);
8286 locks_end_grace(&nn->nfsd4_manager);
8287
8288 INIT_LIST_HEAD(&reaplist);
8289 spin_lock(&state_lock);
8290 list_for_each_safe(pos, next, &nn->del_recall_lru) {
8291 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
8292 WARN_ON(!unhash_delegation_locked(dp));
8293 list_add(&dp->dl_recall_lru, &reaplist);
8294 }
8295 spin_unlock(&state_lock);
8296 list_for_each_safe(pos, next, &reaplist) {
8297 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
8298 list_del_init(&dp->dl_recall_lru);
8299 destroy_unhashed_deleg(dp);
8300 }
8301
8302 nfsd4_client_tracking_exit(net);
8303 nfs4_state_destroy_net(net);
8304 #ifdef CONFIG_NFSD_V4_2_INTER_SSC
8305 nfsd4_ssc_shutdown_umount(nn);
8306 #endif
8307 }
8308
8309 void
8310 nfs4_state_shutdown(void)
8311 {
8312 nfsd4_destroy_callback_queue();
8313 rhltable_destroy(&nfs4_file_rhltable);
8314 }
8315
8316 static void
8317 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
8318 {
8319 if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
8320 CURRENT_STATEID(stateid))
8321 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
8322 }
8323
8324 static void
8325 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
8326 {
8327 if (cstate->minorversion) {
8328 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
8329 SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
8330 }
8331 }
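
/*
 * Illustrative flow for the NFSv4.1+ "current stateid" (a sketch, not
 * in the original source): when OPEN succeeds, nfsd4_set_openstateid()
 * below calls put_stateid() to save the result stateid in
 * cstate->current_stateid. If a later op in the same COMPOUND (e.g.
 * READ) carries the special current stateid (si_generation == 1, all
 * other fields zero), get_stateid() substitutes the saved value before
 * the stateid is looked up.
 */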
8332
8333 void
8334 clear_current_stateid(struct nfsd4_compound_state *cstate)
8335 {
8336 CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
8337 }
8338
8339 /*
8340 * functions to set current state id
8341 */
8342 void
8343 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
8344 union nfsd4_op_u *u)
8345 {
8346 put_stateid(cstate, &u->open_downgrade.od_stateid);
8347 }
8348
8349 void
8350 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
8351 union nfsd4_op_u *u)
8352 {
8353 put_stateid(cstate, &u->open.op_stateid);
8354 }
8355
8356 void
8357 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
8358 union nfsd4_op_u *u)
8359 {
8360 put_stateid(cstate, &u->close.cl_stateid);
8361 }
8362
8363 void
8364 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
8365 union nfsd4_op_u *u)
8366 {
8367 put_stateid(cstate, &u->lock.lk_resp_stateid);
8368 }
8369
8370 /*
8371 * functions to consume current state id
8372 */
8373
8374 void
8375 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
8376 union nfsd4_op_u *u)
8377 {
8378 get_stateid(cstate, &u->open_downgrade.od_stateid);
8379 }
8380
8381 void
8382 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
8383 union nfsd4_op_u *u)
8384 {
8385 get_stateid(cstate, &u->delegreturn.dr_stateid);
8386 }
8387
8388 void
8389 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
8390 union nfsd4_op_u *u)
8391 {
8392 get_stateid(cstate, &u->free_stateid.fr_stateid);
8393 }
8394
8395 void
8396 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
8397 union nfsd4_op_u *u)
8398 {
8399 get_stateid(cstate, &u->setattr.sa_stateid);
8400 }
8401
8402 void
8403 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
8404 union nfsd4_op_u *u)
8405 {
8406 get_stateid(cstate, &u->close.cl_stateid);
8407 }
8408
8409 void
8410 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
8411 union nfsd4_op_u *u)
8412 {
8413 get_stateid(cstate, &u->locku.lu_stateid);
8414 }
8415
8416 void
8417 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
8418 union nfsd4_op_u *u)
8419 {
8420 get_stateid(cstate, &u->read.rd_stateid);
8421 }
8422
8423 void
8424 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
8425 union nfsd4_op_u *u)
8426 {
8427 get_stateid(cstate, &u->write.wr_stateid);
8428 }
8429
8430 /**
8431 * nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
8432 * @rqstp: RPC transaction context
8433 * @inode: file to be checked for a conflict
8434 *
8435 * This function is called when there is a conflict between a write
8436 * delegation and a change/size GETATTR from another client. The server
8437 * must either use the CB_GETATTR to get the current values of the
8438 * attributes from the client that holds the delegation or recall the
8439 * delegation before replying to the GETATTR. See RFC 8881 section
8440 * 18.7.4.
8441 *
8442 * The current implementation does not support CB_GETATTR yet. However,
8443 * support for it, which would avoid recalling the delegation, could be
8444 * added in follow-up work.
8445 *
8446 * Returns 0 if there is no conflict; otherwise an nfs_stat
8447 * code is returned.
8448 */
8449 __be32
8450 nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode)
8451 {
8452 __be32 status;
8453 struct file_lock_context *ctx;
8454 struct file_lock *fl;
8455 struct nfs4_delegation *dp;
8456
8457 ctx = locks_inode_context(inode);
8458 if (!ctx)
8459 return 0;
8460 spin_lock(&ctx->flc_lock);
8461 list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
8462 if (fl->fl_flags == FL_LAYOUT)
8463 continue;
8464 if (fl->fl_lmops != &nfsd_lease_mng_ops) {
8465 /*
8466 * non-nfs lease, if it's a lease with F_RDLCK then
8467 * we are done; there isn't any write delegation
8468 * on this inode
8469 */
8470 if (fl->fl_type == F_RDLCK)
8471 break;
8472 goto break_lease;
8473 }
8474 if (fl->fl_type == F_WRLCK) {
8475 dp = fl->fl_owner;
8476 if (dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) {
8477 spin_unlock(&ctx->flc_lock);
8478 return 0;
8479 }
8480 break_lease:
8481 spin_unlock(&ctx->flc_lock);
8482 nfsd_stats_wdeleg_getattr_inc();
8483 status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
8484 if (status != nfserr_jukebox ||
8485 !nfsd_wait_for_delegreturn(rqstp, inode))
8486 return status;
8487 return 0;
8488 }
8489 break;
8490 }
8491 spin_unlock(&ctx->flc_lock);
8492 return 0;
8493 }
8494