/*
 * Copyright (c) 2001 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 * Andy Adamson <kandros@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include <linux/string_helpers.h>
#include <linux/fsnotify.h>
#include <linux/rhashtable.h>
#include <linux/nfs_ssc.h>

#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY NFSDDBG_PROC

#define all_ones {{ ~0, ~0}, ~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))

/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
void nfsd4_end_grace(struct nfsd_net *nn);
static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
static void nfsd4_file_hash_remove(struct nfs4_file *fi);
static void deleg_reaper(struct nfsd_net *nn);

/*
Locking: */ 93 94 /* 95 * Currently used for the del_recall_lru and file hash table. In an 96 * effort to decrease the scope of the client_mutex, this spinlock may 97 * eventually cover more: 98 */ 99 static DEFINE_SPINLOCK(state_lock); 100 101 enum nfsd4_st_mutex_lock_subclass { 102 OPEN_STATEID_MUTEX = 0, 103 LOCK_STATEID_MUTEX = 1, 104 }; 105 106 /* 107 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for 108 * the refcount on the open stateid to drop. 109 */ 110 static DECLARE_WAIT_QUEUE_HEAD(close_wq); 111 112 /* 113 * A waitqueue where a writer to clients/#/ctl destroying a client can 114 * wait for cl_rpc_users to drop to 0 and then for the client to be 115 * unhashed. 116 */ 117 static DECLARE_WAIT_QUEUE_HEAD(expiry_wq); 118 119 static struct kmem_cache *client_slab; 120 static struct kmem_cache *openowner_slab; 121 static struct kmem_cache *lockowner_slab; 122 static struct kmem_cache *file_slab; 123 static struct kmem_cache *stateid_slab; 124 static struct kmem_cache *deleg_slab; 125 static struct kmem_cache *odstate_slab; 126 127 static void free_session(struct nfsd4_session *); 128 129 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops; 130 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops; 131 static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops; 132 133 static struct workqueue_struct *laundry_wq; 134 135 int nfsd4_create_laundry_wq(void) 136 { 137 int rc = 0; 138 139 laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4"); 140 if (laundry_wq == NULL) 141 rc = -ENOMEM; 142 return rc; 143 } 144 145 void nfsd4_destroy_laundry_wq(void) 146 { 147 destroy_workqueue(laundry_wq); 148 } 149 150 static bool is_session_dead(struct nfsd4_session *ses) 151 { 152 return ses->se_dead; 153 } 154 155 static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me) 156 { 157 if (atomic_read(&ses->se_ref) > ref_held_by_me) 158 return nfserr_jukebox; 159 ses->se_dead = true; 160 return nfs_ok; 161 } 162 163 static bool is_client_expired(struct nfs4_client *clp) 164 { 165 return clp->cl_time == 0; 166 } 167 168 static void nfsd4_dec_courtesy_client_count(struct nfsd_net *nn, 169 struct nfs4_client *clp) 170 { 171 if (clp->cl_state != NFSD4_ACTIVE) 172 atomic_add_unless(&nn->nfsd_courtesy_clients, -1, 0); 173 } 174 175 static __be32 get_client_locked(struct nfs4_client *clp) 176 { 177 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 178 179 lockdep_assert_held(&nn->client_lock); 180 181 if (is_client_expired(clp)) 182 return nfserr_expired; 183 atomic_inc(&clp->cl_rpc_users); 184 nfsd4_dec_courtesy_client_count(nn, clp); 185 clp->cl_state = NFSD4_ACTIVE; 186 return nfs_ok; 187 } 188 189 /* must be called under the client_lock */ 190 static inline void 191 renew_client_locked(struct nfs4_client *clp) 192 { 193 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 194 195 if (is_client_expired(clp)) { 196 WARN_ON(1); 197 printk("%s: client (clientid %08x/%08x) already expired\n", 198 __func__, 199 clp->cl_clientid.cl_boot, 200 clp->cl_clientid.cl_id); 201 return; 202 } 203 204 list_move_tail(&clp->cl_lru, &nn->client_lru); 205 clp->cl_time = ktime_get_boottime_seconds(); 206 nfsd4_dec_courtesy_client_count(nn, clp); 207 clp->cl_state = NFSD4_ACTIVE; 208 } 209 210 static void put_client_renew_locked(struct nfs4_client *clp) 211 { 212 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 213 214 lockdep_assert_held(&nn->client_lock); 215 216 if (!atomic_dec_and_test(&clp->cl_rpc_users)) 217 return; 218 if 
(!is_client_expired(clp)) 219 renew_client_locked(clp); 220 else 221 wake_up_all(&expiry_wq); 222 } 223 224 static void put_client_renew(struct nfs4_client *clp) 225 { 226 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 227 228 if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock)) 229 return; 230 if (!is_client_expired(clp)) 231 renew_client_locked(clp); 232 else 233 wake_up_all(&expiry_wq); 234 spin_unlock(&nn->client_lock); 235 } 236 237 static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses) 238 { 239 __be32 status; 240 241 if (is_session_dead(ses)) 242 return nfserr_badsession; 243 status = get_client_locked(ses->se_client); 244 if (status) 245 return status; 246 atomic_inc(&ses->se_ref); 247 return nfs_ok; 248 } 249 250 static void nfsd4_put_session_locked(struct nfsd4_session *ses) 251 { 252 struct nfs4_client *clp = ses->se_client; 253 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 254 255 lockdep_assert_held(&nn->client_lock); 256 257 if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses)) 258 free_session(ses); 259 put_client_renew_locked(clp); 260 } 261 262 static void nfsd4_put_session(struct nfsd4_session *ses) 263 { 264 struct nfs4_client *clp = ses->se_client; 265 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 266 267 spin_lock(&nn->client_lock); 268 nfsd4_put_session_locked(ses); 269 spin_unlock(&nn->client_lock); 270 } 271 272 static struct nfsd4_blocked_lock * 273 find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh, 274 struct nfsd_net *nn) 275 { 276 struct nfsd4_blocked_lock *cur, *found = NULL; 277 278 spin_lock(&nn->blocked_locks_lock); 279 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) { 280 if (fh_match(fh, &cur->nbl_fh)) { 281 list_del_init(&cur->nbl_list); 282 WARN_ON(list_empty(&cur->nbl_lru)); 283 list_del_init(&cur->nbl_lru); 284 found = cur; 285 break; 286 } 287 } 288 spin_unlock(&nn->blocked_locks_lock); 289 if (found) 290 locks_delete_block(&found->nbl_lock); 291 return found; 292 } 293 294 static struct nfsd4_blocked_lock * 295 find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh, 296 struct nfsd_net *nn) 297 { 298 struct nfsd4_blocked_lock *nbl; 299 300 nbl = find_blocked_lock(lo, fh, nn); 301 if (!nbl) { 302 nbl = kmalloc(sizeof(*nbl), GFP_KERNEL); 303 if (nbl) { 304 INIT_LIST_HEAD(&nbl->nbl_list); 305 INIT_LIST_HEAD(&nbl->nbl_lru); 306 fh_copy_shallow(&nbl->nbl_fh, fh); 307 locks_init_lock(&nbl->nbl_lock); 308 kref_init(&nbl->nbl_kref); 309 nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client, 310 &nfsd4_cb_notify_lock_ops, 311 NFSPROC4_CLNT_CB_NOTIFY_LOCK); 312 } 313 } 314 return nbl; 315 } 316 317 static void 318 free_nbl(struct kref *kref) 319 { 320 struct nfsd4_blocked_lock *nbl; 321 322 nbl = container_of(kref, struct nfsd4_blocked_lock, nbl_kref); 323 locks_release_private(&nbl->nbl_lock); 324 kfree(nbl); 325 } 326 327 static void 328 free_blocked_lock(struct nfsd4_blocked_lock *nbl) 329 { 330 locks_delete_block(&nbl->nbl_lock); 331 kref_put(&nbl->nbl_kref, free_nbl); 332 } 333 334 static void 335 remove_blocked_locks(struct nfs4_lockowner *lo) 336 { 337 struct nfs4_client *clp = lo->lo_owner.so_client; 338 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 339 struct nfsd4_blocked_lock *nbl; 340 LIST_HEAD(reaplist); 341 342 /* Dequeue all blocked locks */ 343 spin_lock(&nn->blocked_locks_lock); 344 while (!list_empty(&lo->lo_blocked)) { 345 nbl = list_first_entry(&lo->lo_blocked, 346 struct nfsd4_blocked_lock, 347 nbl_list); 348 
list_del_init(&nbl->nbl_list); 349 WARN_ON(list_empty(&nbl->nbl_lru)); 350 list_move(&nbl->nbl_lru, &reaplist); 351 } 352 spin_unlock(&nn->blocked_locks_lock); 353 354 /* Now free them */ 355 while (!list_empty(&reaplist)) { 356 nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock, 357 nbl_lru); 358 list_del_init(&nbl->nbl_lru); 359 free_blocked_lock(nbl); 360 } 361 } 362 363 static void 364 nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb) 365 { 366 struct nfsd4_blocked_lock *nbl = container_of(cb, 367 struct nfsd4_blocked_lock, nbl_cb); 368 locks_delete_block(&nbl->nbl_lock); 369 } 370 371 static int 372 nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task) 373 { 374 trace_nfsd_cb_notify_lock_done(&zero_stateid, task); 375 376 /* 377 * Since this is just an optimization, we don't try very hard if it 378 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and 379 * just quit trying on anything else. 380 */ 381 switch (task->tk_status) { 382 case -NFS4ERR_DELAY: 383 rpc_delay(task, 1 * HZ); 384 return 0; 385 default: 386 return 1; 387 } 388 } 389 390 static void 391 nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb) 392 { 393 struct nfsd4_blocked_lock *nbl = container_of(cb, 394 struct nfsd4_blocked_lock, nbl_cb); 395 396 free_blocked_lock(nbl); 397 } 398 399 static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = { 400 .prepare = nfsd4_cb_notify_lock_prepare, 401 .done = nfsd4_cb_notify_lock_done, 402 .release = nfsd4_cb_notify_lock_release, 403 .opcode = OP_CB_NOTIFY_LOCK, 404 }; 405 406 /* 407 * We store the NONE, READ, WRITE, and BOTH bits separately in the 408 * st_{access,deny}_bmap field of the stateid, in order to track not 409 * only what share bits are currently in force, but also what 410 * combinations of share bits previous opens have used. This allows us 411 * to enforce the recommendation in 412 * https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that 413 * the server return an error if the client attempt to downgrade to a 414 * combination of share bits not explicable by closing some of its 415 * previous opens. 416 * 417 * This enforcement is arguably incomplete, since we don't keep 418 * track of access/deny bit combinations; so, e.g., we allow: 419 * 420 * OPEN allow read, deny write 421 * OPEN allow both, deny none 422 * DOWNGRADE allow read, deny none 423 * 424 * which we should reject. 425 * 426 * But you could also argue that our current code is already overkill, 427 * since it only exists to return NFS4ERR_INVAL on incorrect client 428 * behavior. 
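 *
 * As a worked example of the encoding: if previous OPENs have set bits
 * 1 (READ) and 3 (BOTH) in st_access_bmap, then bmap_to_share_mode() below
 * ORs those bit indices together and yields NFS4_SHARE_ACCESS_BOTH, i.e.
 * the union of every access mode that has been requested on the stateid.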
429 */ 430 static unsigned int 431 bmap_to_share_mode(unsigned long bmap) 432 { 433 int i; 434 unsigned int access = 0; 435 436 for (i = 1; i < 4; i++) { 437 if (test_bit(i, &bmap)) 438 access |= i; 439 } 440 return access; 441 } 442 443 /* set share access for a given stateid */ 444 static inline void 445 set_access(u32 access, struct nfs4_ol_stateid *stp) 446 { 447 unsigned char mask = 1 << access; 448 449 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH); 450 stp->st_access_bmap |= mask; 451 } 452 453 /* clear share access for a given stateid */ 454 static inline void 455 clear_access(u32 access, struct nfs4_ol_stateid *stp) 456 { 457 unsigned char mask = 1 << access; 458 459 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH); 460 stp->st_access_bmap &= ~mask; 461 } 462 463 /* test whether a given stateid has access */ 464 static inline bool 465 test_access(u32 access, struct nfs4_ol_stateid *stp) 466 { 467 unsigned char mask = 1 << access; 468 469 return (bool)(stp->st_access_bmap & mask); 470 } 471 472 /* set share deny for a given stateid */ 473 static inline void 474 set_deny(u32 deny, struct nfs4_ol_stateid *stp) 475 { 476 unsigned char mask = 1 << deny; 477 478 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH); 479 stp->st_deny_bmap |= mask; 480 } 481 482 /* clear share deny for a given stateid */ 483 static inline void 484 clear_deny(u32 deny, struct nfs4_ol_stateid *stp) 485 { 486 unsigned char mask = 1 << deny; 487 488 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH); 489 stp->st_deny_bmap &= ~mask; 490 } 491 492 /* test whether a given stateid is denying specific access */ 493 static inline bool 494 test_deny(u32 deny, struct nfs4_ol_stateid *stp) 495 { 496 unsigned char mask = 1 << deny; 497 498 return (bool)(stp->st_deny_bmap & mask); 499 } 500 501 static int nfs4_access_to_omode(u32 access) 502 { 503 switch (access & NFS4_SHARE_ACCESS_BOTH) { 504 case NFS4_SHARE_ACCESS_READ: 505 return O_RDONLY; 506 case NFS4_SHARE_ACCESS_WRITE: 507 return O_WRONLY; 508 case NFS4_SHARE_ACCESS_BOTH: 509 return O_RDWR; 510 } 511 WARN_ON_ONCE(1); 512 return O_RDONLY; 513 } 514 515 static inline int 516 access_permit_read(struct nfs4_ol_stateid *stp) 517 { 518 return test_access(NFS4_SHARE_ACCESS_READ, stp) || 519 test_access(NFS4_SHARE_ACCESS_BOTH, stp) || 520 test_access(NFS4_SHARE_ACCESS_WRITE, stp); 521 } 522 523 static inline int 524 access_permit_write(struct nfs4_ol_stateid *stp) 525 { 526 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) || 527 test_access(NFS4_SHARE_ACCESS_BOTH, stp); 528 } 529 530 static inline struct nfs4_stateowner * 531 nfs4_get_stateowner(struct nfs4_stateowner *sop) 532 { 533 atomic_inc(&sop->so_count); 534 return sop; 535 } 536 537 static int 538 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner) 539 { 540 return (sop->so_owner.len == owner->len) && 541 0 == memcmp(sop->so_owner.data, owner->data, owner->len); 542 } 543 544 static struct nfs4_openowner * 545 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open, 546 struct nfs4_client *clp) 547 { 548 struct nfs4_stateowner *so; 549 550 lockdep_assert_held(&clp->cl_lock); 551 552 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval], 553 so_strhash) { 554 if (!so->so_is_open_owner) 555 continue; 556 if (same_owner_str(so, &open->op_owner)) 557 return openowner(nfs4_get_stateowner(so)); 558 } 559 return NULL; 560 } 561 562 static inline u32 563 opaque_hashval(const void *ptr, int nbytes) 564 { 565 unsigned char *cptr = (unsigned char *) ptr; 566 567 u32 x = 0; 568 while (nbytes--) { 569 x *= 37; 570 
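		/* x = x * 37 + next byte: a simple polynomial hash over the opaque bytes */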
x += *cptr++; 571 } 572 return x; 573 } 574 575 void 576 put_nfs4_file(struct nfs4_file *fi) 577 { 578 if (refcount_dec_and_test(&fi->fi_ref)) { 579 nfsd4_file_hash_remove(fi); 580 WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate)); 581 WARN_ON_ONCE(!list_empty(&fi->fi_delegations)); 582 kfree_rcu(fi, fi_rcu); 583 } 584 } 585 586 static struct nfsd_file * 587 find_writeable_file_locked(struct nfs4_file *f) 588 { 589 struct nfsd_file *ret; 590 591 lockdep_assert_held(&f->fi_lock); 592 593 ret = nfsd_file_get(f->fi_fds[O_WRONLY]); 594 if (!ret) 595 ret = nfsd_file_get(f->fi_fds[O_RDWR]); 596 return ret; 597 } 598 599 static struct nfsd_file * 600 find_writeable_file(struct nfs4_file *f) 601 { 602 struct nfsd_file *ret; 603 604 spin_lock(&f->fi_lock); 605 ret = find_writeable_file_locked(f); 606 spin_unlock(&f->fi_lock); 607 608 return ret; 609 } 610 611 static struct nfsd_file * 612 find_readable_file_locked(struct nfs4_file *f) 613 { 614 struct nfsd_file *ret; 615 616 lockdep_assert_held(&f->fi_lock); 617 618 ret = nfsd_file_get(f->fi_fds[O_RDONLY]); 619 if (!ret) 620 ret = nfsd_file_get(f->fi_fds[O_RDWR]); 621 return ret; 622 } 623 624 static struct nfsd_file * 625 find_readable_file(struct nfs4_file *f) 626 { 627 struct nfsd_file *ret; 628 629 spin_lock(&f->fi_lock); 630 ret = find_readable_file_locked(f); 631 spin_unlock(&f->fi_lock); 632 633 return ret; 634 } 635 636 static struct nfsd_file * 637 find_rw_file(struct nfs4_file *f) 638 { 639 struct nfsd_file *ret; 640 641 spin_lock(&f->fi_lock); 642 ret = nfsd_file_get(f->fi_fds[O_RDWR]); 643 spin_unlock(&f->fi_lock); 644 645 return ret; 646 } 647 648 struct nfsd_file * 649 find_any_file(struct nfs4_file *f) 650 { 651 struct nfsd_file *ret; 652 653 if (!f) 654 return NULL; 655 spin_lock(&f->fi_lock); 656 ret = nfsd_file_get(f->fi_fds[O_RDWR]); 657 if (!ret) { 658 ret = nfsd_file_get(f->fi_fds[O_WRONLY]); 659 if (!ret) 660 ret = nfsd_file_get(f->fi_fds[O_RDONLY]); 661 } 662 spin_unlock(&f->fi_lock); 663 return ret; 664 } 665 666 static struct nfsd_file *find_any_file_locked(struct nfs4_file *f) 667 { 668 lockdep_assert_held(&f->fi_lock); 669 670 if (f->fi_fds[O_RDWR]) 671 return f->fi_fds[O_RDWR]; 672 if (f->fi_fds[O_WRONLY]) 673 return f->fi_fds[O_WRONLY]; 674 if (f->fi_fds[O_RDONLY]) 675 return f->fi_fds[O_RDONLY]; 676 return NULL; 677 } 678 679 static atomic_long_t num_delegations; 680 unsigned long max_delegations; 681 682 /* 683 * Open owner state (share locks) 684 */ 685 686 /* hash tables for lock and open owners */ 687 #define OWNER_HASH_BITS 8 688 #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS) 689 #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1) 690 691 static unsigned int ownerstr_hashval(struct xdr_netobj *ownername) 692 { 693 unsigned int ret; 694 695 ret = opaque_hashval(ownername->data, ownername->len); 696 return ret & OWNER_HASH_MASK; 697 } 698 699 static struct rhltable nfs4_file_rhltable ____cacheline_aligned_in_smp; 700 701 static const struct rhashtable_params nfs4_file_rhash_params = { 702 .key_len = sizeof_field(struct nfs4_file, fi_inode), 703 .key_offset = offsetof(struct nfs4_file, fi_inode), 704 .head_offset = offsetof(struct nfs4_file, fi_rlist), 705 706 /* 707 * Start with a single page hash table to reduce resizing churn 708 * on light workloads. 709 */ 710 .min_size = 256, 711 .automatic_shrinking = true, 712 }; 713 714 /* 715 * Check if courtesy clients have conflicting access and resolve it if possible 716 * 717 * access: is op_share_access if share_access is true. 
718 * Check if access mode, op_share_access, would conflict with 719 * the current deny mode of the file 'fp'. 720 * access: is op_share_deny if share_access is false. 721 * Check if the deny mode, op_share_deny, would conflict with 722 * current access of the file 'fp'. 723 * stp: skip checking this entry. 724 * new_stp: normal open, not open upgrade. 725 * 726 * Function returns: 727 * false - access/deny mode conflict with normal client. 728 * true - no conflict or conflict with courtesy client(s) is resolved. 729 */ 730 static bool 731 nfs4_resolve_deny_conflicts_locked(struct nfs4_file *fp, bool new_stp, 732 struct nfs4_ol_stateid *stp, u32 access, bool share_access) 733 { 734 struct nfs4_ol_stateid *st; 735 bool resolvable = true; 736 unsigned char bmap; 737 struct nfsd_net *nn; 738 struct nfs4_client *clp; 739 740 lockdep_assert_held(&fp->fi_lock); 741 list_for_each_entry(st, &fp->fi_stateids, st_perfile) { 742 /* ignore lock stateid */ 743 if (st->st_openstp) 744 continue; 745 if (st == stp && new_stp) 746 continue; 747 /* check file access against deny mode or vice versa */ 748 bmap = share_access ? st->st_deny_bmap : st->st_access_bmap; 749 if (!(access & bmap_to_share_mode(bmap))) 750 continue; 751 clp = st->st_stid.sc_client; 752 if (try_to_expire_client(clp)) 753 continue; 754 resolvable = false; 755 break; 756 } 757 if (resolvable) { 758 clp = stp->st_stid.sc_client; 759 nn = net_generic(clp->net, nfsd_net_id); 760 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); 761 } 762 return resolvable; 763 } 764 765 static void 766 __nfs4_file_get_access(struct nfs4_file *fp, u32 access) 767 { 768 lockdep_assert_held(&fp->fi_lock); 769 770 if (access & NFS4_SHARE_ACCESS_WRITE) 771 atomic_inc(&fp->fi_access[O_WRONLY]); 772 if (access & NFS4_SHARE_ACCESS_READ) 773 atomic_inc(&fp->fi_access[O_RDONLY]); 774 } 775 776 static __be32 777 nfs4_file_get_access(struct nfs4_file *fp, u32 access) 778 { 779 lockdep_assert_held(&fp->fi_lock); 780 781 /* Does this access mode make sense? */ 782 if (access & ~NFS4_SHARE_ACCESS_BOTH) 783 return nfserr_inval; 784 785 /* Does it conflict with a deny mode already set? */ 786 if ((access & fp->fi_share_deny) != 0) 787 return nfserr_share_denied; 788 789 __nfs4_file_get_access(fp, access); 790 return nfs_ok; 791 } 792 793 static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny) 794 { 795 /* Common case is that there is no deny mode. */ 796 if (deny) { 797 /* Does this deny mode make sense? 
*/ 798 if (deny & ~NFS4_SHARE_DENY_BOTH) 799 return nfserr_inval; 800 801 if ((deny & NFS4_SHARE_DENY_READ) && 802 atomic_read(&fp->fi_access[O_RDONLY])) 803 return nfserr_share_denied; 804 805 if ((deny & NFS4_SHARE_DENY_WRITE) && 806 atomic_read(&fp->fi_access[O_WRONLY])) 807 return nfserr_share_denied; 808 } 809 return nfs_ok; 810 } 811 812 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag) 813 { 814 might_lock(&fp->fi_lock); 815 816 if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) { 817 struct nfsd_file *f1 = NULL; 818 struct nfsd_file *f2 = NULL; 819 820 swap(f1, fp->fi_fds[oflag]); 821 if (atomic_read(&fp->fi_access[1 - oflag]) == 0) 822 swap(f2, fp->fi_fds[O_RDWR]); 823 spin_unlock(&fp->fi_lock); 824 if (f1) 825 nfsd_file_put(f1); 826 if (f2) 827 nfsd_file_put(f2); 828 } 829 } 830 831 static void nfs4_file_put_access(struct nfs4_file *fp, u32 access) 832 { 833 WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH); 834 835 if (access & NFS4_SHARE_ACCESS_WRITE) 836 __nfs4_file_put_access(fp, O_WRONLY); 837 if (access & NFS4_SHARE_ACCESS_READ) 838 __nfs4_file_put_access(fp, O_RDONLY); 839 } 840 841 /* 842 * Allocate a new open/delegation state counter. This is needed for 843 * pNFS for proper return on close semantics. 844 * 845 * Note that we only allocate it for pNFS-enabled exports, otherwise 846 * all pointers to struct nfs4_clnt_odstate are always NULL. 847 */ 848 static struct nfs4_clnt_odstate * 849 alloc_clnt_odstate(struct nfs4_client *clp) 850 { 851 struct nfs4_clnt_odstate *co; 852 853 co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL); 854 if (co) { 855 co->co_client = clp; 856 refcount_set(&co->co_odcount, 1); 857 } 858 return co; 859 } 860 861 static void 862 hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co) 863 { 864 struct nfs4_file *fp = co->co_file; 865 866 lockdep_assert_held(&fp->fi_lock); 867 list_add(&co->co_perfile, &fp->fi_clnt_odstate); 868 } 869 870 static inline void 871 get_clnt_odstate(struct nfs4_clnt_odstate *co) 872 { 873 if (co) 874 refcount_inc(&co->co_odcount); 875 } 876 877 static void 878 put_clnt_odstate(struct nfs4_clnt_odstate *co) 879 { 880 struct nfs4_file *fp; 881 882 if (!co) 883 return; 884 885 fp = co->co_file; 886 if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) { 887 list_del(&co->co_perfile); 888 spin_unlock(&fp->fi_lock); 889 890 nfsd4_return_all_file_layouts(co->co_client, fp); 891 kmem_cache_free(odstate_slab, co); 892 } 893 } 894 895 static struct nfs4_clnt_odstate * 896 find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new) 897 { 898 struct nfs4_clnt_odstate *co; 899 struct nfs4_client *cl; 900 901 if (!new) 902 return NULL; 903 904 cl = new->co_client; 905 906 spin_lock(&fp->fi_lock); 907 list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) { 908 if (co->co_client == cl) { 909 get_clnt_odstate(co); 910 goto out; 911 } 912 } 913 co = new; 914 co->co_file = fp; 915 hash_clnt_odstate_locked(new); 916 out: 917 spin_unlock(&fp->fi_lock); 918 return co; 919 } 920 921 struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab, 922 void (*sc_free)(struct nfs4_stid *)) 923 { 924 struct nfs4_stid *stid; 925 int new_id; 926 927 stid = kmem_cache_zalloc(slab, GFP_KERNEL); 928 if (!stid) 929 return NULL; 930 931 idr_preload(GFP_KERNEL); 932 spin_lock(&cl->cl_lock); 933 /* Reserving 0 for start of file in nfsdfs "states" file: */ 934 new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT); 935 spin_unlock(&cl->cl_lock); 936 idr_preload_end(); 937 
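	/*
	 * idr_preload() pre-allocated with GFP_KERNEL above, so the GFP_NOWAIT
	 * allocation under cl_lock did not need to sleep; a negative new_id
	 * means the ID allocation nevertheless failed.
	 */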
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	refcount_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);
	INIT_LIST_HEAD(&stid->sc_cp_list);

	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}

/*
 * Create a unique stateid_t to represent each COPY.
 */
static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
			      unsigned char cs_type)
{
	int new_id;

	stid->cs_stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
	stid->cs_stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;

	idr_preload(GFP_KERNEL);
	spin_lock(&nn->s2s_cp_lock);
	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
	stid->cs_stid.si_opaque.so_id = new_id;
	stid->cs_stid.si_generation = 1;
	spin_unlock(&nn->s2s_cp_lock);
	idr_preload_end();
	if (new_id < 0)
		return 0;
	stid->cs_type = cs_type;
	return 1;
}

int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
{
	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
}

struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
						     struct nfs4_stid *p_stid)
{
	struct nfs4_cpntf_state *cps;

	cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
	if (!cps)
		return NULL;
	cps->cpntf_time = ktime_get_boottime_seconds();
	refcount_set(&cps->cp_stateid.cs_count, 1);
	if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
		goto out_free;
	spin_lock(&nn->s2s_cp_lock);
	list_add(&cps->cp_list, &p_stid->sc_cp_list);
	spin_unlock(&nn->s2s_cp_lock);
	return cps;
out_free:
	kfree(cps);
	return NULL;
}

void nfs4_free_copy_state(struct nfsd4_copy *copy)
{
	struct nfsd_net *nn;

	if (copy->cp_stateid.cs_type != NFS4_COPY_STID)
		return;
	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	idr_remove(&nn->s2s_cp_stateids,
		   copy->cp_stateid.cs_stid.si_opaque.so_id);
	spin_unlock(&nn->s2s_cp_lock);
}

static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
{
	struct nfs4_cpntf_state *cps;
	struct nfsd_net *nn;

	nn = net_generic(net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	while (!list_empty(&stid->sc_cp_list)) {
		cps = list_first_entry(&stid->sc_cp_list,
				       struct nfs4_cpntf_state, cp_list);
		_free_cpntf_state_locked(nn, cps);
	}
	spin_unlock(&nn->s2s_cp_lock);
}

static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;

	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
	if (!stid)
		return NULL;

	return openlockstateid(stid);
}

/*
 * As the sc_free callback of deleg, this may be called by nfs4_put_stid
 * in nfsd_break_one_deleg.
 * Considering nfsd_break_one_deleg is called with the flc->flc_lock held,
 * this function mustn't ever sleep.
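 * It only asserts list emptiness, frees slab memory and decrements an
 * atomic counter, all of which are safe in that context.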
1049 */ 1050 static void nfs4_free_deleg(struct nfs4_stid *stid) 1051 { 1052 struct nfs4_delegation *dp = delegstateid(stid); 1053 1054 WARN_ON_ONCE(!list_empty(&stid->sc_cp_list)); 1055 WARN_ON_ONCE(!list_empty(&dp->dl_perfile)); 1056 WARN_ON_ONCE(!list_empty(&dp->dl_perclnt)); 1057 WARN_ON_ONCE(!list_empty(&dp->dl_recall_lru)); 1058 kmem_cache_free(deleg_slab, stid); 1059 atomic_long_dec(&num_delegations); 1060 } 1061 1062 /* 1063 * When we recall a delegation, we should be careful not to hand it 1064 * out again straight away. 1065 * To ensure this we keep a pair of bloom filters ('new' and 'old') 1066 * in which the filehandles of recalled delegations are "stored". 1067 * If a filehandle appear in either filter, a delegation is blocked. 1068 * When a delegation is recalled, the filehandle is stored in the "new" 1069 * filter. 1070 * Every 30 seconds we swap the filters and clear the "new" one, 1071 * unless both are empty of course. This results in delegations for a 1072 * given filehandle being blocked for between 30 and 60 seconds. 1073 * 1074 * Each filter is 256 bits. We hash the filehandle to 32bit and use the 1075 * low 3 bytes as hash-table indices. 1076 * 1077 * 'blocked_delegations_lock', which is always taken in block_delegations(), 1078 * is used to manage concurrent access. Testing does not need the lock 1079 * except when swapping the two filters. 1080 */ 1081 static DEFINE_SPINLOCK(blocked_delegations_lock); 1082 static struct bloom_pair { 1083 int entries, old_entries; 1084 time64_t swap_time; 1085 int new; /* index into 'set' */ 1086 DECLARE_BITMAP(set[2], 256); 1087 } blocked_delegations; 1088 1089 static int delegation_blocked(struct knfsd_fh *fh) 1090 { 1091 u32 hash; 1092 struct bloom_pair *bd = &blocked_delegations; 1093 1094 if (bd->entries == 0) 1095 return 0; 1096 if (ktime_get_seconds() - bd->swap_time > 30) { 1097 spin_lock(&blocked_delegations_lock); 1098 if (ktime_get_seconds() - bd->swap_time > 30) { 1099 bd->entries -= bd->old_entries; 1100 bd->old_entries = bd->entries; 1101 bd->new = 1-bd->new; 1102 memset(bd->set[bd->new], 0, 1103 sizeof(bd->set[0])); 1104 bd->swap_time = ktime_get_seconds(); 1105 } 1106 spin_unlock(&blocked_delegations_lock); 1107 } 1108 hash = jhash(&fh->fh_raw, fh->fh_size, 0); 1109 if (test_bit(hash&255, bd->set[0]) && 1110 test_bit((hash>>8)&255, bd->set[0]) && 1111 test_bit((hash>>16)&255, bd->set[0])) 1112 return 1; 1113 1114 if (test_bit(hash&255, bd->set[1]) && 1115 test_bit((hash>>8)&255, bd->set[1]) && 1116 test_bit((hash>>16)&255, bd->set[1])) 1117 return 1; 1118 1119 return 0; 1120 } 1121 1122 static void block_delegations(struct knfsd_fh *fh) 1123 { 1124 u32 hash; 1125 struct bloom_pair *bd = &blocked_delegations; 1126 1127 hash = jhash(&fh->fh_raw, fh->fh_size, 0); 1128 1129 spin_lock(&blocked_delegations_lock); 1130 __set_bit(hash&255, bd->set[bd->new]); 1131 __set_bit((hash>>8)&255, bd->set[bd->new]); 1132 __set_bit((hash>>16)&255, bd->set[bd->new]); 1133 if (bd->entries == 0) 1134 bd->swap_time = ktime_get_seconds(); 1135 bd->entries += 1; 1136 spin_unlock(&blocked_delegations_lock); 1137 } 1138 1139 static struct nfs4_delegation * 1140 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp, 1141 struct nfs4_clnt_odstate *odstate, u32 dl_type) 1142 { 1143 struct nfs4_delegation *dp; 1144 struct nfs4_stid *stid; 1145 long n; 1146 1147 dprintk("NFSD alloc_init_deleg\n"); 1148 n = atomic_long_inc_return(&num_delegations); 1149 if (n < 0 || n > max_delegations) 1150 goto out_dec; 1151 if 
(delegation_blocked(&fp->fi_fhandle)) 1152 goto out_dec; 1153 stid = nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg); 1154 if (stid == NULL) 1155 goto out_dec; 1156 dp = delegstateid(stid); 1157 1158 /* 1159 * delegation seqid's are never incremented. The 4.1 special 1160 * meaning of seqid 0 isn't meaningful, really, but let's avoid 1161 * 0 anyway just for consistency and use 1: 1162 */ 1163 dp->dl_stid.sc_stateid.si_generation = 1; 1164 INIT_LIST_HEAD(&dp->dl_perfile); 1165 INIT_LIST_HEAD(&dp->dl_perclnt); 1166 INIT_LIST_HEAD(&dp->dl_recall_lru); 1167 dp->dl_clnt_odstate = odstate; 1168 get_clnt_odstate(odstate); 1169 dp->dl_type = dl_type; 1170 dp->dl_retries = 1; 1171 dp->dl_recalled = false; 1172 nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client, 1173 &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL); 1174 nfsd4_init_cb(&dp->dl_cb_fattr.ncf_getattr, dp->dl_stid.sc_client, 1175 &nfsd4_cb_getattr_ops, NFSPROC4_CLNT_CB_GETATTR); 1176 dp->dl_cb_fattr.ncf_file_modified = false; 1177 get_nfs4_file(fp); 1178 dp->dl_stid.sc_file = fp; 1179 return dp; 1180 out_dec: 1181 atomic_long_dec(&num_delegations); 1182 return NULL; 1183 } 1184 1185 void 1186 nfs4_put_stid(struct nfs4_stid *s) 1187 { 1188 struct nfs4_file *fp = s->sc_file; 1189 struct nfs4_client *clp = s->sc_client; 1190 1191 might_lock(&clp->cl_lock); 1192 1193 if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) { 1194 wake_up_all(&close_wq); 1195 return; 1196 } 1197 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); 1198 if (s->sc_status & SC_STATUS_ADMIN_REVOKED) 1199 atomic_dec(&s->sc_client->cl_admin_revoked); 1200 nfs4_free_cpntf_statelist(clp->net, s); 1201 spin_unlock(&clp->cl_lock); 1202 s->sc_free(s); 1203 if (fp) 1204 put_nfs4_file(fp); 1205 } 1206 1207 void 1208 nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid) 1209 { 1210 stateid_t *src = &stid->sc_stateid; 1211 1212 spin_lock(&stid->sc_lock); 1213 if (unlikely(++src->si_generation == 0)) 1214 src->si_generation = 1; 1215 memcpy(dst, src, sizeof(*dst)); 1216 spin_unlock(&stid->sc_lock); 1217 } 1218 1219 static void put_deleg_file(struct nfs4_file *fp) 1220 { 1221 struct nfsd_file *nf = NULL; 1222 1223 spin_lock(&fp->fi_lock); 1224 if (--fp->fi_delegees == 0) 1225 swap(nf, fp->fi_deleg_file); 1226 spin_unlock(&fp->fi_lock); 1227 1228 if (nf) 1229 nfsd_file_put(nf); 1230 } 1231 1232 static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp) 1233 { 1234 struct nfs4_file *fp = dp->dl_stid.sc_file; 1235 struct nfsd_file *nf = fp->fi_deleg_file; 1236 1237 WARN_ON_ONCE(!fp->fi_delegees); 1238 1239 kernel_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp); 1240 put_deleg_file(fp); 1241 } 1242 1243 static void destroy_unhashed_deleg(struct nfs4_delegation *dp) 1244 { 1245 put_clnt_odstate(dp->dl_clnt_odstate); 1246 nfs4_unlock_deleg_lease(dp); 1247 nfs4_put_stid(&dp->dl_stid); 1248 } 1249 1250 /** 1251 * nfs4_delegation_exists - Discover if this delegation already exists 1252 * @clp: a pointer to the nfs4_client we're granting a delegation to 1253 * @fp: a pointer to the nfs4_file we're granting a delegation on 1254 * 1255 * Return: 1256 * On success: true iff an existing delegation is found 1257 */ 1258 1259 static bool 1260 nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp) 1261 { 1262 struct nfs4_delegation *searchdp = NULL; 1263 struct nfs4_client *searchclp = NULL; 1264 1265 lockdep_assert_held(&state_lock); 1266 lockdep_assert_held(&fp->fi_lock); 1267 1268 list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) { 
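		/* is there already a delegation on this file for the same client? */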
		searchclp = searchdp->dl_stid.sc_client;
		if (clp == searchclp) {
			return true;
		}
	}
	return false;
}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp: a pointer to the nfs4_delegation we are adding.
 * @fp: a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: NULL if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                nfs4_client for this nfs4_file. Delegation is not hashed.
 *
 */

static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);
	lockdep_assert_held(&clp->cl_lock);

	if (nfs4_delegation_exists(clp, fp))
		return -EAGAIN;
	refcount_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = SC_TYPE_DELEG;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return 0;
}

static bool delegation_hashed(struct nfs4_delegation *dp)
{
	return !(list_empty(&dp->dl_perfile));
}

static bool
unhash_delegation_locked(struct nfs4_delegation *dp, unsigned short statusmask)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	if (!delegation_hashed(dp))
		return false;

	if (statusmask == SC_STATUS_REVOKED &&
	    dp->dl_stid.sc_client->cl_minorversion == 0)
		statusmask = SC_STATUS_CLOSED;
	dp->dl_stid.sc_status |= statusmask;
	if (statusmask & SC_STATUS_ADMIN_REVOKED)
		atomic_inc(&dp->dl_stid.sc_client->cl_admin_revoked);

	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	return true;
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	bool unhashed;

	spin_lock(&state_lock);
	unhashed = unhash_delegation_locked(dp, SC_STATUS_CLOSED);
	spin_unlock(&state_lock);
	if (unhashed)
		destroy_unhashed_deleg(dp);
}

/**
 * revoke_delegation - perform nfs4 delegation structure cleanup
 * @dp: pointer to the delegation
 *
 * This function assumes that it's called either from the administrative
 * interface (nfsd4_revoke_states()) that's revoking a specific delegation
 * stateid or it's called from a laundromat thread (nfsd4_laundromat()) that
 * determined that this specific state has expired and needs to be revoked
 * (both mark state with the appropriate stid sc_status mode). It is also
 * assumed that a reference was taken on the @dp state.
 *
 * If this function finds that the @dp state is SC_STATUS_FREED it means
 * that a FREE_STATEID operation for this stateid has been processed and
 * we can proceed to removing it from the recalled list. However, if the
 * @dp state isn't marked SC_STATUS_FREED, it means we need to place it on
 * the cl_revoked list and wait for the FREE_STATEID to arrive from the client.
At the same 1367 * time, we need to mark it as SC_STATUS_FREEABLE to indicate to the 1368 * nfsd4_free_stateid() function that this stateid has already been added 1369 * to the cl_revoked list and that nfsd4_free_stateid() is now responsible 1370 * for removing it from the list. Inspection of where the delegation state 1371 * in the revocation process is protected by the clp->cl_lock. 1372 */ 1373 static void revoke_delegation(struct nfs4_delegation *dp) 1374 { 1375 struct nfs4_client *clp = dp->dl_stid.sc_client; 1376 1377 WARN_ON(!list_empty(&dp->dl_recall_lru)); 1378 WARN_ON_ONCE(dp->dl_stid.sc_client->cl_minorversion > 0 && 1379 !(dp->dl_stid.sc_status & 1380 (SC_STATUS_REVOKED | SC_STATUS_ADMIN_REVOKED))); 1381 1382 trace_nfsd_stid_revoke(&dp->dl_stid); 1383 1384 spin_lock(&clp->cl_lock); 1385 if (dp->dl_stid.sc_status & SC_STATUS_FREED) { 1386 list_del_init(&dp->dl_recall_lru); 1387 goto out; 1388 } 1389 list_add(&dp->dl_recall_lru, &clp->cl_revoked); 1390 dp->dl_stid.sc_status |= SC_STATUS_FREEABLE; 1391 out: 1392 spin_unlock(&clp->cl_lock); 1393 destroy_unhashed_deleg(dp); 1394 } 1395 1396 /* 1397 * SETCLIENTID state 1398 */ 1399 1400 static unsigned int clientid_hashval(u32 id) 1401 { 1402 return id & CLIENT_HASH_MASK; 1403 } 1404 1405 static unsigned int clientstr_hashval(struct xdr_netobj name) 1406 { 1407 return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK; 1408 } 1409 1410 /* 1411 * A stateid that had a deny mode associated with it is being released 1412 * or downgraded. Recalculate the deny mode on the file. 1413 */ 1414 static void 1415 recalculate_deny_mode(struct nfs4_file *fp) 1416 { 1417 struct nfs4_ol_stateid *stp; 1418 u32 old_deny; 1419 1420 spin_lock(&fp->fi_lock); 1421 old_deny = fp->fi_share_deny; 1422 fp->fi_share_deny = 0; 1423 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) { 1424 fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap); 1425 if (fp->fi_share_deny == old_deny) 1426 break; 1427 } 1428 spin_unlock(&fp->fi_lock); 1429 } 1430 1431 static void 1432 reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp) 1433 { 1434 int i; 1435 bool change = false; 1436 1437 for (i = 1; i < 4; i++) { 1438 if ((i & deny) != i) { 1439 change = true; 1440 clear_deny(i, stp); 1441 } 1442 } 1443 1444 /* Recalculate per-file deny mode if there was a change */ 1445 if (change) 1446 recalculate_deny_mode(stp->st_stid.sc_file); 1447 } 1448 1449 /* release all access and file references for a given stateid */ 1450 static void 1451 release_all_access(struct nfs4_ol_stateid *stp) 1452 { 1453 int i; 1454 struct nfs4_file *fp = stp->st_stid.sc_file; 1455 1456 if (fp && stp->st_deny_bmap != 0) 1457 recalculate_deny_mode(fp); 1458 1459 for (i = 1; i < 4; i++) { 1460 if (test_access(i, stp)) 1461 nfs4_file_put_access(stp->st_stid.sc_file, i); 1462 clear_access(i, stp); 1463 } 1464 } 1465 1466 static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop) 1467 { 1468 kfree(sop->so_owner.data); 1469 sop->so_ops->so_free(sop); 1470 } 1471 1472 static void nfs4_put_stateowner(struct nfs4_stateowner *sop) 1473 { 1474 struct nfs4_client *clp = sop->so_client; 1475 1476 might_lock(&clp->cl_lock); 1477 1478 if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock)) 1479 return; 1480 sop->so_ops->so_unhash(sop); 1481 spin_unlock(&clp->cl_lock); 1482 nfs4_free_stateowner(sop); 1483 } 1484 1485 static bool 1486 nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp) 1487 { 1488 return list_empty(&stp->st_perfile); 1489 } 1490 1491 static bool unhash_ol_stateid(struct 
nfs4_ol_stateid *stp) 1492 { 1493 struct nfs4_file *fp = stp->st_stid.sc_file; 1494 1495 lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock); 1496 1497 if (list_empty(&stp->st_perfile)) 1498 return false; 1499 1500 spin_lock(&fp->fi_lock); 1501 list_del_init(&stp->st_perfile); 1502 spin_unlock(&fp->fi_lock); 1503 list_del(&stp->st_perstateowner); 1504 return true; 1505 } 1506 1507 static void nfs4_free_ol_stateid(struct nfs4_stid *stid) 1508 { 1509 struct nfs4_ol_stateid *stp = openlockstateid(stid); 1510 1511 put_clnt_odstate(stp->st_clnt_odstate); 1512 release_all_access(stp); 1513 if (stp->st_stateowner) 1514 nfs4_put_stateowner(stp->st_stateowner); 1515 WARN_ON(!list_empty(&stid->sc_cp_list)); 1516 kmem_cache_free(stateid_slab, stid); 1517 } 1518 1519 static void nfs4_free_lock_stateid(struct nfs4_stid *stid) 1520 { 1521 struct nfs4_ol_stateid *stp = openlockstateid(stid); 1522 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner); 1523 struct nfsd_file *nf; 1524 1525 nf = find_any_file(stp->st_stid.sc_file); 1526 if (nf) { 1527 get_file(nf->nf_file); 1528 filp_close(nf->nf_file, (fl_owner_t)lo); 1529 nfsd_file_put(nf); 1530 } 1531 nfs4_free_ol_stateid(stid); 1532 } 1533 1534 /* 1535 * Put the persistent reference to an already unhashed generic stateid, while 1536 * holding the cl_lock. If it's the last reference, then put it onto the 1537 * reaplist for later destruction. 1538 */ 1539 static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp, 1540 struct list_head *reaplist) 1541 { 1542 struct nfs4_stid *s = &stp->st_stid; 1543 struct nfs4_client *clp = s->sc_client; 1544 1545 lockdep_assert_held(&clp->cl_lock); 1546 1547 WARN_ON_ONCE(!list_empty(&stp->st_locks)); 1548 1549 if (!refcount_dec_and_test(&s->sc_count)) { 1550 wake_up_all(&close_wq); 1551 return; 1552 } 1553 1554 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id); 1555 if (s->sc_status & SC_STATUS_ADMIN_REVOKED) 1556 atomic_dec(&s->sc_client->cl_admin_revoked); 1557 list_add(&stp->st_locks, reaplist); 1558 } 1559 1560 static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp) 1561 { 1562 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); 1563 1564 if (!unhash_ol_stateid(stp)) 1565 return false; 1566 list_del_init(&stp->st_locks); 1567 stp->st_stid.sc_status |= SC_STATUS_CLOSED; 1568 return true; 1569 } 1570 1571 static void release_lock_stateid(struct nfs4_ol_stateid *stp) 1572 { 1573 struct nfs4_client *clp = stp->st_stid.sc_client; 1574 bool unhashed; 1575 1576 spin_lock(&clp->cl_lock); 1577 unhashed = unhash_lock_stateid(stp); 1578 spin_unlock(&clp->cl_lock); 1579 if (unhashed) 1580 nfs4_put_stid(&stp->st_stid); 1581 } 1582 1583 static void unhash_lockowner_locked(struct nfs4_lockowner *lo) 1584 { 1585 struct nfs4_client *clp = lo->lo_owner.so_client; 1586 1587 lockdep_assert_held(&clp->cl_lock); 1588 1589 list_del_init(&lo->lo_owner.so_strhash); 1590 } 1591 1592 /* 1593 * Free a list of generic stateids that were collected earlier after being 1594 * fully unhashed. 
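 * Freeing may sleep, hence the might_sleep() annotation in the function below.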
1595 */ 1596 static void 1597 free_ol_stateid_reaplist(struct list_head *reaplist) 1598 { 1599 struct nfs4_ol_stateid *stp; 1600 struct nfs4_file *fp; 1601 1602 might_sleep(); 1603 1604 while (!list_empty(reaplist)) { 1605 stp = list_first_entry(reaplist, struct nfs4_ol_stateid, 1606 st_locks); 1607 list_del(&stp->st_locks); 1608 fp = stp->st_stid.sc_file; 1609 stp->st_stid.sc_free(&stp->st_stid); 1610 if (fp) 1611 put_nfs4_file(fp); 1612 } 1613 } 1614 1615 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp, 1616 struct list_head *reaplist) 1617 { 1618 struct nfs4_ol_stateid *stp; 1619 1620 lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock); 1621 1622 while (!list_empty(&open_stp->st_locks)) { 1623 stp = list_entry(open_stp->st_locks.next, 1624 struct nfs4_ol_stateid, st_locks); 1625 unhash_lock_stateid(stp); 1626 put_ol_stateid_locked(stp, reaplist); 1627 } 1628 } 1629 1630 static bool unhash_open_stateid(struct nfs4_ol_stateid *stp, 1631 struct list_head *reaplist) 1632 { 1633 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); 1634 1635 if (!unhash_ol_stateid(stp)) 1636 return false; 1637 release_open_stateid_locks(stp, reaplist); 1638 return true; 1639 } 1640 1641 static void release_open_stateid(struct nfs4_ol_stateid *stp) 1642 { 1643 LIST_HEAD(reaplist); 1644 1645 spin_lock(&stp->st_stid.sc_client->cl_lock); 1646 stp->st_stid.sc_status |= SC_STATUS_CLOSED; 1647 if (unhash_open_stateid(stp, &reaplist)) 1648 put_ol_stateid_locked(stp, &reaplist); 1649 spin_unlock(&stp->st_stid.sc_client->cl_lock); 1650 free_ol_stateid_reaplist(&reaplist); 1651 } 1652 1653 static bool nfs4_openowner_unhashed(struct nfs4_openowner *oo) 1654 { 1655 lockdep_assert_held(&oo->oo_owner.so_client->cl_lock); 1656 1657 return list_empty(&oo->oo_owner.so_strhash) && 1658 list_empty(&oo->oo_perclient); 1659 } 1660 1661 static void unhash_openowner_locked(struct nfs4_openowner *oo) 1662 { 1663 struct nfs4_client *clp = oo->oo_owner.so_client; 1664 1665 lockdep_assert_held(&clp->cl_lock); 1666 1667 list_del_init(&oo->oo_owner.so_strhash); 1668 list_del_init(&oo->oo_perclient); 1669 } 1670 1671 static void release_last_closed_stateid(struct nfs4_openowner *oo) 1672 { 1673 struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net, 1674 nfsd_net_id); 1675 struct nfs4_ol_stateid *s; 1676 1677 spin_lock(&nn->client_lock); 1678 s = oo->oo_last_closed_stid; 1679 if (s) { 1680 list_del_init(&oo->oo_close_lru); 1681 oo->oo_last_closed_stid = NULL; 1682 } 1683 spin_unlock(&nn->client_lock); 1684 if (s) 1685 nfs4_put_stid(&s->st_stid); 1686 } 1687 1688 static void release_openowner(struct nfs4_openowner *oo) 1689 { 1690 struct nfs4_ol_stateid *stp; 1691 struct nfs4_client *clp = oo->oo_owner.so_client; 1692 LIST_HEAD(reaplist); 1693 1694 spin_lock(&clp->cl_lock); 1695 unhash_openowner_locked(oo); 1696 while (!list_empty(&oo->oo_owner.so_stateids)) { 1697 stp = list_first_entry(&oo->oo_owner.so_stateids, 1698 struct nfs4_ol_stateid, st_perstateowner); 1699 if (unhash_open_stateid(stp, &reaplist)) 1700 put_ol_stateid_locked(stp, &reaplist); 1701 } 1702 spin_unlock(&clp->cl_lock); 1703 free_ol_stateid_reaplist(&reaplist); 1704 release_last_closed_stateid(oo); 1705 nfs4_put_stateowner(&oo->oo_owner); 1706 } 1707 1708 static struct nfs4_stid *find_one_sb_stid(struct nfs4_client *clp, 1709 struct super_block *sb, 1710 unsigned int sc_types) 1711 { 1712 unsigned long id, tmp; 1713 struct nfs4_stid *stid; 1714 1715 spin_lock(&clp->cl_lock); 1716 idr_for_each_entry_ul(&clp->cl_stateids, stid, tmp, 
id) 1717 if ((stid->sc_type & sc_types) && 1718 stid->sc_status == 0 && 1719 stid->sc_file->fi_inode->i_sb == sb) { 1720 refcount_inc(&stid->sc_count); 1721 break; 1722 } 1723 spin_unlock(&clp->cl_lock); 1724 return stid; 1725 } 1726 1727 /** 1728 * nfsd4_revoke_states - revoke all nfsv4 states associated with given filesystem 1729 * @net: used to identify instance of nfsd (there is one per net namespace) 1730 * @sb: super_block used to identify target filesystem 1731 * 1732 * All nfs4 states (open, lock, delegation, layout) held by the server instance 1733 * and associated with a file on the given filesystem will be revoked resulting 1734 * in any files being closed and so all references from nfsd to the filesystem 1735 * being released. Thus nfsd will no longer prevent the filesystem from being 1736 * unmounted. 1737 * 1738 * The clients which own the states will subsequently being notified that the 1739 * states have been "admin-revoked". 1740 */ 1741 void nfsd4_revoke_states(struct net *net, struct super_block *sb) 1742 { 1743 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 1744 unsigned int idhashval; 1745 unsigned int sc_types; 1746 1747 sc_types = SC_TYPE_OPEN | SC_TYPE_LOCK | SC_TYPE_DELEG | SC_TYPE_LAYOUT; 1748 1749 spin_lock(&nn->client_lock); 1750 for (idhashval = 0; idhashval < CLIENT_HASH_MASK; idhashval++) { 1751 struct list_head *head = &nn->conf_id_hashtbl[idhashval]; 1752 struct nfs4_client *clp; 1753 retry: 1754 list_for_each_entry(clp, head, cl_idhash) { 1755 struct nfs4_stid *stid = find_one_sb_stid(clp, sb, 1756 sc_types); 1757 if (stid) { 1758 struct nfs4_ol_stateid *stp; 1759 struct nfs4_delegation *dp; 1760 struct nfs4_layout_stateid *ls; 1761 1762 spin_unlock(&nn->client_lock); 1763 switch (stid->sc_type) { 1764 case SC_TYPE_OPEN: 1765 stp = openlockstateid(stid); 1766 mutex_lock_nested(&stp->st_mutex, 1767 OPEN_STATEID_MUTEX); 1768 1769 spin_lock(&clp->cl_lock); 1770 if (stid->sc_status == 0) { 1771 stid->sc_status |= 1772 SC_STATUS_ADMIN_REVOKED; 1773 atomic_inc(&clp->cl_admin_revoked); 1774 spin_unlock(&clp->cl_lock); 1775 release_all_access(stp); 1776 } else 1777 spin_unlock(&clp->cl_lock); 1778 mutex_unlock(&stp->st_mutex); 1779 break; 1780 case SC_TYPE_LOCK: 1781 stp = openlockstateid(stid); 1782 mutex_lock_nested(&stp->st_mutex, 1783 LOCK_STATEID_MUTEX); 1784 spin_lock(&clp->cl_lock); 1785 if (stid->sc_status == 0) { 1786 struct nfs4_lockowner *lo = 1787 lockowner(stp->st_stateowner); 1788 struct nfsd_file *nf; 1789 1790 stid->sc_status |= 1791 SC_STATUS_ADMIN_REVOKED; 1792 atomic_inc(&clp->cl_admin_revoked); 1793 spin_unlock(&clp->cl_lock); 1794 nf = find_any_file(stp->st_stid.sc_file); 1795 if (nf) { 1796 get_file(nf->nf_file); 1797 filp_close(nf->nf_file, 1798 (fl_owner_t)lo); 1799 nfsd_file_put(nf); 1800 } 1801 release_all_access(stp); 1802 } else 1803 spin_unlock(&clp->cl_lock); 1804 mutex_unlock(&stp->st_mutex); 1805 break; 1806 case SC_TYPE_DELEG: 1807 refcount_inc(&stid->sc_count); 1808 dp = delegstateid(stid); 1809 spin_lock(&state_lock); 1810 if (!unhash_delegation_locked( 1811 dp, SC_STATUS_ADMIN_REVOKED)) 1812 dp = NULL; 1813 spin_unlock(&state_lock); 1814 if (dp) 1815 revoke_delegation(dp); 1816 break; 1817 case SC_TYPE_LAYOUT: 1818 ls = layoutstateid(stid); 1819 nfsd4_close_layout(ls); 1820 break; 1821 } 1822 nfs4_put_stid(stid); 1823 spin_lock(&nn->client_lock); 1824 if (clp->cl_minorversion == 0) 1825 /* Allow cleanup after a lease period. 
1826 * store_release ensures cleanup will 1827 * see any newly revoked states if it 1828 * sees the time updated. 1829 */ 1830 nn->nfs40_last_revoke = 1831 ktime_get_boottime_seconds(); 1832 goto retry; 1833 } 1834 } 1835 } 1836 spin_unlock(&nn->client_lock); 1837 } 1838 1839 static inline int 1840 hash_sessionid(struct nfs4_sessionid *sessionid) 1841 { 1842 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid; 1843 1844 return sid->sequence % SESSION_HASH_SIZE; 1845 } 1846 1847 #ifdef CONFIG_SUNRPC_DEBUG 1848 static inline void 1849 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) 1850 { 1851 u32 *ptr = (u32 *)(&sessionid->data[0]); 1852 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]); 1853 } 1854 #else 1855 static inline void 1856 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) 1857 { 1858 } 1859 #endif 1860 1861 /* 1862 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it 1863 * won't be used for replay. 1864 */ 1865 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr) 1866 { 1867 struct nfs4_stateowner *so = cstate->replay_owner; 1868 1869 if (nfserr == nfserr_replay_me) 1870 return; 1871 1872 if (!seqid_mutating_err(ntohl(nfserr))) { 1873 nfsd4_cstate_clear_replay(cstate); 1874 return; 1875 } 1876 if (!so) 1877 return; 1878 if (so->so_is_open_owner) 1879 release_last_closed_stateid(openowner(so)); 1880 so->so_seqid++; 1881 return; 1882 } 1883 1884 static void 1885 gen_sessionid(struct nfsd4_session *ses) 1886 { 1887 struct nfs4_client *clp = ses->se_client; 1888 struct nfsd4_sessionid *sid; 1889 1890 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data; 1891 sid->clientid = clp->cl_clientid; 1892 sid->sequence = current_sessionid++; 1893 sid->reserved = 0; 1894 } 1895 1896 /* 1897 * The protocol defines ca_maxresponssize_cached to include the size of 1898 * the rpc header, but all we need to cache is the data starting after 1899 * the end of the initial SEQUENCE operation--the rest we regenerate 1900 * each time. Therefore we can advertise a ca_maxresponssize_cached 1901 * value that is the number of bytes in our cache plus a few additional 1902 * bytes. In order to stay on the safe side, and not promise more than 1903 * we can cache, those additional bytes must be the minimum possible: 24 1904 * bytes of rpc header (xid through accept state, with AUTH_NULL 1905 * verifier), 12 for the compound header (with zero-length tag), and 44 1906 * for the SEQUENCE op response: 1907 */ 1908 #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44) 1909 1910 static struct shrinker *nfsd_slot_shrinker; 1911 static DEFINE_SPINLOCK(nfsd_session_list_lock); 1912 static LIST_HEAD(nfsd_session_list); 1913 /* The sum of "target_slots-1" on every session. The shrinker can push this 1914 * down, though it can take a little while for the memory to actually 1915 * be freed. The "-1" is because we can never free slot 0 while the 1916 * session is active. 1917 */ 1918 static atomic_t nfsd_total_target_slots = ATOMIC_INIT(0); 1919 1920 static void 1921 free_session_slots(struct nfsd4_session *ses, int from) 1922 { 1923 int i; 1924 1925 if (from >= ses->se_fchannel.maxreqs) 1926 return; 1927 1928 for (i = from; i < ses->se_fchannel.maxreqs; i++) { 1929 struct nfsd4_slot *slot = xa_load(&ses->se_slots, i); 1930 1931 /* 1932 * Save the seqid in case we reactivate this slot. 
1933 * This will never require a memory allocation so the GFP 1934 * flag is irrelevant. 1935 */ 1936 xa_store(&ses->se_slots, i, xa_mk_value(slot->sl_seqid), 0); 1937 free_svc_cred(&slot->sl_cred); 1938 kfree(slot); 1939 } 1940 ses->se_fchannel.maxreqs = from; 1941 if (ses->se_target_maxslots > from) { 1942 int new_target = from ?: 1; 1943 atomic_sub(ses->se_target_maxslots - new_target, &nfsd_total_target_slots); 1944 ses->se_target_maxslots = new_target; 1945 } 1946 } 1947 1948 /** 1949 * reduce_session_slots - reduce the target max-slots of a session if possible 1950 * @ses: The session to affect 1951 * @dec: how much to decrease the target by 1952 * 1953 * This interface can be used by a shrinker to reduce the target max-slots 1954 * for a session so that some slots can eventually be freed. 1955 * It uses spin_trylock() as it may be called in a context where another 1956 * spinlock is held that has a dependency on client_lock. As shrinkers are 1957 * best-effort, skipping a session if client_lock is already held has no 1958 * great cost. 1959 * 1960 * Return value: 1961 * The number of slots that the target was reduced by. 1962 */ 1963 static int 1964 reduce_session_slots(struct nfsd4_session *ses, int dec) 1965 { 1966 struct nfsd_net *nn = net_generic(ses->se_client->net, 1967 nfsd_net_id); 1968 int ret = 0; 1969 1970 if (ses->se_target_maxslots <= 1) 1971 return ret; 1972 if (!spin_trylock(&nn->client_lock)) 1973 return ret; 1974 ret = min(dec, ses->se_target_maxslots-1); 1975 ses->se_target_maxslots -= ret; 1976 atomic_sub(ret, &nfsd_total_target_slots); 1977 ses->se_slot_gen += 1; 1978 if (ses->se_slot_gen == 0) { 1979 int i; 1980 ses->se_slot_gen = 1; 1981 for (i = 0; i < ses->se_fchannel.maxreqs; i++) { 1982 struct nfsd4_slot *slot = xa_load(&ses->se_slots, i); 1983 slot->sl_generation = 0; 1984 } 1985 } 1986 spin_unlock(&nn->client_lock); 1987 return ret; 1988 } 1989 1990 static struct nfsd4_slot *nfsd4_alloc_slot(struct nfsd4_channel_attrs *fattrs, 1991 int index, gfp_t gfp) 1992 { 1993 struct nfsd4_slot *slot; 1994 size_t size; 1995 1996 /* 1997 * The RPC and NFS session headers are never saved in 1998 * the slot reply cache buffer. 1999 */ 2000 size = fattrs->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ ?
2001 0 : fattrs->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; 2002 2003 slot = kzalloc(struct_size(slot, sl_data, size), gfp); 2004 if (!slot) 2005 return NULL; 2006 slot->sl_index = index; 2007 return slot; 2008 } 2009 2010 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs, 2011 struct nfsd4_channel_attrs *battrs) 2012 { 2013 int numslots = fattrs->maxreqs; 2014 struct nfsd4_session *new; 2015 struct nfsd4_slot *slot; 2016 int i; 2017 2018 new = kzalloc(sizeof(*new), GFP_KERNEL); 2019 if (!new) 2020 return NULL; 2021 xa_init(&new->se_slots); 2022 2023 slot = nfsd4_alloc_slot(fattrs, 0, GFP_KERNEL); 2024 if (!slot || xa_is_err(xa_store(&new->se_slots, 0, slot, GFP_KERNEL))) 2025 goto out_free; 2026 2027 for (i = 1; i < numslots; i++) { 2028 const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN; 2029 slot = nfsd4_alloc_slot(fattrs, i, gfp); 2030 if (!slot) 2031 break; 2032 if (xa_is_err(xa_store(&new->se_slots, i, slot, gfp))) { 2033 kfree(slot); 2034 break; 2035 } 2036 } 2037 fattrs->maxreqs = i; 2038 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs)); 2039 new->se_target_maxslots = i; 2040 atomic_add(i - 1, &nfsd_total_target_slots); 2041 new->se_cb_slot_avail = ~0U; 2042 new->se_cb_highest_slot = min(battrs->maxreqs - 1, 2043 NFSD_BC_SLOT_TABLE_SIZE - 1); 2044 spin_lock_init(&new->se_lock); 2045 return new; 2046 out_free: 2047 kfree(slot); 2048 xa_destroy(&new->se_slots); 2049 kfree(new); 2050 return NULL; 2051 } 2052 2053 static void free_conn(struct nfsd4_conn *c) 2054 { 2055 svc_xprt_put(c->cn_xprt); 2056 kfree(c); 2057 } 2058 2059 static void nfsd4_conn_lost(struct svc_xpt_user *u) 2060 { 2061 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user); 2062 struct nfs4_client *clp = c->cn_session->se_client; 2063 2064 trace_nfsd_cb_lost(clp); 2065 2066 spin_lock(&clp->cl_lock); 2067 if (!list_empty(&c->cn_persession)) { 2068 list_del(&c->cn_persession); 2069 free_conn(c); 2070 } 2071 nfsd4_probe_callback(clp); 2072 spin_unlock(&clp->cl_lock); 2073 } 2074 2075 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags) 2076 { 2077 struct nfsd4_conn *conn; 2078 2079 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL); 2080 if (!conn) 2081 return NULL; 2082 svc_xprt_get(rqstp->rq_xprt); 2083 conn->cn_xprt = rqstp->rq_xprt; 2084 conn->cn_flags = flags; 2085 INIT_LIST_HEAD(&conn->cn_xpt_user.list); 2086 return conn; 2087 } 2088 2089 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) 2090 { 2091 conn->cn_session = ses; 2092 list_add(&conn->cn_persession, &ses->se_conns); 2093 } 2094 2095 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses) 2096 { 2097 struct nfs4_client *clp = ses->se_client; 2098 2099 spin_lock(&clp->cl_lock); 2100 __nfsd4_hash_conn(conn, ses); 2101 spin_unlock(&clp->cl_lock); 2102 } 2103 2104 static int nfsd4_register_conn(struct nfsd4_conn *conn) 2105 { 2106 conn->cn_xpt_user.callback = nfsd4_conn_lost; 2107 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user); 2108 } 2109 2110 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses) 2111 { 2112 int ret; 2113 2114 nfsd4_hash_conn(conn, ses); 2115 ret = nfsd4_register_conn(conn); 2116 if (ret) 2117 /* oops; xprt is already down: */ 2118 nfsd4_conn_lost(&conn->cn_xpt_user); 2119 /* We may have gained or lost a callback channel: */ 2120 nfsd4_probe_callback_sync(ses->se_client); 2121 } 2122 2123 static struct nfsd4_conn 
*alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses) 2124 { 2125 u32 dir = NFS4_CDFC4_FORE; 2126 2127 if (cses->flags & SESSION4_BACK_CHAN) 2128 dir |= NFS4_CDFC4_BACK; 2129 return alloc_conn(rqstp, dir); 2130 } 2131 2132 /* must be called under client_lock */ 2133 static void nfsd4_del_conns(struct nfsd4_session *s) 2134 { 2135 struct nfs4_client *clp = s->se_client; 2136 struct nfsd4_conn *c; 2137 2138 spin_lock(&clp->cl_lock); 2139 while (!list_empty(&s->se_conns)) { 2140 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession); 2141 list_del_init(&c->cn_persession); 2142 spin_unlock(&clp->cl_lock); 2143 2144 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user); 2145 free_conn(c); 2146 2147 spin_lock(&clp->cl_lock); 2148 } 2149 spin_unlock(&clp->cl_lock); 2150 } 2151 2152 static void __free_session(struct nfsd4_session *ses) 2153 { 2154 free_session_slots(ses, 0); 2155 xa_destroy(&ses->se_slots); 2156 kfree(ses); 2157 } 2158 2159 static void free_session(struct nfsd4_session *ses) 2160 { 2161 nfsd4_del_conns(ses); 2162 __free_session(ses); 2163 } 2164 2165 static unsigned long 2166 nfsd_slot_count(struct shrinker *s, struct shrink_control *sc) 2167 { 2168 unsigned long cnt = atomic_read(&nfsd_total_target_slots); 2169 2170 return cnt ? cnt : SHRINK_EMPTY; 2171 } 2172 2173 static unsigned long 2174 nfsd_slot_scan(struct shrinker *s, struct shrink_control *sc) 2175 { 2176 struct nfsd4_session *ses; 2177 unsigned long scanned = 0; 2178 unsigned long freed = 0; 2179 2180 spin_lock(&nfsd_session_list_lock); 2181 list_for_each_entry(ses, &nfsd_session_list, se_all_sessions) { 2182 freed += reduce_session_slots(ses, 1); 2183 scanned += 1; 2184 if (scanned >= sc->nr_to_scan) { 2185 /* Move starting point for next scan */ 2186 list_move(&nfsd_session_list, &ses->se_all_sessions); 2187 break; 2188 } 2189 } 2190 spin_unlock(&nfsd_session_list_lock); 2191 sc->nr_scanned = scanned; 2192 return freed; 2193 } 2194 2195 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses) 2196 { 2197 int idx; 2198 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 2199 2200 new->se_client = clp; 2201 gen_sessionid(new); 2202 2203 INIT_LIST_HEAD(&new->se_conns); 2204 2205 atomic_set(&new->se_ref, 0); 2206 new->se_dead = false; 2207 new->se_cb_prog = cses->callback_prog; 2208 new->se_cb_sec = cses->cb_sec; 2209 2210 for (idx = 0; idx < NFSD_BC_SLOT_TABLE_SIZE; ++idx) 2211 new->se_cb_seq_nr[idx] = 1; 2212 2213 idx = hash_sessionid(&new->se_sessionid); 2214 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]); 2215 spin_lock(&clp->cl_lock); 2216 list_add(&new->se_perclnt, &clp->cl_sessions); 2217 spin_unlock(&clp->cl_lock); 2218 2219 spin_lock(&nfsd_session_list_lock); 2220 list_add_tail(&new->se_all_sessions, &nfsd_session_list); 2221 spin_unlock(&nfsd_session_list_lock); 2222 2223 { 2224 struct sockaddr *sa = svc_addr(rqstp); 2225 /* 2226 * This is a little silly; with sessions there's no real 2227 * use for the callback address. 
Use the peer address 2228 * as a reasonable default for now, but consider fixing 2229 * the rpc client not to require an address in the 2230 * future: 2231 */ 2232 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa); 2233 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa); 2234 } 2235 } 2236 2237 /* caller must hold client_lock */ 2238 static struct nfsd4_session * 2239 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net) 2240 { 2241 struct nfsd4_session *elem; 2242 int idx; 2243 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 2244 2245 lockdep_assert_held(&nn->client_lock); 2246 2247 dump_sessionid(__func__, sessionid); 2248 idx = hash_sessionid(sessionid); 2249 /* Search in the appropriate list */ 2250 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) { 2251 if (!memcmp(elem->se_sessionid.data, sessionid->data, 2252 NFS4_MAX_SESSIONID_LEN)) { 2253 return elem; 2254 } 2255 } 2256 2257 dprintk("%s: session not found\n", __func__); 2258 return NULL; 2259 } 2260 2261 static struct nfsd4_session * 2262 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net, 2263 __be32 *ret) 2264 { 2265 struct nfsd4_session *session; 2266 __be32 status = nfserr_badsession; 2267 2268 session = __find_in_sessionid_hashtbl(sessionid, net); 2269 if (!session) 2270 goto out; 2271 status = nfsd4_get_session_locked(session); 2272 if (status) 2273 session = NULL; 2274 out: 2275 *ret = status; 2276 return session; 2277 } 2278 2279 /* caller must hold client_lock */ 2280 static void 2281 unhash_session(struct nfsd4_session *ses) 2282 { 2283 struct nfs4_client *clp = ses->se_client; 2284 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2285 2286 lockdep_assert_held(&nn->client_lock); 2287 2288 list_del(&ses->se_hash); 2289 spin_lock(&ses->se_client->cl_lock); 2290 list_del(&ses->se_perclnt); 2291 spin_unlock(&ses->se_client->cl_lock); 2292 spin_lock(&nfsd_session_list_lock); 2293 list_del(&ses->se_all_sessions); 2294 spin_unlock(&nfsd_session_list_lock); 2295 } 2296 2297 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */ 2298 static int 2299 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn) 2300 { 2301 /* 2302 * We're assuming the clid was not given out from a boot 2303 * precisely 2^32 (about 136 years) before this one. 
That seems 2304 * a safe assumption: 2305 */ 2306 if (clid->cl_boot == (u32)nn->boot_time) 2307 return 0; 2308 trace_nfsd_clid_stale(clid); 2309 return 1; 2310 } 2311 2312 static struct nfs4_client *alloc_client(struct xdr_netobj name, 2313 struct nfsd_net *nn) 2314 { 2315 struct nfs4_client *clp; 2316 int i; 2317 2318 if (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients && 2319 atomic_read(&nn->nfsd_courtesy_clients) > 0) 2320 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); 2321 2322 clp = kmem_cache_zalloc(client_slab, GFP_KERNEL); 2323 if (clp == NULL) 2324 return NULL; 2325 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL); 2326 if (clp->cl_name.data == NULL) 2327 goto err_no_name; 2328 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE, 2329 sizeof(struct list_head), 2330 GFP_KERNEL); 2331 if (!clp->cl_ownerstr_hashtbl) 2332 goto err_no_hashtbl; 2333 clp->cl_callback_wq = alloc_ordered_workqueue("nfsd4_callbacks", 0); 2334 if (!clp->cl_callback_wq) 2335 goto err_no_callback_wq; 2336 2337 for (i = 0; i < OWNER_HASH_SIZE; i++) 2338 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]); 2339 INIT_LIST_HEAD(&clp->cl_sessions); 2340 idr_init(&clp->cl_stateids); 2341 atomic_set(&clp->cl_rpc_users, 0); 2342 clp->cl_cb_state = NFSD4_CB_UNKNOWN; 2343 clp->cl_state = NFSD4_ACTIVE; 2344 atomic_inc(&nn->nfs4_client_count); 2345 atomic_set(&clp->cl_delegs_in_recall, 0); 2346 INIT_LIST_HEAD(&clp->cl_idhash); 2347 INIT_LIST_HEAD(&clp->cl_openowners); 2348 INIT_LIST_HEAD(&clp->cl_delegations); 2349 INIT_LIST_HEAD(&clp->cl_lru); 2350 INIT_LIST_HEAD(&clp->cl_revoked); 2351 #ifdef CONFIG_NFSD_PNFS 2352 INIT_LIST_HEAD(&clp->cl_lo_states); 2353 #endif 2354 INIT_LIST_HEAD(&clp->async_copies); 2355 spin_lock_init(&clp->async_lock); 2356 spin_lock_init(&clp->cl_lock); 2357 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); 2358 return clp; 2359 err_no_callback_wq: 2360 kfree(clp->cl_ownerstr_hashtbl); 2361 err_no_hashtbl: 2362 kfree(clp->cl_name.data); 2363 err_no_name: 2364 kmem_cache_free(client_slab, clp); 2365 return NULL; 2366 } 2367 2368 static void __free_client(struct kref *k) 2369 { 2370 struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref); 2371 struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs); 2372 2373 free_svc_cred(&clp->cl_cred); 2374 destroy_workqueue(clp->cl_callback_wq); 2375 kfree(clp->cl_ownerstr_hashtbl); 2376 kfree(clp->cl_name.data); 2377 kfree(clp->cl_nii_domain.data); 2378 kfree(clp->cl_nii_name.data); 2379 idr_destroy(&clp->cl_stateids); 2380 kfree(clp->cl_ra); 2381 kmem_cache_free(client_slab, clp); 2382 } 2383 2384 static void drop_client(struct nfs4_client *clp) 2385 { 2386 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client); 2387 } 2388 2389 static void 2390 free_client(struct nfs4_client *clp) 2391 { 2392 while (!list_empty(&clp->cl_sessions)) { 2393 struct nfsd4_session *ses; 2394 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, 2395 se_perclnt); 2396 list_del(&ses->se_perclnt); 2397 WARN_ON_ONCE(atomic_read(&ses->se_ref)); 2398 free_session(ses); 2399 } 2400 rpc_destroy_wait_queue(&clp->cl_cb_waitq); 2401 if (clp->cl_nfsd_dentry) { 2402 nfsd_client_rmdir(clp->cl_nfsd_dentry); 2403 clp->cl_nfsd_dentry = NULL; 2404 wake_up_all(&expiry_wq); 2405 } 2406 drop_client(clp); 2407 } 2408 2409 /* must be called under the client_lock */ 2410 static void 2411 unhash_client_locked(struct nfs4_client *clp) 2412 { 2413 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2414 struct nfsd4_session *ses; 2415 
2416 lockdep_assert_held(&nn->client_lock); 2417 2418 /* Mark the client as expired! */ 2419 clp->cl_time = 0; 2420 /* Make it invisible */ 2421 if (!list_empty(&clp->cl_idhash)) { 2422 list_del_init(&clp->cl_idhash); 2423 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) 2424 rb_erase(&clp->cl_namenode, &nn->conf_name_tree); 2425 else 2426 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); 2427 } 2428 list_del_init(&clp->cl_lru); 2429 spin_lock(&clp->cl_lock); 2430 spin_lock(&nfsd_session_list_lock); 2431 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) { 2432 list_del_init(&ses->se_hash); 2433 list_del_init(&ses->se_all_sessions); 2434 } 2435 spin_unlock(&nfsd_session_list_lock); 2436 spin_unlock(&clp->cl_lock); 2437 } 2438 2439 static void 2440 unhash_client(struct nfs4_client *clp) 2441 { 2442 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2443 2444 spin_lock(&nn->client_lock); 2445 unhash_client_locked(clp); 2446 spin_unlock(&nn->client_lock); 2447 } 2448 2449 static __be32 mark_client_expired_locked(struct nfs4_client *clp) 2450 { 2451 int users = atomic_read(&clp->cl_rpc_users); 2452 2453 trace_nfsd_mark_client_expired(clp, users); 2454 2455 if (users) 2456 return nfserr_jukebox; 2457 unhash_client_locked(clp); 2458 return nfs_ok; 2459 } 2460 2461 static void 2462 __destroy_client(struct nfs4_client *clp) 2463 { 2464 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2465 int i; 2466 struct nfs4_openowner *oo; 2467 struct nfs4_delegation *dp; 2468 LIST_HEAD(reaplist); 2469 2470 spin_lock(&state_lock); 2471 while (!list_empty(&clp->cl_delegations)) { 2472 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); 2473 unhash_delegation_locked(dp, SC_STATUS_CLOSED); 2474 list_add(&dp->dl_recall_lru, &reaplist); 2475 } 2476 spin_unlock(&state_lock); 2477 while (!list_empty(&reaplist)) { 2478 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); 2479 list_del_init(&dp->dl_recall_lru); 2480 destroy_unhashed_deleg(dp); 2481 } 2482 while (!list_empty(&clp->cl_revoked)) { 2483 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru); 2484 list_del_init(&dp->dl_recall_lru); 2485 nfs4_put_stid(&dp->dl_stid); 2486 } 2487 while (!list_empty(&clp->cl_openowners)) { 2488 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); 2489 nfs4_get_stateowner(&oo->oo_owner); 2490 release_openowner(oo); 2491 } 2492 for (i = 0; i < OWNER_HASH_SIZE; i++) { 2493 struct nfs4_stateowner *so, *tmp; 2494 2495 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i], 2496 so_strhash) { 2497 /* Should be no openowners at this point */ 2498 WARN_ON_ONCE(so->so_is_open_owner); 2499 remove_blocked_locks(lockowner(so)); 2500 } 2501 } 2502 nfsd4_return_all_client_layouts(clp); 2503 nfsd4_shutdown_copy(clp); 2504 nfsd4_shutdown_callback(clp); 2505 if (clp->cl_cb_conn.cb_xprt) 2506 svc_xprt_put(clp->cl_cb_conn.cb_xprt); 2507 atomic_add_unless(&nn->nfs4_client_count, -1, 0); 2508 nfsd4_dec_courtesy_client_count(nn, clp); 2509 free_client(clp); 2510 wake_up_all(&expiry_wq); 2511 } 2512 2513 static void 2514 destroy_client(struct nfs4_client *clp) 2515 { 2516 unhash_client(clp); 2517 __destroy_client(clp); 2518 } 2519 2520 static void inc_reclaim_complete(struct nfs4_client *clp) 2521 { 2522 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 2523 2524 if (!nn->track_reclaim_completes) 2525 return; 2526 if (!nfsd4_find_reclaim_client(clp->cl_name, nn)) 2527 return; 2528 if 
(atomic_inc_return(&nn->nr_reclaim_complete) == 2529 nn->reclaim_str_hashtbl_size) { 2530 printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n", 2531 clp->net->ns.inum); 2532 nfsd4_end_grace(nn); 2533 } 2534 } 2535 2536 static void expire_client(struct nfs4_client *clp) 2537 { 2538 unhash_client(clp); 2539 nfsd4_client_record_remove(clp); 2540 __destroy_client(clp); 2541 } 2542 2543 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source) 2544 { 2545 memcpy(target->cl_verifier.data, source->data, 2546 sizeof(target->cl_verifier.data)); 2547 } 2548 2549 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source) 2550 { 2551 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; 2552 target->cl_clientid.cl_id = source->cl_clientid.cl_id; 2553 } 2554 2555 static int copy_cred(struct svc_cred *target, struct svc_cred *source) 2556 { 2557 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL); 2558 target->cr_raw_principal = kstrdup(source->cr_raw_principal, 2559 GFP_KERNEL); 2560 target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL); 2561 if ((source->cr_principal && !target->cr_principal) || 2562 (source->cr_raw_principal && !target->cr_raw_principal) || 2563 (source->cr_targ_princ && !target->cr_targ_princ)) 2564 return -ENOMEM; 2565 2566 target->cr_flavor = source->cr_flavor; 2567 target->cr_uid = source->cr_uid; 2568 target->cr_gid = source->cr_gid; 2569 target->cr_group_info = source->cr_group_info; 2570 get_group_info(target->cr_group_info); 2571 target->cr_gss_mech = source->cr_gss_mech; 2572 if (source->cr_gss_mech) 2573 gss_mech_get(source->cr_gss_mech); 2574 return 0; 2575 } 2576 2577 static int 2578 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2) 2579 { 2580 if (o1->len < o2->len) 2581 return -1; 2582 if (o1->len > o2->len) 2583 return 1; 2584 return memcmp(o1->data, o2->data, o1->len); 2585 } 2586 2587 static int 2588 same_verf(nfs4_verifier *v1, nfs4_verifier *v2) 2589 { 2590 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data)); 2591 } 2592 2593 static int 2594 same_clid(clientid_t *cl1, clientid_t *cl2) 2595 { 2596 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id); 2597 } 2598 2599 static bool groups_equal(struct group_info *g1, struct group_info *g2) 2600 { 2601 int i; 2602 2603 if (g1->ngroups != g2->ngroups) 2604 return false; 2605 for (i=0; i<g1->ngroups; i++) 2606 if (!gid_eq(g1->gid[i], g2->gid[i])) 2607 return false; 2608 return true; 2609 } 2610 2611 /* 2612 * RFC 3530 language requires clid_inuse be returned when the 2613 * "principal" associated with a request differs from that previously 2614 * used. We use the uid, gids, and gss principal string as our best 2615 * approximation. We also don't want to allow non-gss use of a client 2616 * established using gss: in theory cr_principal should catch that 2617 * change, but in practice cr_principal can be null even in the gss case 2618 * since gssd doesn't always pass down a principal string.
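* For example, two AUTH_SYS requests carrying the same uid, gid and supplementary group list compare as the same "principal" here: both typically have a NULL cr_principal, so same_creds() below ends up comparing only the unix credentials.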
2619 */ 2620 static bool is_gss_cred(struct svc_cred *cr) 2621 { 2622 /* Is cr_flavor one of the gss "pseudoflavors"?: */ 2623 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR); 2624 } 2625 2626 2627 static bool 2628 same_creds(struct svc_cred *cr1, struct svc_cred *cr2) 2629 { 2630 if ((is_gss_cred(cr1) != is_gss_cred(cr2)) 2631 || (!uid_eq(cr1->cr_uid, cr2->cr_uid)) 2632 || (!gid_eq(cr1->cr_gid, cr2->cr_gid)) 2633 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info)) 2634 return false; 2635 /* XXX: check that cr_targ_princ fields match ? */ 2636 if (cr1->cr_principal == cr2->cr_principal) 2637 return true; 2638 if (!cr1->cr_principal || !cr2->cr_principal) 2639 return false; 2640 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal); 2641 } 2642 2643 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp) 2644 { 2645 struct svc_cred *cr = &rqstp->rq_cred; 2646 u32 service; 2647 2648 if (!cr->cr_gss_mech) 2649 return false; 2650 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor); 2651 return service == RPC_GSS_SVC_INTEGRITY || 2652 service == RPC_GSS_SVC_PRIVACY; 2653 } 2654 2655 bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp) 2656 { 2657 struct svc_cred *cr = &rqstp->rq_cred; 2658 2659 if (!cl->cl_mach_cred) 2660 return true; 2661 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech) 2662 return false; 2663 if (!svc_rqst_integrity_protected(rqstp)) 2664 return false; 2665 if (cl->cl_cred.cr_raw_principal) 2666 return 0 == strcmp(cl->cl_cred.cr_raw_principal, 2667 cr->cr_raw_principal); 2668 if (!cr->cr_principal) 2669 return false; 2670 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal); 2671 } 2672 2673 static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn) 2674 { 2675 __be32 verf[2]; 2676 2677 /* 2678 * This is opaque to client, so no need to byte-swap. 
Use 2679 * __force to keep sparse happy 2680 */ 2681 verf[0] = (__force __be32)(u32)ktime_get_real_seconds(); 2682 verf[1] = (__force __be32)nn->clverifier_counter++; 2683 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data)); 2684 } 2685 2686 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn) 2687 { 2688 clp->cl_clientid.cl_boot = (u32)nn->boot_time; 2689 clp->cl_clientid.cl_id = nn->clientid_counter++; 2690 gen_confirm(clp, nn); 2691 } 2692 2693 static struct nfs4_stid * 2694 find_stateid_locked(struct nfs4_client *cl, stateid_t *t) 2695 { 2696 struct nfs4_stid *ret; 2697 2698 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id); 2699 if (!ret || !ret->sc_type) 2700 return NULL; 2701 return ret; 2702 } 2703 2704 static struct nfs4_stid * 2705 find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, 2706 unsigned short typemask, unsigned short ok_states) 2707 { 2708 struct nfs4_stid *s; 2709 2710 spin_lock(&cl->cl_lock); 2711 s = find_stateid_locked(cl, t); 2712 if (s != NULL) { 2713 if ((s->sc_status & ~ok_states) == 0 && 2714 (typemask & s->sc_type)) 2715 refcount_inc(&s->sc_count); 2716 else 2717 s = NULL; 2718 } 2719 spin_unlock(&cl->cl_lock); 2720 return s; 2721 } 2722 2723 static struct nfs4_client *get_nfsdfs_clp(struct inode *inode) 2724 { 2725 struct nfsdfs_client *nc; 2726 nc = get_nfsdfs_client(inode); 2727 if (!nc) 2728 return NULL; 2729 return container_of(nc, struct nfs4_client, cl_nfsdfs); 2730 } 2731 2732 static void seq_quote_mem(struct seq_file *m, char *data, int len) 2733 { 2734 seq_puts(m, "\""); 2735 seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\"); 2736 seq_puts(m, "\""); 2737 } 2738 2739 static const char *cb_state2str(int state) 2740 { 2741 switch (state) { 2742 case NFSD4_CB_UP: 2743 return "UP"; 2744 case NFSD4_CB_UNKNOWN: 2745 return "UNKNOWN"; 2746 case NFSD4_CB_DOWN: 2747 return "DOWN"; 2748 case NFSD4_CB_FAULT: 2749 return "FAULT"; 2750 } 2751 return "UNDEFINED"; 2752 } 2753 2754 static int client_info_show(struct seq_file *m, void *v) 2755 { 2756 struct inode *inode = file_inode(m->file); 2757 struct nfsd4_session *ses; 2758 struct nfs4_client *clp; 2759 u64 clid; 2760 2761 clp = get_nfsdfs_clp(inode); 2762 if (!clp) 2763 return -ENXIO; 2764 memcpy(&clid, &clp->cl_clientid, sizeof(clid)); 2765 seq_printf(m, "clientid: 0x%llx\n", clid); 2766 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr); 2767 2768 if (clp->cl_state == NFSD4_COURTESY) 2769 seq_puts(m, "status: courtesy\n"); 2770 else if (clp->cl_state == NFSD4_EXPIRABLE) 2771 seq_puts(m, "status: expirable\n"); 2772 else if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags)) 2773 seq_puts(m, "status: confirmed\n"); 2774 else 2775 seq_puts(m, "status: unconfirmed\n"); 2776 seq_printf(m, "seconds from last renew: %lld\n", 2777 ktime_get_boottime_seconds() - clp->cl_time); 2778 seq_puts(m, "name: "); 2779 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len); 2780 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion); 2781 if (clp->cl_nii_domain.data) { 2782 seq_puts(m, "Implementation domain: "); 2783 seq_quote_mem(m, clp->cl_nii_domain.data, 2784 clp->cl_nii_domain.len); 2785 seq_puts(m, "\nImplementation name: "); 2786 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len); 2787 seq_printf(m, "\nImplementation time: [%lld, %ld]\n", 2788 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec); 2789 } 2790 seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state)); 2791 seq_printf(m, "callback address: 
\"%pISpc\"\n", &clp->cl_cb_conn.cb_addr); 2792 seq_printf(m, "admin-revoked states: %d\n", 2793 atomic_read(&clp->cl_admin_revoked)); 2794 spin_lock(&clp->cl_lock); 2795 seq_printf(m, "session slots:"); 2796 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) 2797 seq_printf(m, " %u", ses->se_fchannel.maxreqs); 2798 seq_printf(m, "\nsession target slots:"); 2799 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) 2800 seq_printf(m, " %u", ses->se_target_maxslots); 2801 spin_unlock(&clp->cl_lock); 2802 seq_puts(m, "\n"); 2803 2804 drop_client(clp); 2805 2806 return 0; 2807 } 2808 2809 DEFINE_SHOW_ATTRIBUTE(client_info); 2810 2811 static void *states_start(struct seq_file *s, loff_t *pos) 2812 __acquires(&clp->cl_lock) 2813 { 2814 struct nfs4_client *clp = s->private; 2815 unsigned long id = *pos; 2816 void *ret; 2817 2818 spin_lock(&clp->cl_lock); 2819 ret = idr_get_next_ul(&clp->cl_stateids, &id); 2820 *pos = id; 2821 return ret; 2822 } 2823 2824 static void *states_next(struct seq_file *s, void *v, loff_t *pos) 2825 { 2826 struct nfs4_client *clp = s->private; 2827 unsigned long id = *pos; 2828 void *ret; 2829 2830 id = *pos; 2831 id++; 2832 ret = idr_get_next_ul(&clp->cl_stateids, &id); 2833 *pos = id; 2834 return ret; 2835 } 2836 2837 static void states_stop(struct seq_file *s, void *v) 2838 __releases(&clp->cl_lock) 2839 { 2840 struct nfs4_client *clp = s->private; 2841 2842 spin_unlock(&clp->cl_lock); 2843 } 2844 2845 static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f) 2846 { 2847 seq_printf(s, "filename: \"%pD2\"", f->nf_file); 2848 } 2849 2850 static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f) 2851 { 2852 struct inode *inode = file_inode(f->nf_file); 2853 2854 seq_printf(s, "superblock: \"%02x:%02x:%ld\"", 2855 MAJOR(inode->i_sb->s_dev), 2856 MINOR(inode->i_sb->s_dev), 2857 inode->i_ino); 2858 } 2859 2860 static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo) 2861 { 2862 seq_puts(s, "owner: "); 2863 seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len); 2864 } 2865 2866 static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid) 2867 { 2868 seq_printf(s, "0x%.8x", stid->si_generation); 2869 seq_printf(s, "%12phN", &stid->si_opaque); 2870 } 2871 2872 static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st) 2873 { 2874 struct nfs4_ol_stateid *ols; 2875 struct nfs4_file *nf; 2876 struct nfsd_file *file; 2877 struct nfs4_stateowner *oo; 2878 unsigned int access, deny; 2879 2880 ols = openlockstateid(st); 2881 oo = ols->st_stateowner; 2882 nf = st->sc_file; 2883 2884 seq_puts(s, "- "); 2885 nfs4_show_stateid(s, &st->sc_stateid); 2886 seq_puts(s, ": { type: open, "); 2887 2888 access = bmap_to_share_mode(ols->st_access_bmap); 2889 deny = bmap_to_share_mode(ols->st_deny_bmap); 2890 2891 seq_printf(s, "access: %s%s, ", 2892 access & NFS4_SHARE_ACCESS_READ ? "r" : "-", 2893 access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-"); 2894 seq_printf(s, "deny: %s%s, ", 2895 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-", 2896 deny & NFS4_SHARE_ACCESS_WRITE ? 
"w" : "-"); 2897 2898 if (nf) { 2899 spin_lock(&nf->fi_lock); 2900 file = find_any_file_locked(nf); 2901 if (file) { 2902 nfs4_show_superblock(s, file); 2903 seq_puts(s, ", "); 2904 nfs4_show_fname(s, file); 2905 seq_puts(s, ", "); 2906 } 2907 spin_unlock(&nf->fi_lock); 2908 } else 2909 seq_puts(s, "closed, "); 2910 nfs4_show_owner(s, oo); 2911 if (st->sc_status & SC_STATUS_ADMIN_REVOKED) 2912 seq_puts(s, ", admin-revoked"); 2913 seq_puts(s, " }\n"); 2914 return 0; 2915 } 2916 2917 static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st) 2918 { 2919 struct nfs4_ol_stateid *ols; 2920 struct nfs4_file *nf; 2921 struct nfsd_file *file; 2922 struct nfs4_stateowner *oo; 2923 2924 ols = openlockstateid(st); 2925 oo = ols->st_stateowner; 2926 nf = st->sc_file; 2927 2928 seq_puts(s, "- "); 2929 nfs4_show_stateid(s, &st->sc_stateid); 2930 seq_puts(s, ": { type: lock, "); 2931 2932 spin_lock(&nf->fi_lock); 2933 file = find_any_file_locked(nf); 2934 if (file) { 2935 /* 2936 * Note: a lock stateid isn't really the same thing as a lock, 2937 * it's the locking state held by one owner on a file, and there 2938 * may be multiple (or no) lock ranges associated with it. 2939 * (Same for the matter is true of open stateids.) 2940 */ 2941 2942 nfs4_show_superblock(s, file); 2943 /* XXX: open stateid? */ 2944 seq_puts(s, ", "); 2945 nfs4_show_fname(s, file); 2946 seq_puts(s, ", "); 2947 } 2948 nfs4_show_owner(s, oo); 2949 if (st->sc_status & SC_STATUS_ADMIN_REVOKED) 2950 seq_puts(s, ", admin-revoked"); 2951 seq_puts(s, " }\n"); 2952 spin_unlock(&nf->fi_lock); 2953 return 0; 2954 } 2955 2956 static char *nfs4_show_deleg_type(u32 dl_type) 2957 { 2958 switch (dl_type) { 2959 case OPEN_DELEGATE_READ: 2960 return "r"; 2961 case OPEN_DELEGATE_WRITE: 2962 return "w"; 2963 case OPEN_DELEGATE_READ_ATTRS_DELEG: 2964 return "ra"; 2965 case OPEN_DELEGATE_WRITE_ATTRS_DELEG: 2966 return "wa"; 2967 } 2968 return "?"; 2969 } 2970 2971 static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st) 2972 { 2973 struct nfs4_delegation *ds; 2974 struct nfs4_file *nf; 2975 struct nfsd_file *file; 2976 2977 ds = delegstateid(st); 2978 nf = st->sc_file; 2979 2980 seq_puts(s, "- "); 2981 nfs4_show_stateid(s, &st->sc_stateid); 2982 seq_puts(s, ": { type: deleg, "); 2983 2984 seq_printf(s, "access: %s", nfs4_show_deleg_type(ds->dl_type)); 2985 2986 /* XXX: lease time, whether it's being recalled. */ 2987 2988 spin_lock(&nf->fi_lock); 2989 file = nf->fi_deleg_file; 2990 if (file) { 2991 seq_puts(s, ", "); 2992 nfs4_show_superblock(s, file); 2993 seq_puts(s, ", "); 2994 nfs4_show_fname(s, file); 2995 } 2996 spin_unlock(&nf->fi_lock); 2997 if (st->sc_status & SC_STATUS_ADMIN_REVOKED) 2998 seq_puts(s, ", admin-revoked"); 2999 seq_puts(s, " }\n"); 3000 return 0; 3001 } 3002 3003 static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st) 3004 { 3005 struct nfs4_layout_stateid *ls; 3006 struct nfsd_file *file; 3007 3008 ls = container_of(st, struct nfs4_layout_stateid, ls_stid); 3009 3010 seq_puts(s, "- "); 3011 nfs4_show_stateid(s, &st->sc_stateid); 3012 seq_puts(s, ": { type: layout"); 3013 3014 /* XXX: What else would be useful? 
*/ 3015 3016 spin_lock(&ls->ls_stid.sc_file->fi_lock); 3017 file = ls->ls_file; 3018 if (file) { 3019 seq_puts(s, ", "); 3020 nfs4_show_superblock(s, file); 3021 seq_puts(s, ", "); 3022 nfs4_show_fname(s, file); 3023 } 3024 spin_unlock(&ls->ls_stid.sc_file->fi_lock); 3025 if (st->sc_status & SC_STATUS_ADMIN_REVOKED) 3026 seq_puts(s, ", admin-revoked"); 3027 seq_puts(s, " }\n"); 3028 3029 return 0; 3030 } 3031 3032 static int states_show(struct seq_file *s, void *v) 3033 { 3034 struct nfs4_stid *st = v; 3035 3036 switch (st->sc_type) { 3037 case SC_TYPE_OPEN: 3038 return nfs4_show_open(s, st); 3039 case SC_TYPE_LOCK: 3040 return nfs4_show_lock(s, st); 3041 case SC_TYPE_DELEG: 3042 return nfs4_show_deleg(s, st); 3043 case SC_TYPE_LAYOUT: 3044 return nfs4_show_layout(s, st); 3045 default: 3046 return 0; /* XXX: or SEQ_SKIP? */ 3047 } 3048 /* XXX: copy stateids? */ 3049 } 3050 3051 static struct seq_operations states_seq_ops = { 3052 .start = states_start, 3053 .next = states_next, 3054 .stop = states_stop, 3055 .show = states_show 3056 }; 3057 3058 static int client_states_open(struct inode *inode, struct file *file) 3059 { 3060 struct seq_file *s; 3061 struct nfs4_client *clp; 3062 int ret; 3063 3064 clp = get_nfsdfs_clp(inode); 3065 if (!clp) 3066 return -ENXIO; 3067 3068 ret = seq_open(file, &states_seq_ops); 3069 if (ret) 3070 return ret; 3071 s = file->private_data; 3072 s->private = clp; 3073 return 0; 3074 } 3075 3076 static int client_opens_release(struct inode *inode, struct file *file) 3077 { 3078 struct seq_file *m = file->private_data; 3079 struct nfs4_client *clp = m->private; 3080 3081 /* XXX: alternatively, we could get/drop in seq start/stop */ 3082 drop_client(clp); 3083 return seq_release(inode, file); 3084 } 3085 3086 static const struct file_operations client_states_fops = { 3087 .open = client_states_open, 3088 .read = seq_read, 3089 .llseek = seq_lseek, 3090 .release = client_opens_release, 3091 }; 3092 3093 /* 3094 * Normally we refuse to destroy clients that are in use, but here the 3095 * administrator is telling us to just do it. 
We also want to wait 3096 * so the caller has a guarantee that the client's locks are gone by 3097 * the time the write returns: 3098 */ 3099 static void force_expire_client(struct nfs4_client *clp) 3100 { 3101 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 3102 bool already_expired; 3103 3104 trace_nfsd_clid_admin_expired(&clp->cl_clientid); 3105 3106 spin_lock(&nn->client_lock); 3107 clp->cl_time = 0; 3108 spin_unlock(&nn->client_lock); 3109 3110 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0); 3111 spin_lock(&nn->client_lock); 3112 already_expired = list_empty(&clp->cl_lru); 3113 if (!already_expired) 3114 unhash_client_locked(clp); 3115 spin_unlock(&nn->client_lock); 3116 3117 if (!already_expired) 3118 expire_client(clp); 3119 else 3120 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL); 3121 } 3122 3123 static ssize_t client_ctl_write(struct file *file, const char __user *buf, 3124 size_t size, loff_t *pos) 3125 { 3126 char *data; 3127 struct nfs4_client *clp; 3128 3129 data = simple_transaction_get(file, buf, size); 3130 if (IS_ERR(data)) 3131 return PTR_ERR(data); 3132 if (size != 7 || 0 != memcmp(data, "expire\n", 7)) 3133 return -EINVAL; 3134 clp = get_nfsdfs_clp(file_inode(file)); 3135 if (!clp) 3136 return -ENXIO; 3137 force_expire_client(clp); 3138 drop_client(clp); 3139 return 7; 3140 } 3141 3142 static const struct file_operations client_ctl_fops = { 3143 .write = client_ctl_write, 3144 .release = simple_transaction_release, 3145 }; 3146 3147 static const struct tree_descr client_files[] = { 3148 [0] = {"info", &client_info_fops, S_IRUSR}, 3149 [1] = {"states", &client_states_fops, S_IRUSR}, 3150 [2] = {"ctl", &client_ctl_fops, S_IWUSR}, 3151 [3] = {""}, 3152 }; 3153 3154 static int 3155 nfsd4_cb_recall_any_done(struct nfsd4_callback *cb, 3156 struct rpc_task *task) 3157 { 3158 trace_nfsd_cb_recall_any_done(cb, task); 3159 switch (task->tk_status) { 3160 case -NFS4ERR_DELAY: 3161 rpc_delay(task, 2 * HZ); 3162 return 0; 3163 default: 3164 return 1; 3165 } 3166 } 3167 3168 static void 3169 nfsd4_cb_recall_any_release(struct nfsd4_callback *cb) 3170 { 3171 struct nfs4_client *clp = cb->cb_clp; 3172 3173 drop_client(clp); 3174 } 3175 3176 static int 3177 nfsd4_cb_getattr_done(struct nfsd4_callback *cb, struct rpc_task *task) 3178 { 3179 struct nfs4_cb_fattr *ncf = 3180 container_of(cb, struct nfs4_cb_fattr, ncf_getattr); 3181 struct nfs4_delegation *dp = 3182 container_of(ncf, struct nfs4_delegation, dl_cb_fattr); 3183 3184 trace_nfsd_cb_getattr_done(&dp->dl_stid.sc_stateid, task); 3185 ncf->ncf_cb_status = task->tk_status; 3186 switch (task->tk_status) { 3187 case -NFS4ERR_DELAY: 3188 rpc_delay(task, 2 * HZ); 3189 return 0; 3190 default: 3191 return 1; 3192 } 3193 } 3194 3195 static void 3196 nfsd4_cb_getattr_release(struct nfsd4_callback *cb) 3197 { 3198 struct nfs4_cb_fattr *ncf = 3199 container_of(cb, struct nfs4_cb_fattr, ncf_getattr); 3200 struct nfs4_delegation *dp = 3201 container_of(ncf, struct nfs4_delegation, dl_cb_fattr); 3202 3203 nfs4_put_stid(&dp->dl_stid); 3204 } 3205 3206 static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = { 3207 .done = nfsd4_cb_recall_any_done, 3208 .release = nfsd4_cb_recall_any_release, 3209 .opcode = OP_CB_RECALL_ANY, 3210 }; 3211 3212 static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops = { 3213 .done = nfsd4_cb_getattr_done, 3214 .release = nfsd4_cb_getattr_release, 3215 .opcode = OP_CB_GETATTR, 3216 }; 3217 3218 static void nfs4_cb_getattr(struct nfs4_cb_fattr *ncf) 3219 { 3220 struct 
nfs4_delegation *dp = 3221 container_of(ncf, struct nfs4_delegation, dl_cb_fattr); 3222 3223 if (test_and_set_bit(NFSD4_CALLBACK_RUNNING, &ncf->ncf_getattr.cb_flags)) 3224 return; 3225 3226 /* set to proper status when nfsd4_cb_getattr_done runs */ 3227 ncf->ncf_cb_status = NFS4ERR_IO; 3228 3229 /* ensure that wake_bit is done when RUNNING is cleared */ 3230 set_bit(NFSD4_CALLBACK_WAKE, &ncf->ncf_getattr.cb_flags); 3231 3232 refcount_inc(&dp->dl_stid.sc_count); 3233 nfsd4_run_cb(&ncf->ncf_getattr); 3234 } 3235 3236 static struct nfs4_client *create_client(struct xdr_netobj name, 3237 struct svc_rqst *rqstp, nfs4_verifier *verf) 3238 { 3239 struct nfs4_client *clp; 3240 struct sockaddr *sa = svc_addr(rqstp); 3241 int ret; 3242 struct net *net = SVC_NET(rqstp); 3243 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 3244 struct dentry *dentries[ARRAY_SIZE(client_files)]; 3245 3246 clp = alloc_client(name, nn); 3247 if (clp == NULL) 3248 return NULL; 3249 3250 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred); 3251 if (ret) { 3252 free_client(clp); 3253 return NULL; 3254 } 3255 gen_clid(clp, nn); 3256 kref_init(&clp->cl_nfsdfs.cl_ref); 3257 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL); 3258 clp->cl_time = ktime_get_boottime_seconds(); 3259 copy_verf(clp, verf); 3260 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage)); 3261 clp->cl_cb_session = NULL; 3262 clp->net = net; 3263 clp->cl_nfsd_dentry = nfsd_client_mkdir( 3264 nn, &clp->cl_nfsdfs, 3265 clp->cl_clientid.cl_id - nn->clientid_base, 3266 client_files, dentries); 3267 clp->cl_nfsd_info_dentry = dentries[0]; 3268 if (!clp->cl_nfsd_dentry) { 3269 free_client(clp); 3270 return NULL; 3271 } 3272 clp->cl_ra = kzalloc(sizeof(*clp->cl_ra), GFP_KERNEL); 3273 if (!clp->cl_ra) { 3274 free_client(clp); 3275 return NULL; 3276 } 3277 clp->cl_ra_time = 0; 3278 nfsd4_init_cb(&clp->cl_ra->ra_cb, clp, &nfsd4_cb_recall_any_ops, 3279 NFSPROC4_CLNT_CB_RECALL_ANY); 3280 return clp; 3281 } 3282 3283 static void 3284 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root) 3285 { 3286 struct rb_node **new = &(root->rb_node), *parent = NULL; 3287 struct nfs4_client *clp; 3288 3289 while (*new) { 3290 clp = rb_entry(*new, struct nfs4_client, cl_namenode); 3291 parent = *new; 3292 3293 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0) 3294 new = &((*new)->rb_left); 3295 else 3296 new = &((*new)->rb_right); 3297 } 3298 3299 rb_link_node(&new_clp->cl_namenode, parent, new); 3300 rb_insert_color(&new_clp->cl_namenode, root); 3301 } 3302 3303 static struct nfs4_client * 3304 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root) 3305 { 3306 int cmp; 3307 struct rb_node *node = root->rb_node; 3308 struct nfs4_client *clp; 3309 3310 while (node) { 3311 clp = rb_entry(node, struct nfs4_client, cl_namenode); 3312 cmp = compare_blob(&clp->cl_name, name); 3313 if (cmp > 0) 3314 node = node->rb_left; 3315 else if (cmp < 0) 3316 node = node->rb_right; 3317 else 3318 return clp; 3319 } 3320 return NULL; 3321 } 3322 3323 static void 3324 add_to_unconfirmed(struct nfs4_client *clp) 3325 { 3326 unsigned int idhashval; 3327 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 3328 3329 lockdep_assert_held(&nn->client_lock); 3330 3331 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); 3332 add_clp_to_name_tree(clp, &nn->unconf_name_tree); 3333 idhashval = clientid_hashval(clp->cl_clientid.cl_id); 3334 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]); 3335 renew_client_locked(clp); 3336 } 3337 3338 
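/* A rough sketch of the v4.1+ client-establishment flow implemented by the helpers above and below: nfsd4_exchange_id() calls create_client() and add_to_unconfirmed(), which hashes the new record as unconfirmed; a later CREATE_SESSION whose seqid passes the cl_cs_slot check calls move_to_confirmed() to move it into conf_id_hashtbl and conf_name_tree. An unconfirmed record that is never confirmed is either replaced by a subsequent EXCHANGE_ID with the same name or expired by the laundromat. */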
static void 3339 move_to_confirmed(struct nfs4_client *clp) 3340 { 3341 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id); 3342 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); 3343 3344 lockdep_assert_held(&nn->client_lock); 3345 3346 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]); 3347 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); 3348 add_clp_to_name_tree(clp, &nn->conf_name_tree); 3349 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags); 3350 trace_nfsd_clid_confirmed(&clp->cl_clientid); 3351 renew_client_locked(clp); 3352 } 3353 3354 static struct nfs4_client * 3355 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions) 3356 { 3357 struct nfs4_client *clp; 3358 unsigned int idhashval = clientid_hashval(clid->cl_id); 3359 3360 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) { 3361 if (same_clid(&clp->cl_clientid, clid)) { 3362 if ((bool)clp->cl_minorversion != sessions) 3363 return NULL; 3364 renew_client_locked(clp); 3365 return clp; 3366 } 3367 } 3368 return NULL; 3369 } 3370 3371 static struct nfs4_client * 3372 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) 3373 { 3374 struct list_head *tbl = nn->conf_id_hashtbl; 3375 3376 lockdep_assert_held(&nn->client_lock); 3377 return find_client_in_id_table(tbl, clid, sessions); 3378 } 3379 3380 static struct nfs4_client * 3381 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) 3382 { 3383 struct list_head *tbl = nn->unconf_id_hashtbl; 3384 3385 lockdep_assert_held(&nn->client_lock); 3386 return find_client_in_id_table(tbl, clid, sessions); 3387 } 3388 3389 static bool clp_used_exchangeid(struct nfs4_client *clp) 3390 { 3391 return clp->cl_exchange_flags != 0; 3392 } 3393 3394 static struct nfs4_client * 3395 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) 3396 { 3397 lockdep_assert_held(&nn->client_lock); 3398 return find_clp_in_name_tree(name, &nn->conf_name_tree); 3399 } 3400 3401 static struct nfs4_client * 3402 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn) 3403 { 3404 lockdep_assert_held(&nn->client_lock); 3405 return find_clp_in_name_tree(name, &nn->unconf_name_tree); 3406 } 3407 3408 static void 3409 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp) 3410 { 3411 struct nfs4_cb_conn *conn = &clp->cl_cb_conn; 3412 struct sockaddr *sa = svc_addr(rqstp); 3413 u32 scopeid = rpc_get_scope_id(sa); 3414 unsigned short expected_family; 3415 3416 /* Currently, we only support tcp and tcp6 for the callback channel */ 3417 if (se->se_callback_netid_len == 3 && 3418 !memcmp(se->se_callback_netid_val, "tcp", 3)) 3419 expected_family = AF_INET; 3420 else if (se->se_callback_netid_len == 4 && 3421 !memcmp(se->se_callback_netid_val, "tcp6", 4)) 3422 expected_family = AF_INET6; 3423 else 3424 goto out_err; 3425 3426 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val, 3427 se->se_callback_addr_len, 3428 (struct sockaddr *)&conn->cb_addr, 3429 sizeof(conn->cb_addr)); 3430 3431 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family) 3432 goto out_err; 3433 3434 if (conn->cb_addr.ss_family == AF_INET6) 3435 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid; 3436 3437 conn->cb_prog = se->se_callback_prog; 3438 conn->cb_ident = se->se_callback_ident; 3439 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen); 3440 trace_nfsd_cb_args(clp, conn); 3441 return; 3442 out_err: 3443 
conn->cb_addr.ss_family = AF_UNSPEC; 3444 conn->cb_addrlen = 0; 3445 trace_nfsd_cb_nodelegs(clp); 3446 return; 3447 } 3448 3449 /* 3450 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size. 3451 */ 3452 static void 3453 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) 3454 { 3455 struct xdr_buf *buf = resp->xdr->buf; 3456 struct nfsd4_slot *slot = resp->cstate.slot; 3457 unsigned int base; 3458 3459 dprintk("--> %s slot %p\n", __func__, slot); 3460 3461 slot->sl_flags |= NFSD4_SLOT_INITIALIZED; 3462 slot->sl_opcnt = resp->opcnt; 3463 slot->sl_status = resp->cstate.status; 3464 free_svc_cred(&slot->sl_cred); 3465 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred); 3466 3467 if (!nfsd4_cache_this(resp)) { 3468 slot->sl_flags &= ~NFSD4_SLOT_CACHED; 3469 return; 3470 } 3471 slot->sl_flags |= NFSD4_SLOT_CACHED; 3472 3473 base = resp->cstate.data_offset; 3474 slot->sl_datalen = buf->len - base; 3475 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen)) 3476 WARN(1, "%s: sessions DRC could not cache compound\n", 3477 __func__); 3478 return; 3479 } 3480 3481 /* 3482 * Encode the replay sequence operation from the slot values. 3483 * If cachethis is FALSE encode the uncached rep error on the next 3484 * operation which sets resp->p and increments resp->opcnt for 3485 * nfs4svc_encode_compoundres. 3486 * 3487 */ 3488 static __be32 3489 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args, 3490 struct nfsd4_compoundres *resp) 3491 { 3492 struct nfsd4_op *op; 3493 struct nfsd4_slot *slot = resp->cstate.slot; 3494 3495 /* Encode the replayed sequence operation */ 3496 op = &args->ops[resp->opcnt - 1]; 3497 nfsd4_encode_operation(resp, op); 3498 3499 if (slot->sl_flags & NFSD4_SLOT_CACHED) 3500 return op->status; 3501 if (args->opcnt == 1) { 3502 /* 3503 * The original operation wasn't a solo sequence--we 3504 * always cache those--so this retry must not match the 3505 * original: 3506 */ 3507 op->status = nfserr_seq_false_retry; 3508 } else { 3509 op = &args->ops[resp->opcnt++]; 3510 op->status = nfserr_retry_uncached_rep; 3511 nfsd4_encode_operation(resp, op); 3512 } 3513 return op->status; 3514 } 3515 3516 /* 3517 * The sequence operation is not cached because we can use the slot and 3518 * session values. 3519 */ 3520 static __be32 3521 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, 3522 struct nfsd4_sequence *seq) 3523 { 3524 struct nfsd4_slot *slot = resp->cstate.slot; 3525 struct xdr_stream *xdr = resp->xdr; 3526 __be32 *p; 3527 __be32 status; 3528 3529 dprintk("--> %s slot %p\n", __func__, slot); 3530 3531 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp); 3532 if (status) 3533 return status; 3534 3535 p = xdr_reserve_space(xdr, slot->sl_datalen); 3536 if (!p) { 3537 WARN_ON_ONCE(1); 3538 return nfserr_serverfault; 3539 } 3540 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen); 3541 xdr_commit_encode(xdr); 3542 3543 resp->opcnt = slot->sl_opcnt; 3544 return slot->sl_status; 3545 } 3546 3547 /* 3548 * Set the exchange_id flags returned by the server. 3549 */ 3550 static void 3551 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid) 3552 { 3553 #ifdef CONFIG_NFSD_PNFS 3554 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS; 3555 #else 3556 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS; 3557 #endif 3558 3559 /* Referrals are supported, Migration is not. */ 3560 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER; 3561 3562 /* set the wire flags to return to client. 
*/ 3563 clid->flags = new->cl_exchange_flags; 3564 } 3565 3566 static bool client_has_openowners(struct nfs4_client *clp) 3567 { 3568 struct nfs4_openowner *oo; 3569 3570 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) { 3571 if (!list_empty(&oo->oo_owner.so_stateids)) 3572 return true; 3573 } 3574 return false; 3575 } 3576 3577 static bool client_has_state(struct nfs4_client *clp) 3578 { 3579 return client_has_openowners(clp) 3580 #ifdef CONFIG_NFSD_PNFS 3581 || !list_empty(&clp->cl_lo_states) 3582 #endif 3583 || !list_empty(&clp->cl_delegations) 3584 || !list_empty(&clp->cl_sessions) 3585 || nfsd4_has_active_async_copies(clp); 3586 } 3587 3588 static __be32 copy_impl_id(struct nfs4_client *clp, 3589 struct nfsd4_exchange_id *exid) 3590 { 3591 if (!exid->nii_domain.data) 3592 return 0; 3593 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL); 3594 if (!clp->cl_nii_domain.data) 3595 return nfserr_jukebox; 3596 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL); 3597 if (!clp->cl_nii_name.data) 3598 return nfserr_jukebox; 3599 clp->cl_nii_time = exid->nii_time; 3600 return 0; 3601 } 3602 3603 __be32 3604 nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 3605 union nfsd4_op_u *u) 3606 { 3607 struct nfsd4_exchange_id *exid = &u->exchange_id; 3608 struct nfs4_client *conf, *new; 3609 struct nfs4_client *unconf = NULL; 3610 __be32 status; 3611 char addr_str[INET6_ADDRSTRLEN]; 3612 nfs4_verifier verf = exid->verifier; 3613 struct sockaddr *sa = svc_addr(rqstp); 3614 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A; 3615 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3616 3617 rpc_ntop(sa, addr_str, sizeof(addr_str)); 3618 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p " 3619 "ip_addr=%s flags %x, spa_how %u\n", 3620 __func__, rqstp, exid, exid->clname.len, exid->clname.data, 3621 addr_str, exid->flags, exid->spa_how); 3622 3623 exid->server_impl_name = kasprintf(GFP_KERNEL, "%s %s %s %s", 3624 utsname()->sysname, utsname()->release, 3625 utsname()->version, utsname()->machine); 3626 if (!exid->server_impl_name) 3627 return nfserr_jukebox; 3628 3629 if (exid->flags & ~EXCHGID4_FLAG_MASK_A) 3630 return nfserr_inval; 3631 3632 new = create_client(exid->clname, rqstp, &verf); 3633 if (new == NULL) 3634 return nfserr_jukebox; 3635 status = copy_impl_id(new, exid); 3636 if (status) 3637 goto out_nolock; 3638 3639 switch (exid->spa_how) { 3640 case SP4_MACH_CRED: 3641 exid->spo_must_enforce[0] = 0; 3642 exid->spo_must_enforce[1] = ( 3643 1 << (OP_BIND_CONN_TO_SESSION - 32) | 3644 1 << (OP_EXCHANGE_ID - 32) | 3645 1 << (OP_CREATE_SESSION - 32) | 3646 1 << (OP_DESTROY_SESSION - 32) | 3647 1 << (OP_DESTROY_CLIENTID - 32)); 3648 3649 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) | 3650 1 << (OP_OPEN_DOWNGRADE) | 3651 1 << (OP_LOCKU) | 3652 1 << (OP_DELEGRETURN)); 3653 3654 exid->spo_must_allow[1] &= ( 3655 1 << (OP_TEST_STATEID - 32) | 3656 1 << (OP_FREE_STATEID - 32)); 3657 if (!svc_rqst_integrity_protected(rqstp)) { 3658 status = nfserr_inval; 3659 goto out_nolock; 3660 } 3661 /* 3662 * Sometimes userspace doesn't give us a principal. 3663 * Which is a bug, really. 
Anyway, we can't enforce 3664 * MACH_CRED in that case, better to give up now: 3665 */ 3666 if (!new->cl_cred.cr_principal && 3667 !new->cl_cred.cr_raw_principal) { 3668 status = nfserr_serverfault; 3669 goto out_nolock; 3670 } 3671 new->cl_mach_cred = true; 3672 break; 3673 case SP4_NONE: 3674 break; 3675 default: /* checked by xdr code */ 3676 WARN_ON_ONCE(1); 3677 fallthrough; 3678 case SP4_SSV: 3679 status = nfserr_encr_alg_unsupp; 3680 goto out_nolock; 3681 } 3682 3683 /* Cases below refer to rfc 5661 section 18.35.4: */ 3684 spin_lock(&nn->client_lock); 3685 conf = find_confirmed_client_by_name(&exid->clname, nn); 3686 if (conf) { 3687 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred); 3688 bool verfs_match = same_verf(&verf, &conf->cl_verifier); 3689 3690 if (update) { 3691 if (!clp_used_exchangeid(conf)) { /* buggy client */ 3692 status = nfserr_inval; 3693 goto out; 3694 } 3695 if (!nfsd4_mach_creds_match(conf, rqstp)) { 3696 status = nfserr_wrong_cred; 3697 goto out; 3698 } 3699 if (!creds_match) { /* case 9 */ 3700 status = nfserr_perm; 3701 goto out; 3702 } 3703 if (!verfs_match) { /* case 8 */ 3704 status = nfserr_not_same; 3705 goto out; 3706 } 3707 /* case 6 */ 3708 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R; 3709 trace_nfsd_clid_confirmed_r(conf); 3710 goto out_copy; 3711 } 3712 if (!creds_match) { /* case 3 */ 3713 if (client_has_state(conf)) { 3714 status = nfserr_clid_inuse; 3715 trace_nfsd_clid_cred_mismatch(conf, rqstp); 3716 goto out; 3717 } 3718 goto out_new; 3719 } 3720 if (verfs_match) { /* case 2 */ 3721 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R; 3722 trace_nfsd_clid_confirmed_r(conf); 3723 goto out_copy; 3724 } 3725 /* case 5, client reboot */ 3726 trace_nfsd_clid_verf_mismatch(conf, rqstp, &verf); 3727 conf = NULL; 3728 goto out_new; 3729 } 3730 3731 if (update) { /* case 7 */ 3732 status = nfserr_noent; 3733 goto out; 3734 } 3735 3736 unconf = find_unconfirmed_client_by_name(&exid->clname, nn); 3737 if (unconf) /* case 4, possible retry or client restart */ 3738 unhash_client_locked(unconf); 3739 3740 /* case 1, new owner ID */ 3741 trace_nfsd_clid_fresh(new); 3742 3743 out_new: 3744 if (conf) { 3745 status = mark_client_expired_locked(conf); 3746 if (status) 3747 goto out; 3748 trace_nfsd_clid_replaced(&conf->cl_clientid); 3749 } 3750 new->cl_minorversion = cstate->minorversion; 3751 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0]; 3752 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1]; 3753 3754 /* Contrived initial CREATE_SESSION response */ 3755 new->cl_cs_slot.sl_status = nfserr_seq_misordered; 3756 3757 add_to_unconfirmed(new); 3758 swap(new, conf); 3759 out_copy: 3760 exid->clientid.cl_boot = conf->cl_clientid.cl_boot; 3761 exid->clientid.cl_id = conf->cl_clientid.cl_id; 3762 3763 exid->seqid = conf->cl_cs_slot.sl_seqid + 1; 3764 nfsd4_set_ex_flags(conf, exid); 3765 3766 exid->nii_domain.len = sizeof("kernel.org") - 1; 3767 exid->nii_domain.data = "kernel.org"; 3768 3769 /* 3770 * Note that RFC 8881 places no length limit on 3771 * nii_name, but this implementation permits no 3772 * more than NFS4_OPAQUE_LIMIT bytes. 
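* Longer strings are simply truncated to that limit rather than rejected, so the reply still carries a usable (if shortened) implementation name.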
3773 */ 3774 exid->nii_name.len = strlen(exid->server_impl_name); 3775 if (exid->nii_name.len > NFS4_OPAQUE_LIMIT) 3776 exid->nii_name.len = NFS4_OPAQUE_LIMIT; 3777 exid->nii_name.data = exid->server_impl_name; 3778 3779 /* just send zeros - the date is in nii_name */ 3780 exid->nii_time.tv_sec = 0; 3781 exid->nii_time.tv_nsec = 0; 3782 3783 dprintk("nfsd4_exchange_id seqid %d flags %x\n", 3784 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags); 3785 status = nfs_ok; 3786 3787 out: 3788 spin_unlock(&nn->client_lock); 3789 out_nolock: 3790 if (new) 3791 expire_client(new); 3792 if (unconf) { 3793 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid); 3794 expire_client(unconf); 3795 } 3796 return status; 3797 } 3798 3799 void 3800 nfsd4_exchange_id_release(union nfsd4_op_u *u) 3801 { 3802 struct nfsd4_exchange_id *exid = &u->exchange_id; 3803 3804 kfree(exid->server_impl_name); 3805 } 3806 3807 static __be32 check_slot_seqid(u32 seqid, u32 slot_seqid, u8 flags) 3808 { 3809 /* The slot is in use, and no response has been sent. */ 3810 if (flags & NFSD4_SLOT_INUSE) { 3811 if (seqid == slot_seqid) 3812 return nfserr_jukebox; 3813 else 3814 return nfserr_seq_misordered; 3815 } 3816 /* Note unsigned 32-bit arithmetic handles wraparound: */ 3817 if (likely(seqid == slot_seqid + 1)) 3818 return nfs_ok; 3819 if ((flags & NFSD4_SLOT_REUSED) && seqid == 1) 3820 return nfs_ok; 3821 if (seqid == slot_seqid) 3822 return nfserr_replay_cache; 3823 return nfserr_seq_misordered; 3824 } 3825 3826 /* 3827 * Cache the create session result into the create session single DRC 3828 * slot cache by saving the xdr structure. sl_seqid has been set. 3829 * Do this for solo or embedded create session operations. 3830 */ 3831 static void 3832 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses, 3833 struct nfsd4_clid_slot *slot, __be32 nfserr) 3834 { 3835 slot->sl_status = nfserr; 3836 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses)); 3837 } 3838 3839 static __be32 3840 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses, 3841 struct nfsd4_clid_slot *slot) 3842 { 3843 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses)); 3844 return slot->sl_status; 3845 } 3846 3847 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\ 3848 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \ 3849 1 + /* MIN tag is length with zero, only length */ \ 3850 3 + /* version, opcount, opcode */ \ 3851 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ 3852 /* seqid, slotID, slotID, cache */ \ 3853 4 ) * sizeof(__be32)) 3854 3855 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\ 3856 2 + /* verifier: AUTH_NULL, length 0 */\ 3857 1 + /* status */ \ 3858 1 + /* MIN tag is length with zero, only length */ \ 3859 3 + /* opcount, opcode, opstatus*/ \ 3860 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \ 3861 /* seqid, slotID, slotID, slotID, status */ \ 3862 5 ) * sizeof(__be32)) 3863 3864 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn) 3865 { 3866 u32 maxrpc = nn->nfsd_serv->sv_max_mesg; 3867 3868 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ) 3869 return nfserr_toosmall; 3870 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ) 3871 return nfserr_toosmall; 3872 ca->headerpadsz = 0; 3873 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc); 3874 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc); 3875 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND); 3876 ca->maxresp_cached = min_t(u32, ca->maxresp_cached, 3877 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ); 3878 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION); 
3879 3880 return nfs_ok; 3881 } 3882 3883 /* 3884 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now. 3885 * These are based on similar macros in linux/sunrpc/msg_prot.h . 3886 */ 3887 #define RPC_MAX_HEADER_WITH_AUTH_SYS \ 3888 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK)) 3889 3890 #define RPC_MAX_REPHEADER_WITH_AUTH_SYS \ 3891 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK)) 3892 3893 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \ 3894 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32)) 3895 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \ 3896 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \ 3897 sizeof(__be32)) 3898 3899 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca) 3900 { 3901 ca->headerpadsz = 0; 3902 3903 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ) 3904 return nfserr_toosmall; 3905 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ) 3906 return nfserr_toosmall; 3907 ca->maxresp_cached = 0; 3908 if (ca->maxops < 2) 3909 return nfserr_toosmall; 3910 3911 return nfs_ok; 3912 } 3913 3914 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs) 3915 { 3916 switch (cbs->flavor) { 3917 case RPC_AUTH_NULL: 3918 case RPC_AUTH_UNIX: 3919 return nfs_ok; 3920 default: 3921 /* 3922 * GSS case: the spec doesn't allow us to return this 3923 * error. But it also doesn't allow us not to support 3924 * GSS. 3925 * I'd rather this fail hard than return some error the 3926 * client might think it can already handle: 3927 */ 3928 return nfserr_encr_alg_unsupp; 3929 } 3930 } 3931 3932 __be32 3933 nfsd4_create_session(struct svc_rqst *rqstp, 3934 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) 3935 { 3936 struct nfsd4_create_session *cr_ses = &u->create_session; 3937 struct sockaddr *sa = svc_addr(rqstp); 3938 struct nfs4_client *conf, *unconf; 3939 struct nfsd4_clid_slot *cs_slot; 3940 struct nfs4_client *old = NULL; 3941 struct nfsd4_session *new; 3942 struct nfsd4_conn *conn; 3943 __be32 status = 0; 3944 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 3945 3946 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A) 3947 return nfserr_inval; 3948 status = nfsd4_check_cb_sec(&cr_ses->cb_sec); 3949 if (status) 3950 return status; 3951 status = check_forechannel_attrs(&cr_ses->fore_channel, nn); 3952 if (status) 3953 return status; 3954 status = check_backchannel_attrs(&cr_ses->back_channel); 3955 if (status) 3956 goto out_err; 3957 status = nfserr_jukebox; 3958 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel); 3959 if (!new) 3960 goto out_err; 3961 conn = alloc_conn_from_crses(rqstp, cr_ses); 3962 if (!conn) 3963 goto out_free_session; 3964 3965 spin_lock(&nn->client_lock); 3966 3967 /* RFC 8881 Section 18.36.4 Phase 1: Client record look-up. */ 3968 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn); 3969 conf = find_confirmed_client(&cr_ses->clientid, true, nn); 3970 if (!conf && !unconf) { 3971 status = nfserr_stale_clientid; 3972 goto out_free_conn; 3973 } 3974 3975 /* RFC 8881 Section 18.36.4 Phase 2: Sequence ID processing. 
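 * check_slot_seqid() classifies cr_ses->seqid against the client's
 * single CREATE_SESSION slot: the expected successor (computed in
 * modulo-2^32 arithmetic, so seqid 0 validly follows slot_seqid
 * 0xffffffff) advances the slot, an equal seqid is a retransmission
 * answered from the replay cache, and anything else is
 * NFS4ERR_SEQ_MISORDERED.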
*/ 3976 if (conf) { 3977 cs_slot = &conf->cl_cs_slot; 3978 trace_nfsd_slot_seqid_conf(conf, cr_ses); 3979 } else { 3980 cs_slot = &unconf->cl_cs_slot; 3981 trace_nfsd_slot_seqid_unconf(unconf, cr_ses); 3982 } 3983 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); 3984 switch (status) { 3985 case nfs_ok: 3986 cs_slot->sl_seqid++; 3987 cr_ses->seqid = cs_slot->sl_seqid; 3988 break; 3989 case nfserr_replay_cache: 3990 status = nfsd4_replay_create_session(cr_ses, cs_slot); 3991 fallthrough; 3992 case nfserr_jukebox: 3993 /* The server MUST NOT cache NFS4ERR_DELAY */ 3994 goto out_free_conn; 3995 default: 3996 goto out_cache_error; 3997 } 3998 3999 /* RFC 8881 Section 18.36.4 Phase 3: Client ID confirmation. */ 4000 if (conf) { 4001 status = nfserr_wrong_cred; 4002 if (!nfsd4_mach_creds_match(conf, rqstp)) 4003 goto out_cache_error; 4004 } else { 4005 status = nfserr_clid_inuse; 4006 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) || 4007 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) { 4008 trace_nfsd_clid_cred_mismatch(unconf, rqstp); 4009 goto out_cache_error; 4010 } 4011 status = nfserr_wrong_cred; 4012 if (!nfsd4_mach_creds_match(unconf, rqstp)) 4013 goto out_cache_error; 4014 old = find_confirmed_client_by_name(&unconf->cl_name, nn); 4015 if (old) { 4016 status = mark_client_expired_locked(old); 4017 if (status) 4018 goto out_expired_error; 4019 trace_nfsd_clid_replaced(&old->cl_clientid); 4020 } 4021 move_to_confirmed(unconf); 4022 conf = unconf; 4023 } 4024 4025 /* RFC 8881 Section 18.36.4 Phase 4: Session creation. */ 4026 status = nfs_ok; 4027 /* Persistent sessions are not supported */ 4028 cr_ses->flags &= ~SESSION4_PERSIST; 4029 /* Upshifting from TCP to RDMA is not supported */ 4030 cr_ses->flags &= ~SESSION4_RDMA; 4031 /* Report the correct number of backchannel slots */ 4032 cr_ses->back_channel.maxreqs = new->se_cb_highest_slot + 1; 4033 4034 init_session(rqstp, new, conf, cr_ses); 4035 nfsd4_get_session_locked(new); 4036 4037 memcpy(cr_ses->sessionid.data, new->se_sessionid.data, 4038 NFS4_MAX_SESSIONID_LEN); 4039 4040 /* cache solo and embedded create sessions under the client_lock */ 4041 nfsd4_cache_create_session(cr_ses, cs_slot, status); 4042 spin_unlock(&nn->client_lock); 4043 if (conf == unconf) 4044 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY); 4045 /* init connection and backchannel */ 4046 nfsd4_init_conn(rqstp, conn, new); 4047 nfsd4_put_session(new); 4048 if (old) 4049 expire_client(old); 4050 return status; 4051 4052 out_expired_error: 4053 /* 4054 * Revert the slot seq_nr change so the server will process 4055 * the client's resend instead of returning a cached response. 
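 * Only the NFS4ERR_DELAY (jukebox) case is reverted and left
 * uncached; any other failure falls through to out_cache_error so
 * that a retransmission of this CREATE_SESSION sees the same reply.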
4056 */ 4057 if (status == nfserr_jukebox) { 4058 cs_slot->sl_seqid--; 4059 cr_ses->seqid = cs_slot->sl_seqid; 4060 goto out_free_conn; 4061 } 4062 out_cache_error: 4063 nfsd4_cache_create_session(cr_ses, cs_slot, status); 4064 out_free_conn: 4065 spin_unlock(&nn->client_lock); 4066 free_conn(conn); 4067 out_free_session: 4068 __free_session(new); 4069 out_err: 4070 return status; 4071 } 4072 4073 static __be32 nfsd4_map_bcts_dir(u32 *dir) 4074 { 4075 switch (*dir) { 4076 case NFS4_CDFC4_FORE: 4077 case NFS4_CDFC4_BACK: 4078 return nfs_ok; 4079 case NFS4_CDFC4_FORE_OR_BOTH: 4080 case NFS4_CDFC4_BACK_OR_BOTH: 4081 *dir = NFS4_CDFC4_BOTH; 4082 return nfs_ok; 4083 } 4084 return nfserr_inval; 4085 } 4086 4087 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, 4088 struct nfsd4_compound_state *cstate, 4089 union nfsd4_op_u *u) 4090 { 4091 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl; 4092 struct nfsd4_session *session = cstate->session; 4093 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4094 __be32 status; 4095 4096 status = nfsd4_check_cb_sec(&bc->bc_cb_sec); 4097 if (status) 4098 return status; 4099 spin_lock(&nn->client_lock); 4100 session->se_cb_prog = bc->bc_cb_program; 4101 session->se_cb_sec = bc->bc_cb_sec; 4102 spin_unlock(&nn->client_lock); 4103 4104 nfsd4_probe_callback(session->se_client); 4105 4106 return nfs_ok; 4107 } 4108 4109 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s) 4110 { 4111 struct nfsd4_conn *c; 4112 4113 list_for_each_entry(c, &s->se_conns, cn_persession) { 4114 if (c->cn_xprt == xpt) { 4115 return c; 4116 } 4117 } 4118 return NULL; 4119 } 4120 4121 static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst, 4122 struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn) 4123 { 4124 struct nfs4_client *clp = session->se_client; 4125 struct svc_xprt *xpt = rqst->rq_xprt; 4126 struct nfsd4_conn *c; 4127 __be32 status; 4128 4129 /* Following the last paragraph of RFC 5661 Section 18.34.3: */ 4130 spin_lock(&clp->cl_lock); 4131 c = __nfsd4_find_conn(xpt, session); 4132 if (!c) 4133 status = nfserr_noent; 4134 else if (req == c->cn_flags) 4135 status = nfs_ok; 4136 else if (req == NFS4_CDFC4_FORE_OR_BOTH && 4137 c->cn_flags != NFS4_CDFC4_BACK) 4138 status = nfs_ok; 4139 else if (req == NFS4_CDFC4_BACK_OR_BOTH && 4140 c->cn_flags != NFS4_CDFC4_FORE) 4141 status = nfs_ok; 4142 else 4143 status = nfserr_inval; 4144 spin_unlock(&clp->cl_lock); 4145 if (status == nfs_ok && conn) 4146 *conn = c; 4147 return status; 4148 } 4149 4150 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp, 4151 struct nfsd4_compound_state *cstate, 4152 union nfsd4_op_u *u) 4153 { 4154 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session; 4155 __be32 status; 4156 struct nfsd4_conn *conn; 4157 struct nfsd4_session *session; 4158 struct net *net = SVC_NET(rqstp); 4159 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 4160 4161 if (!nfsd4_last_compound_op(rqstp)) 4162 return nfserr_not_only_op; 4163 spin_lock(&nn->client_lock); 4164 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status); 4165 spin_unlock(&nn->client_lock); 4166 if (!session) 4167 goto out_no_session; 4168 status = nfserr_wrong_cred; 4169 if (!nfsd4_mach_creds_match(session->se_client, rqstp)) 4170 goto out; 4171 status = nfsd4_match_existing_connection(rqstp, session, 4172 bcts->dir, &conn); 4173 if (status == nfs_ok) { 4174 if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH || 4175 bcts->dir == NFS4_CDFC4_BACK) 4176 
conn->cn_flags |= NFS4_CDFC4_BACK; 4177 nfsd4_probe_callback(session->se_client); 4178 goto out; 4179 } 4180 if (status == nfserr_inval) 4181 goto out; 4182 status = nfsd4_map_bcts_dir(&bcts->dir); 4183 if (status) 4184 goto out; 4185 conn = alloc_conn(rqstp, bcts->dir); 4186 status = nfserr_jukebox; 4187 if (!conn) 4188 goto out; 4189 nfsd4_init_conn(rqstp, conn, session); 4190 status = nfs_ok; 4191 out: 4192 nfsd4_put_session(session); 4193 out_no_session: 4194 return status; 4195 } 4196 4197 static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid) 4198 { 4199 if (!cstate->session) 4200 return false; 4201 return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid)); 4202 } 4203 4204 __be32 4205 nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate, 4206 union nfsd4_op_u *u) 4207 { 4208 struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid; 4209 struct nfsd4_session *ses; 4210 __be32 status; 4211 int ref_held_by_me = 0; 4212 struct net *net = SVC_NET(r); 4213 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 4214 4215 status = nfserr_not_only_op; 4216 if (nfsd4_compound_in_session(cstate, sessionid)) { 4217 if (!nfsd4_last_compound_op(r)) 4218 goto out; 4219 ref_held_by_me++; 4220 } 4221 dump_sessionid(__func__, sessionid); 4222 spin_lock(&nn->client_lock); 4223 ses = find_in_sessionid_hashtbl(sessionid, net, &status); 4224 if (!ses) 4225 goto out_client_lock; 4226 status = nfserr_wrong_cred; 4227 if (!nfsd4_mach_creds_match(ses->se_client, r)) 4228 goto out_put_session; 4229 status = mark_session_dead_locked(ses, 1 + ref_held_by_me); 4230 if (status) 4231 goto out_put_session; 4232 unhash_session(ses); 4233 spin_unlock(&nn->client_lock); 4234 4235 nfsd4_probe_callback_sync(ses->se_client); 4236 4237 spin_lock(&nn->client_lock); 4238 status = nfs_ok; 4239 out_put_session: 4240 nfsd4_put_session_locked(ses); 4241 out_client_lock: 4242 spin_unlock(&nn->client_lock); 4243 out: 4244 return status; 4245 } 4246 4247 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses) 4248 { 4249 struct nfs4_client *clp = ses->se_client; 4250 struct nfsd4_conn *c; 4251 __be32 status = nfs_ok; 4252 int ret; 4253 4254 spin_lock(&clp->cl_lock); 4255 c = __nfsd4_find_conn(new->cn_xprt, ses); 4256 if (c) 4257 goto out_free; 4258 status = nfserr_conn_not_bound_to_session; 4259 if (clp->cl_mach_cred) 4260 goto out_free; 4261 __nfsd4_hash_conn(new, ses); 4262 spin_unlock(&clp->cl_lock); 4263 ret = nfsd4_register_conn(new); 4264 if (ret) 4265 /* oops; xprt is already down: */ 4266 nfsd4_conn_lost(&new->cn_xpt_user); 4267 return nfs_ok; 4268 out_free: 4269 spin_unlock(&clp->cl_lock); 4270 free_conn(new); 4271 return status; 4272 } 4273 4274 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session) 4275 { 4276 struct nfsd4_compoundargs *args = rqstp->rq_argp; 4277 4278 return args->opcnt > session->se_fchannel.maxops; 4279 } 4280 4281 static bool nfsd4_request_too_big(struct svc_rqst *rqstp, 4282 struct nfsd4_session *session) 4283 { 4284 struct xdr_buf *xb = &rqstp->rq_arg; 4285 4286 return xb->len > session->se_fchannel.maxreq_sz; 4287 } 4288 4289 static bool replay_matches_cache(struct svc_rqst *rqstp, 4290 struct nfsd4_sequence *seq, struct nfsd4_slot *slot) 4291 { 4292 struct nfsd4_compoundargs *argp = rqstp->rq_argp; 4293 4294 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) != 4295 (bool)seq->cachethis) 4296 return false; 4297 /* 4298 * If there's an 
error then the reply can have fewer ops than 4299 * the call. 4300 */ 4301 if (slot->sl_opcnt < argp->opcnt && !slot->sl_status) 4302 return false; 4303 /* 4304 * But if we cached a reply with *more* ops than the call you're 4305 * sending us now, then this new call is clearly not really a 4306 * replay of the old one: 4307 */ 4308 if (slot->sl_opcnt > argp->opcnt) 4309 return false; 4310 /* This is the only check explicitly called by spec: */ 4311 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred)) 4312 return false; 4313 /* 4314 * There may be more comparisons we could actually do, but the 4315 * spec doesn't require us to catch every case where the calls 4316 * don't match (that would require caching the call as well as 4317 * the reply), so we don't bother. 4318 */ 4319 return true; 4320 } 4321 4322 __be32 4323 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4324 union nfsd4_op_u *u) 4325 { 4326 struct nfsd4_sequence *seq = &u->sequence; 4327 struct nfsd4_compoundres *resp = rqstp->rq_resp; 4328 struct xdr_stream *xdr = resp->xdr; 4329 struct nfsd4_session *session; 4330 struct nfs4_client *clp; 4331 struct nfsd4_slot *slot; 4332 struct nfsd4_conn *conn; 4333 __be32 status; 4334 int buflen; 4335 struct net *net = SVC_NET(rqstp); 4336 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 4337 4338 if (resp->opcnt != 1) 4339 return nfserr_sequence_pos; 4340 4341 /* 4342 * Will be either used or freed by nfsd4_sequence_check_conn 4343 * below. 4344 */ 4345 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE); 4346 if (!conn) 4347 return nfserr_jukebox; 4348 4349 spin_lock(&nn->client_lock); 4350 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status); 4351 if (!session) 4352 goto out_no_session; 4353 clp = session->se_client; 4354 4355 status = nfserr_too_many_ops; 4356 if (nfsd4_session_too_many_ops(rqstp, session)) 4357 goto out_put_session; 4358 4359 status = nfserr_req_too_big; 4360 if (nfsd4_request_too_big(rqstp, session)) 4361 goto out_put_session; 4362 4363 status = nfserr_badslot; 4364 if (seq->slotid >= session->se_fchannel.maxreqs) 4365 goto out_put_session; 4366 4367 slot = xa_load(&session->se_slots, seq->slotid); 4368 dprintk("%s: slotid %d\n", __func__, seq->slotid); 4369 4370 trace_nfsd_slot_seqid_sequence(clp, seq, slot); 4371 status = check_slot_seqid(seq->seqid, slot->sl_seqid, slot->sl_flags); 4372 if (status == nfserr_replay_cache) { 4373 status = nfserr_seq_misordered; 4374 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED)) 4375 goto out_put_session; 4376 status = nfserr_seq_false_retry; 4377 if (!replay_matches_cache(rqstp, seq, slot)) 4378 goto out_put_session; 4379 cstate->slot = slot; 4380 cstate->session = session; 4381 cstate->clp = clp; 4382 /* Return the cached reply status and set cstate->status 4383 * for nfsd4_proc_compound processing */ 4384 status = nfsd4_replay_cache_entry(resp, seq); 4385 cstate->status = nfserr_replay_cache; 4386 goto out; 4387 } 4388 if (status) 4389 goto out_put_session; 4390 4391 status = nfsd4_sequence_check_conn(conn, session); 4392 conn = NULL; 4393 if (status) 4394 goto out_put_session; 4395 4396 if (session->se_target_maxslots < session->se_fchannel.maxreqs && 4397 slot->sl_generation == session->se_slot_gen && 4398 seq->maxslots <= session->se_target_maxslots) 4399 /* Client acknowledged our reduce maxreqs */ 4400 free_session_slots(session, session->se_target_maxslots); 4401 4402 buflen = (seq->cachethis) ? 
4403 session->se_fchannel.maxresp_cached : 4404 session->se_fchannel.maxresp_sz; 4405 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache : 4406 nfserr_rep_too_big; 4407 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack)) 4408 goto out_put_session; 4409 svc_reserve_auth(rqstp, buflen); 4410 4411 status = nfs_ok; 4412 /* Success! accept new slot seqid */ 4413 slot->sl_seqid = seq->seqid; 4414 slot->sl_flags &= ~NFSD4_SLOT_REUSED; 4415 slot->sl_flags |= NFSD4_SLOT_INUSE; 4416 slot->sl_generation = session->se_slot_gen; 4417 if (seq->cachethis) 4418 slot->sl_flags |= NFSD4_SLOT_CACHETHIS; 4419 else 4420 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS; 4421 4422 cstate->slot = slot; 4423 cstate->session = session; 4424 cstate->clp = clp; 4425 4426 /* 4427 * If the client ever uses the highest available slot, 4428 * gently try to allocate another 20%. This allows 4429 * fairly quick growth without grossly over-shooting what 4430 * the client might use. 4431 */ 4432 if (seq->slotid == session->se_fchannel.maxreqs - 1 && 4433 session->se_target_maxslots >= session->se_fchannel.maxreqs && 4434 session->se_fchannel.maxreqs < NFSD_MAX_SLOTS_PER_SESSION) { 4435 int s = session->se_fchannel.maxreqs; 4436 int cnt = DIV_ROUND_UP(s, 5); 4437 void *prev_slot; 4438 4439 do { 4440 /* 4441 * GFP_NOWAIT both allows allocation under a 4442 * spinlock, and only succeeds if there is 4443 * plenty of memory. 4444 */ 4445 slot = nfsd4_alloc_slot(&session->se_fchannel, s, 4446 GFP_NOWAIT); 4447 prev_slot = xa_load(&session->se_slots, s); 4448 if (xa_is_value(prev_slot) && slot) { 4449 slot->sl_seqid = xa_to_value(prev_slot); 4450 slot->sl_flags |= NFSD4_SLOT_REUSED; 4451 } 4452 if (slot && 4453 !xa_is_err(xa_store(&session->se_slots, s, slot, 4454 GFP_NOWAIT))) { 4455 s += 1; 4456 session->se_fchannel.maxreqs = s; 4457 atomic_add(s - session->se_target_maxslots, 4458 &nfsd_total_target_slots); 4459 session->se_target_maxslots = s; 4460 } else { 4461 kfree(slot); 4462 slot = NULL; 4463 } 4464 } while (slot && --cnt > 0); 4465 } 4466 4467 out: 4468 seq->maxslots = max(session->se_target_maxslots, seq->maxslots); 4469 seq->target_maxslots = session->se_target_maxslots; 4470 4471 switch (clp->cl_cb_state) { 4472 case NFSD4_CB_DOWN: 4473 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN; 4474 break; 4475 case NFSD4_CB_FAULT: 4476 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT; 4477 break; 4478 default: 4479 seq->status_flags = 0; 4480 } 4481 if (!list_empty(&clp->cl_revoked)) 4482 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED; 4483 if (atomic_read(&clp->cl_admin_revoked)) 4484 seq->status_flags |= SEQ4_STATUS_ADMIN_STATE_REVOKED; 4485 trace_nfsd_seq4_status(rqstp, seq); 4486 out_no_session: 4487 if (conn) 4488 free_conn(conn); 4489 spin_unlock(&nn->client_lock); 4490 return status; 4491 out_put_session: 4492 nfsd4_put_session_locked(session); 4493 goto out_no_session; 4494 } 4495 4496 void 4497 nfsd4_sequence_done(struct nfsd4_compoundres *resp) 4498 { 4499 struct nfsd4_compound_state *cs = &resp->cstate; 4500 4501 if (nfsd4_has_session(cs)) { 4502 if (cs->status != nfserr_replay_cache) { 4503 nfsd4_store_cache_entry(resp); 4504 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE; 4505 } 4506 /* Drop session reference that was taken in nfsd4_sequence() */ 4507 nfsd4_put_session(cs->session); 4508 } else if (cs->clp) 4509 put_client_renew(cs->clp); 4510 } 4511 4512 __be32 4513 nfsd4_destroy_clientid(struct svc_rqst *rqstp, 4514 struct nfsd4_compound_state *cstate, 4515 union nfsd4_op_u *u) 4516 { 4517 struct 
nfsd4_destroy_clientid *dc = &u->destroy_clientid; 4518 struct nfs4_client *conf, *unconf; 4519 struct nfs4_client *clp = NULL; 4520 __be32 status = 0; 4521 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4522 4523 spin_lock(&nn->client_lock); 4524 unconf = find_unconfirmed_client(&dc->clientid, true, nn); 4525 conf = find_confirmed_client(&dc->clientid, true, nn); 4526 WARN_ON_ONCE(conf && unconf); 4527 4528 if (conf) { 4529 if (client_has_state(conf)) { 4530 status = nfserr_clientid_busy; 4531 goto out; 4532 } 4533 status = mark_client_expired_locked(conf); 4534 if (status) 4535 goto out; 4536 clp = conf; 4537 } else if (unconf) 4538 clp = unconf; 4539 else { 4540 status = nfserr_stale_clientid; 4541 goto out; 4542 } 4543 if (!nfsd4_mach_creds_match(clp, rqstp)) { 4544 clp = NULL; 4545 status = nfserr_wrong_cred; 4546 goto out; 4547 } 4548 trace_nfsd_clid_destroyed(&clp->cl_clientid); 4549 unhash_client_locked(clp); 4550 out: 4551 spin_unlock(&nn->client_lock); 4552 if (clp) 4553 expire_client(clp); 4554 return status; 4555 } 4556 4557 __be32 4558 nfsd4_reclaim_complete(struct svc_rqst *rqstp, 4559 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) 4560 { 4561 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete; 4562 struct nfs4_client *clp = cstate->clp; 4563 __be32 status = 0; 4564 4565 if (rc->rca_one_fs) { 4566 if (!cstate->current_fh.fh_dentry) 4567 return nfserr_nofilehandle; 4568 /* 4569 * We don't take advantage of the rca_one_fs case. 4570 * That's OK, it's optional, we can safely ignore it. 4571 */ 4572 return nfs_ok; 4573 } 4574 4575 status = nfserr_complete_already; 4576 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags)) 4577 goto out; 4578 4579 status = nfserr_stale_clientid; 4580 if (is_client_expired(clp)) 4581 /* 4582 * The following error isn't really legal. 4583 * But we only get here if the client just explicitly 4584 * destroyed the client. Surely it no longer cares what 4585 * error it gets back on an operation for the dead 4586 * client. 
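 * Skipping the record create below also avoids writing a reclaim
 * record to stable storage on behalf of a client that is already
 * being torn down.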
4587 */ 4588 goto out; 4589 4590 status = nfs_ok; 4591 trace_nfsd_clid_reclaim_complete(&clp->cl_clientid); 4592 nfsd4_client_record_create(clp); 4593 inc_reclaim_complete(clp); 4594 out: 4595 return status; 4596 } 4597 4598 __be32 4599 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 4600 union nfsd4_op_u *u) 4601 { 4602 struct nfsd4_setclientid *setclid = &u->setclientid; 4603 struct xdr_netobj clname = setclid->se_name; 4604 nfs4_verifier clverifier = setclid->se_verf; 4605 struct nfs4_client *conf, *new; 4606 struct nfs4_client *unconf = NULL; 4607 __be32 status; 4608 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4609 4610 new = create_client(clname, rqstp, &clverifier); 4611 if (new == NULL) 4612 return nfserr_jukebox; 4613 spin_lock(&nn->client_lock); 4614 conf = find_confirmed_client_by_name(&clname, nn); 4615 if (conf && client_has_state(conf)) { 4616 status = nfserr_clid_inuse; 4617 if (clp_used_exchangeid(conf)) 4618 goto out; 4619 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { 4620 trace_nfsd_clid_cred_mismatch(conf, rqstp); 4621 goto out; 4622 } 4623 } 4624 unconf = find_unconfirmed_client_by_name(&clname, nn); 4625 if (unconf) 4626 unhash_client_locked(unconf); 4627 if (conf) { 4628 if (same_verf(&conf->cl_verifier, &clverifier)) { 4629 copy_clid(new, conf); 4630 gen_confirm(new, nn); 4631 } else 4632 trace_nfsd_clid_verf_mismatch(conf, rqstp, 4633 &clverifier); 4634 } else 4635 trace_nfsd_clid_fresh(new); 4636 new->cl_minorversion = 0; 4637 gen_callback(new, setclid, rqstp); 4638 add_to_unconfirmed(new); 4639 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot; 4640 setclid->se_clientid.cl_id = new->cl_clientid.cl_id; 4641 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data)); 4642 new = NULL; 4643 status = nfs_ok; 4644 out: 4645 spin_unlock(&nn->client_lock); 4646 if (new) 4647 free_client(new); 4648 if (unconf) { 4649 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid); 4650 expire_client(unconf); 4651 } 4652 return status; 4653 } 4654 4655 __be32 4656 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, 4657 struct nfsd4_compound_state *cstate, 4658 union nfsd4_op_u *u) 4659 { 4660 struct nfsd4_setclientid_confirm *setclientid_confirm = 4661 &u->setclientid_confirm; 4662 struct nfs4_client *conf, *unconf; 4663 struct nfs4_client *old = NULL; 4664 nfs4_verifier confirm = setclientid_confirm->sc_confirm; 4665 clientid_t * clid = &setclientid_confirm->sc_clientid; 4666 __be32 status; 4667 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 4668 4669 if (STALE_CLIENTID(clid, nn)) 4670 return nfserr_stale_clientid; 4671 4672 spin_lock(&nn->client_lock); 4673 conf = find_confirmed_client(clid, false, nn); 4674 unconf = find_unconfirmed_client(clid, false, nn); 4675 /* 4676 * We try hard to give out unique clientid's, so if we get an 4677 * attempt to confirm the same clientid with a different cred, 4678 * the client may be buggy; this should never happen. 
4679 * 4680 * Nevertheless, RFC 7530 recommends INUSE for this case: 4681 */ 4682 status = nfserr_clid_inuse; 4683 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) { 4684 trace_nfsd_clid_cred_mismatch(unconf, rqstp); 4685 goto out; 4686 } 4687 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) { 4688 trace_nfsd_clid_cred_mismatch(conf, rqstp); 4689 goto out; 4690 } 4691 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) { 4692 if (conf && same_verf(&confirm, &conf->cl_confirm)) { 4693 status = nfs_ok; 4694 } else 4695 status = nfserr_stale_clientid; 4696 goto out; 4697 } 4698 status = nfs_ok; 4699 if (conf) { 4700 old = unconf; 4701 unhash_client_locked(old); 4702 nfsd4_change_callback(conf, &unconf->cl_cb_conn); 4703 } else { 4704 old = find_confirmed_client_by_name(&unconf->cl_name, nn); 4705 if (old) { 4706 status = nfserr_clid_inuse; 4707 if (client_has_state(old) 4708 && !same_creds(&unconf->cl_cred, 4709 &old->cl_cred)) { 4710 old = NULL; 4711 goto out; 4712 } 4713 status = mark_client_expired_locked(old); 4714 if (status) { 4715 old = NULL; 4716 goto out; 4717 } 4718 trace_nfsd_clid_replaced(&old->cl_clientid); 4719 } 4720 move_to_confirmed(unconf); 4721 conf = unconf; 4722 } 4723 get_client_locked(conf); 4724 spin_unlock(&nn->client_lock); 4725 if (conf == unconf) 4726 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY); 4727 nfsd4_probe_callback(conf); 4728 spin_lock(&nn->client_lock); 4729 put_client_renew_locked(conf); 4730 out: 4731 spin_unlock(&nn->client_lock); 4732 if (old) 4733 expire_client(old); 4734 return status; 4735 } 4736 4737 static struct nfs4_file *nfsd4_alloc_file(void) 4738 { 4739 return kmem_cache_alloc(file_slab, GFP_KERNEL); 4740 } 4741 4742 /* OPEN Share state helper functions */ 4743 4744 static void nfsd4_file_init(const struct svc_fh *fh, struct nfs4_file *fp) 4745 { 4746 refcount_set(&fp->fi_ref, 1); 4747 spin_lock_init(&fp->fi_lock); 4748 INIT_LIST_HEAD(&fp->fi_stateids); 4749 INIT_LIST_HEAD(&fp->fi_delegations); 4750 INIT_LIST_HEAD(&fp->fi_clnt_odstate); 4751 fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle); 4752 fp->fi_deleg_file = NULL; 4753 fp->fi_had_conflict = false; 4754 fp->fi_share_deny = 0; 4755 memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); 4756 memset(fp->fi_access, 0, sizeof(fp->fi_access)); 4757 fp->fi_aliased = false; 4758 fp->fi_inode = d_inode(fh->fh_dentry); 4759 #ifdef CONFIG_NFSD_PNFS 4760 INIT_LIST_HEAD(&fp->fi_lo_states); 4761 atomic_set(&fp->fi_lo_recalls, 0); 4762 #endif 4763 } 4764 4765 void 4766 nfsd4_free_slabs(void) 4767 { 4768 kmem_cache_destroy(client_slab); 4769 kmem_cache_destroy(openowner_slab); 4770 kmem_cache_destroy(lockowner_slab); 4771 kmem_cache_destroy(file_slab); 4772 kmem_cache_destroy(stateid_slab); 4773 kmem_cache_destroy(deleg_slab); 4774 kmem_cache_destroy(odstate_slab); 4775 } 4776 4777 int 4778 nfsd4_init_slabs(void) 4779 { 4780 client_slab = KMEM_CACHE(nfs4_client, 0); 4781 if (client_slab == NULL) 4782 goto out; 4783 openowner_slab = KMEM_CACHE(nfs4_openowner, 0); 4784 if (openowner_slab == NULL) 4785 goto out_free_client_slab; 4786 lockowner_slab = KMEM_CACHE(nfs4_lockowner, 0); 4787 if (lockowner_slab == NULL) 4788 goto out_free_openowner_slab; 4789 file_slab = KMEM_CACHE(nfs4_file, 0); 4790 if (file_slab == NULL) 4791 goto out_free_lockowner_slab; 4792 stateid_slab = KMEM_CACHE(nfs4_ol_stateid, 0); 4793 if (stateid_slab == NULL) 4794 goto out_free_file_slab; 4795 deleg_slab = KMEM_CACHE(nfs4_delegation, 0); 4796 if (deleg_slab == NULL) 4797 goto out_free_stateid_slab; 4798 
odstate_slab = KMEM_CACHE(nfs4_clnt_odstate, 0); 4799 if (odstate_slab == NULL) 4800 goto out_free_deleg_slab; 4801 return 0; 4802 4803 out_free_deleg_slab: 4804 kmem_cache_destroy(deleg_slab); 4805 out_free_stateid_slab: 4806 kmem_cache_destroy(stateid_slab); 4807 out_free_file_slab: 4808 kmem_cache_destroy(file_slab); 4809 out_free_lockowner_slab: 4810 kmem_cache_destroy(lockowner_slab); 4811 out_free_openowner_slab: 4812 kmem_cache_destroy(openowner_slab); 4813 out_free_client_slab: 4814 kmem_cache_destroy(client_slab); 4815 out: 4816 return -ENOMEM; 4817 } 4818 4819 static unsigned long 4820 nfsd4_state_shrinker_count(struct shrinker *shrink, struct shrink_control *sc) 4821 { 4822 struct nfsd_net *nn = shrink->private_data; 4823 long count; 4824 4825 count = atomic_read(&nn->nfsd_courtesy_clients); 4826 if (!count) 4827 count = atomic_long_read(&num_delegations); 4828 if (count) 4829 queue_work(laundry_wq, &nn->nfsd_shrinker_work); 4830 return (unsigned long)count; 4831 } 4832 4833 static unsigned long 4834 nfsd4_state_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc) 4835 { 4836 return SHRINK_STOP; 4837 } 4838 4839 void 4840 nfsd4_init_leases_net(struct nfsd_net *nn) 4841 { 4842 struct sysinfo si; 4843 u64 max_clients; 4844 4845 nn->nfsd4_lease = 90; /* default lease time */ 4846 nn->nfsd4_grace = 90; 4847 nn->somebody_reclaimed = false; 4848 nn->track_reclaim_completes = false; 4849 nn->clverifier_counter = get_random_u32(); 4850 nn->clientid_base = get_random_u32(); 4851 nn->clientid_counter = nn->clientid_base + 1; 4852 nn->s2s_cp_cl_id = nn->clientid_counter++; 4853 4854 atomic_set(&nn->nfs4_client_count, 0); 4855 si_meminfo(&si); 4856 max_clients = (u64)si.totalram * si.mem_unit / (1024 * 1024 * 1024); 4857 max_clients *= NFS4_CLIENTS_PER_GB; 4858 nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB); 4859 4860 atomic_set(&nn->nfsd_courtesy_clients, 0); 4861 } 4862 4863 enum rp_lock { 4864 RP_UNLOCKED, 4865 RP_LOCKED, 4866 RP_UNHASHED, 4867 }; 4868 4869 static void init_nfs4_replay(struct nfs4_replay *rp) 4870 { 4871 rp->rp_status = nfserr_serverfault; 4872 rp->rp_buflen = 0; 4873 rp->rp_buf = rp->rp_ibuf; 4874 rp->rp_locked = RP_UNLOCKED; 4875 } 4876 4877 static int nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate, 4878 struct nfs4_stateowner *so) 4879 { 4880 if (!nfsd4_has_session(cstate)) { 4881 wait_var_event(&so->so_replay.rp_locked, 4882 cmpxchg(&so->so_replay.rp_locked, 4883 RP_UNLOCKED, RP_LOCKED) != RP_LOCKED); 4884 if (so->so_replay.rp_locked == RP_UNHASHED) 4885 return -EAGAIN; 4886 cstate->replay_owner = nfs4_get_stateowner(so); 4887 } 4888 return 0; 4889 } 4890 4891 void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate) 4892 { 4893 struct nfs4_stateowner *so = cstate->replay_owner; 4894 4895 if (so != NULL) { 4896 cstate->replay_owner = NULL; 4897 store_release_wake_up(&so->so_replay.rp_locked, RP_UNLOCKED); 4898 nfs4_put_stateowner(so); 4899 } 4900 } 4901 4902 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp) 4903 { 4904 struct nfs4_stateowner *sop; 4905 4906 sop = kmem_cache_alloc(slab, GFP_KERNEL); 4907 if (!sop) 4908 return NULL; 4909 4910 xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL); 4911 if (!sop->so_owner.data) { 4912 kmem_cache_free(slab, sop); 4913 return NULL; 4914 } 4915 4916 INIT_LIST_HEAD(&sop->so_stateids); 4917 sop->so_client = clp; 4918 init_nfs4_replay(&sop->so_replay); 4919 atomic_set(&sop->so_count, 1); 4920 return sop; 4921 
} 4922 4923 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval) 4924 { 4925 lockdep_assert_held(&clp->cl_lock); 4926 4927 list_add(&oo->oo_owner.so_strhash, 4928 &clp->cl_ownerstr_hashtbl[strhashval]); 4929 list_add(&oo->oo_perclient, &clp->cl_openowners); 4930 } 4931 4932 static void nfs4_unhash_openowner(struct nfs4_stateowner *so) 4933 { 4934 unhash_openowner_locked(openowner(so)); 4935 } 4936 4937 static void nfs4_free_openowner(struct nfs4_stateowner *so) 4938 { 4939 struct nfs4_openowner *oo = openowner(so); 4940 4941 kmem_cache_free(openowner_slab, oo); 4942 } 4943 4944 static const struct nfs4_stateowner_operations openowner_ops = { 4945 .so_unhash = nfs4_unhash_openowner, 4946 .so_free = nfs4_free_openowner, 4947 }; 4948 4949 static struct nfs4_ol_stateid * 4950 nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) 4951 { 4952 struct nfs4_ol_stateid *local, *ret = NULL; 4953 struct nfs4_openowner *oo = open->op_openowner; 4954 4955 lockdep_assert_held(&fp->fi_lock); 4956 4957 list_for_each_entry(local, &fp->fi_stateids, st_perfile) { 4958 /* ignore lock owners */ 4959 if (local->st_stateowner->so_is_open_owner == 0) 4960 continue; 4961 if (local->st_stateowner != &oo->oo_owner) 4962 continue; 4963 if (local->st_stid.sc_type == SC_TYPE_OPEN && 4964 !local->st_stid.sc_status) { 4965 ret = local; 4966 refcount_inc(&ret->st_stid.sc_count); 4967 break; 4968 } 4969 } 4970 return ret; 4971 } 4972 4973 static void nfsd4_drop_revoked_stid(struct nfs4_stid *s) 4974 __releases(&s->sc_client->cl_lock) 4975 { 4976 struct nfs4_client *cl = s->sc_client; 4977 LIST_HEAD(reaplist); 4978 struct nfs4_ol_stateid *stp; 4979 struct nfs4_delegation *dp; 4980 bool unhashed; 4981 4982 switch (s->sc_type) { 4983 case SC_TYPE_OPEN: 4984 stp = openlockstateid(s); 4985 if (unhash_open_stateid(stp, &reaplist)) 4986 put_ol_stateid_locked(stp, &reaplist); 4987 spin_unlock(&cl->cl_lock); 4988 free_ol_stateid_reaplist(&reaplist); 4989 break; 4990 case SC_TYPE_LOCK: 4991 stp = openlockstateid(s); 4992 unhashed = unhash_lock_stateid(stp); 4993 spin_unlock(&cl->cl_lock); 4994 if (unhashed) 4995 nfs4_put_stid(s); 4996 break; 4997 case SC_TYPE_DELEG: 4998 dp = delegstateid(s); 4999 list_del_init(&dp->dl_recall_lru); 5000 spin_unlock(&cl->cl_lock); 5001 nfs4_put_stid(s); 5002 break; 5003 default: 5004 spin_unlock(&cl->cl_lock); 5005 } 5006 } 5007 5008 static void nfsd40_drop_revoked_stid(struct nfs4_client *cl, 5009 stateid_t *stid) 5010 { 5011 /* NFSv4.0 has no way for the client to tell the server 5012 * that it can forget an admin-revoked stateid. 5013 * So we keep it around until the first time that the 5014 * client uses it, and drop it the first time 5015 * nfserr_admin_revoked is returned. 5016 * For v4.1 and later we wait until explicitly told 5017 * to free the stateid. 
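 * (For v4.1+ that notification is an explicit FREE_STATEID,
 * typically sent after TEST_STATEID reports the revocation.)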
5018 */ 5019 if (cl->cl_minorversion == 0) { 5020 struct nfs4_stid *st; 5021 5022 spin_lock(&cl->cl_lock); 5023 st = find_stateid_locked(cl, stid); 5024 if (st) 5025 nfsd4_drop_revoked_stid(st); 5026 else 5027 spin_unlock(&cl->cl_lock); 5028 } 5029 } 5030 5031 static __be32 5032 nfsd4_verify_open_stid(struct nfs4_stid *s) 5033 { 5034 __be32 ret = nfs_ok; 5035 5036 if (s->sc_status & SC_STATUS_ADMIN_REVOKED) 5037 ret = nfserr_admin_revoked; 5038 else if (s->sc_status & SC_STATUS_REVOKED) 5039 ret = nfserr_deleg_revoked; 5040 else if (s->sc_status & SC_STATUS_CLOSED) 5041 ret = nfserr_bad_stateid; 5042 return ret; 5043 } 5044 5045 /* Lock the stateid st_mutex, and deal with races with CLOSE */ 5046 static __be32 5047 nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp) 5048 { 5049 __be32 ret; 5050 5051 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX); 5052 ret = nfsd4_verify_open_stid(&stp->st_stid); 5053 if (ret == nfserr_admin_revoked) 5054 nfsd40_drop_revoked_stid(stp->st_stid.sc_client, 5055 &stp->st_stid.sc_stateid); 5056 5057 if (ret != nfs_ok) 5058 mutex_unlock(&stp->st_mutex); 5059 return ret; 5060 } 5061 5062 static struct nfs4_ol_stateid * 5063 nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) 5064 { 5065 struct nfs4_ol_stateid *stp; 5066 for (;;) { 5067 spin_lock(&fp->fi_lock); 5068 stp = nfsd4_find_existing_open(fp, open); 5069 spin_unlock(&fp->fi_lock); 5070 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok) 5071 break; 5072 nfs4_put_stid(&stp->st_stid); 5073 } 5074 return stp; 5075 } 5076 5077 static struct nfs4_openowner * 5078 find_or_alloc_open_stateowner(unsigned int strhashval, struct nfsd4_open *open, 5079 struct nfsd4_compound_state *cstate) 5080 { 5081 struct nfs4_client *clp = cstate->clp; 5082 struct nfs4_openowner *oo, *new = NULL; 5083 5084 retry: 5085 spin_lock(&clp->cl_lock); 5086 oo = find_openstateowner_str(strhashval, open, clp); 5087 if (!oo && new) { 5088 hash_openowner(new, clp, strhashval); 5089 spin_unlock(&clp->cl_lock); 5090 return new; 5091 } 5092 spin_unlock(&clp->cl_lock); 5093 5094 if (oo && !(oo->oo_flags & NFS4_OO_CONFIRMED)) { 5095 /* Replace unconfirmed owners without checking for replay. 
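 * (Until an owner is confirmed, the server is free to treat a new
 * OPEN from it as a fresh instance, so no replay detection is
 * needed.)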
*/ 5096 release_openowner(oo); 5097 oo = NULL; 5098 } 5099 if (oo) { 5100 if (new) 5101 nfs4_free_stateowner(&new->oo_owner); 5102 return oo; 5103 } 5104 5105 new = alloc_stateowner(openowner_slab, &open->op_owner, clp); 5106 if (!new) 5107 return NULL; 5108 new->oo_owner.so_ops = &openowner_ops; 5109 new->oo_owner.so_is_open_owner = 1; 5110 new->oo_owner.so_seqid = open->op_seqid; 5111 new->oo_flags = 0; 5112 if (nfsd4_has_session(cstate)) 5113 new->oo_flags |= NFS4_OO_CONFIRMED; 5114 new->oo_time = 0; 5115 new->oo_last_closed_stid = NULL; 5116 INIT_LIST_HEAD(&new->oo_close_lru); 5117 goto retry; 5118 } 5119 5120 static struct nfs4_ol_stateid * 5121 init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open) 5122 { 5123 5124 struct nfs4_openowner *oo = open->op_openowner; 5125 struct nfs4_ol_stateid *retstp = NULL; 5126 struct nfs4_ol_stateid *stp; 5127 5128 stp = open->op_stp; 5129 /* We are moving these outside of the spinlocks to avoid the warnings */ 5130 mutex_init(&stp->st_mutex); 5131 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); 5132 5133 retry: 5134 spin_lock(&oo->oo_owner.so_client->cl_lock); 5135 spin_lock(&fp->fi_lock); 5136 5137 if (nfs4_openowner_unhashed(oo)) { 5138 mutex_unlock(&stp->st_mutex); 5139 stp = NULL; 5140 goto out_unlock; 5141 } 5142 5143 retstp = nfsd4_find_existing_open(fp, open); 5144 if (retstp) 5145 goto out_unlock; 5146 5147 open->op_stp = NULL; 5148 refcount_inc(&stp->st_stid.sc_count); 5149 stp->st_stid.sc_type = SC_TYPE_OPEN; 5150 INIT_LIST_HEAD(&stp->st_locks); 5151 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner); 5152 get_nfs4_file(fp); 5153 stp->st_stid.sc_file = fp; 5154 stp->st_access_bmap = 0; 5155 stp->st_deny_bmap = 0; 5156 stp->st_openstp = NULL; 5157 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids); 5158 list_add(&stp->st_perfile, &fp->fi_stateids); 5159 5160 out_unlock: 5161 spin_unlock(&fp->fi_lock); 5162 spin_unlock(&oo->oo_owner.so_client->cl_lock); 5163 if (retstp) { 5164 /* Handle races with CLOSE */ 5165 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { 5166 nfs4_put_stid(&retstp->st_stid); 5167 goto retry; 5168 } 5169 /* To keep mutex tracking happy */ 5170 mutex_unlock(&stp->st_mutex); 5171 stp = retstp; 5172 } 5173 return stp; 5174 } 5175 5176 /* 5177 * In the 4.0 case we need to keep the owners around a little while to handle 5178 * CLOSE replay. We still do need to release any file access that is held by 5179 * them before returning however. 5180 */ 5181 static void 5182 move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net) 5183 { 5184 struct nfs4_ol_stateid *last; 5185 struct nfs4_openowner *oo = openowner(s->st_stateowner); 5186 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net, 5187 nfsd_net_id); 5188 5189 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo); 5190 5191 /* 5192 * We know that we hold one reference via nfsd4_close, and another 5193 * "persistent" reference for the client. If the refcount is higher 5194 * than 2, then there are still calls in progress that are using this 5195 * stateid. We can't put the sc_file reference until they are finished. 5196 * Wait for the refcount to drop to 2. Since it has been unhashed, 5197 * there should be no danger of the refcount going back up again at 5198 * this point. 5199 * Some threads with a reference might be waiting for rp_locked, 5200 * so tell them to stop waiting. 
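 * Storing RP_UNHASHED below makes nfsd4_cstate_assign_replay()
 * return -EAGAIN, so waiters retry against a fresh openowner.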
5201 */ 5202 store_release_wake_up(&oo->oo_owner.so_replay.rp_locked, RP_UNHASHED); 5203 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2); 5204 5205 release_all_access(s); 5206 if (s->st_stid.sc_file) { 5207 put_nfs4_file(s->st_stid.sc_file); 5208 s->st_stid.sc_file = NULL; 5209 } 5210 5211 spin_lock(&nn->client_lock); 5212 last = oo->oo_last_closed_stid; 5213 oo->oo_last_closed_stid = s; 5214 list_move_tail(&oo->oo_close_lru, &nn->close_lru); 5215 oo->oo_time = ktime_get_boottime_seconds(); 5216 spin_unlock(&nn->client_lock); 5217 if (last) 5218 nfs4_put_stid(&last->st_stid); 5219 } 5220 5221 static noinline_for_stack struct nfs4_file * 5222 nfsd4_file_hash_lookup(const struct svc_fh *fhp) 5223 { 5224 struct inode *inode = d_inode(fhp->fh_dentry); 5225 struct rhlist_head *tmp, *list; 5226 struct nfs4_file *fi; 5227 5228 rcu_read_lock(); 5229 list = rhltable_lookup(&nfs4_file_rhltable, &inode, 5230 nfs4_file_rhash_params); 5231 rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) { 5232 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) { 5233 if (refcount_inc_not_zero(&fi->fi_ref)) { 5234 rcu_read_unlock(); 5235 return fi; 5236 } 5237 } 5238 } 5239 rcu_read_unlock(); 5240 return NULL; 5241 } 5242 5243 /* 5244 * On hash insertion, identify entries with the same inode but 5245 * distinct filehandles. They will all be on the list returned 5246 * by rhltable_lookup(). 5247 * 5248 * inode->i_lock prevents racing insertions from adding an entry 5249 * for the same inode/fhp pair twice. 5250 */ 5251 static noinline_for_stack struct nfs4_file * 5252 nfsd4_file_hash_insert(struct nfs4_file *new, const struct svc_fh *fhp) 5253 { 5254 struct inode *inode = d_inode(fhp->fh_dentry); 5255 struct rhlist_head *tmp, *list; 5256 struct nfs4_file *ret = NULL; 5257 bool alias_found = false; 5258 struct nfs4_file *fi; 5259 int err; 5260 5261 rcu_read_lock(); 5262 spin_lock(&inode->i_lock); 5263 5264 list = rhltable_lookup(&nfs4_file_rhltable, &inode, 5265 nfs4_file_rhash_params); 5266 rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) { 5267 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) { 5268 if (refcount_inc_not_zero(&fi->fi_ref)) 5269 ret = fi; 5270 } else 5271 fi->fi_aliased = alias_found = true; 5272 } 5273 if (ret) 5274 goto out_unlock; 5275 5276 nfsd4_file_init(fhp, new); 5277 err = rhltable_insert(&nfs4_file_rhltable, &new->fi_rlist, 5278 nfs4_file_rhash_params); 5279 if (err) 5280 goto out_unlock; 5281 5282 new->fi_aliased = alias_found; 5283 ret = new; 5284 5285 out_unlock: 5286 spin_unlock(&inode->i_lock); 5287 rcu_read_unlock(); 5288 return ret; 5289 } 5290 5291 static noinline_for_stack void nfsd4_file_hash_remove(struct nfs4_file *fi) 5292 { 5293 rhltable_remove(&nfs4_file_rhltable, &fi->fi_rlist, 5294 nfs4_file_rhash_params); 5295 } 5296 5297 /* 5298 * Called to check deny when READ with all zero stateid or 5299 * WRITE with all zero or all one stateid 5300 */ 5301 static __be32 5302 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type) 5303 { 5304 struct nfs4_file *fp; 5305 __be32 ret = nfs_ok; 5306 5307 fp = nfsd4_file_hash_lookup(current_fh); 5308 if (!fp) 5309 return ret; 5310 5311 /* Check for conflicting share reservations */ 5312 spin_lock(&fp->fi_lock); 5313 if (fp->fi_share_deny & deny_type) 5314 ret = nfserr_locked; 5315 spin_unlock(&fp->fi_lock); 5316 put_nfs4_file(fp); 5317 return ret; 5318 } 5319 5320 static bool nfsd4_deleg_present(const struct inode *inode) 5321 { 5322 struct file_lock_context *ctx = locks_inode_context(inode); 5323 5324 return ctx && 
!list_empty_careful(&ctx->flc_lease); 5325 } 5326 5327 /** 5328 * nfsd_wait_for_delegreturn - wait for delegations to be returned 5329 * @rqstp: the RPC transaction being executed 5330 * @inode: in-core inode of the file being waited for 5331 * 5332 * The timeout prevents deadlock if all nfsd threads happen to be 5333 * tied up waiting for returning delegations. 5334 * 5335 * Return values: 5336 * %true: delegation was returned 5337 * %false: timed out waiting for delegreturn 5338 */ 5339 bool nfsd_wait_for_delegreturn(struct svc_rqst *rqstp, struct inode *inode) 5340 { 5341 long __maybe_unused timeo; 5342 5343 timeo = wait_var_event_timeout(inode, !nfsd4_deleg_present(inode), 5344 NFSD_DELEGRETURN_TIMEOUT); 5345 trace_nfsd_delegret_wakeup(rqstp, inode, timeo); 5346 return timeo > 0; 5347 } 5348 5349 static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb) 5350 { 5351 struct nfs4_delegation *dp = cb_to_delegation(cb); 5352 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net, 5353 nfsd_net_id); 5354 5355 block_delegations(&dp->dl_stid.sc_file->fi_fhandle); 5356 5357 /* 5358 * We can't do this in nfsd_break_deleg_cb because it is 5359 * already holding inode->i_lock. 5360 * 5361 * If the dl_time != 0, then we know that it has already been 5362 * queued for a lease break. Don't queue it again. 5363 */ 5364 spin_lock(&state_lock); 5365 if (delegation_hashed(dp) && dp->dl_time == 0) { 5366 dp->dl_time = ktime_get_boottime_seconds(); 5367 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); 5368 } 5369 spin_unlock(&state_lock); 5370 } 5371 5372 static int nfsd4_cb_recall_done(struct nfsd4_callback *cb, 5373 struct rpc_task *task) 5374 { 5375 struct nfs4_delegation *dp = cb_to_delegation(cb); 5376 5377 trace_nfsd_cb_recall_done(&dp->dl_stid.sc_stateid, task); 5378 5379 if (dp->dl_stid.sc_status) 5380 /* CLOSED or REVOKED */ 5381 return 1; 5382 5383 switch (task->tk_status) { 5384 case 0: 5385 return 1; 5386 case -NFS4ERR_DELAY: 5387 rpc_delay(task, 2 * HZ); 5388 return 0; 5389 case -EBADHANDLE: 5390 case -NFS4ERR_BAD_STATEID: 5391 /* 5392 * Race: client probably got cb_recall before open reply 5393 * granting delegation. 5394 */ 5395 if (dp->dl_retries--) { 5396 rpc_delay(task, 2 * HZ); 5397 return 0; 5398 } 5399 fallthrough; 5400 default: 5401 return 1; 5402 } 5403 } 5404 5405 static void nfsd4_cb_recall_release(struct nfsd4_callback *cb) 5406 { 5407 struct nfs4_delegation *dp = cb_to_delegation(cb); 5408 5409 nfs4_put_stid(&dp->dl_stid); 5410 } 5411 5412 static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = { 5413 .prepare = nfsd4_cb_recall_prepare, 5414 .done = nfsd4_cb_recall_done, 5415 .release = nfsd4_cb_recall_release, 5416 .opcode = OP_CB_RECALL, 5417 }; 5418 5419 static void nfsd_break_one_deleg(struct nfs4_delegation *dp) 5420 { 5421 bool queued; 5422 5423 if (test_and_set_bit(NFSD4_CALLBACK_RUNNING, &dp->dl_recall.cb_flags)) 5424 return; 5425 5426 /* 5427 * We're assuming the state code never drops its reference 5428 * without first removing the lease. Since we're in this lease 5429 * callback (and since the lease code is serialized by the 5430 * flc_lock) we know the server hasn't removed the lease yet, and 5431 * we know it's safe to take a reference. 5432 */ 5433 refcount_inc(&dp->dl_stid.sc_count); 5434 queued = nfsd4_run_cb(&dp->dl_recall); 5435 WARN_ON_ONCE(!queued); 5436 if (!queued) 5437 refcount_dec(&dp->dl_stid.sc_count); 5438 } 5439 5440 /* Called from break_lease() with flc_lock held. 
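 * Returning false tells the locks code that the lease has not been
 * freed here; together with clearing fl_break_time below, that keeps
 * the VFS from tearing the lease down while the recall is in flight.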
*/ 5441 static bool 5442 nfsd_break_deleg_cb(struct file_lease *fl) 5443 { 5444 struct nfs4_delegation *dp = (struct nfs4_delegation *) fl->c.flc_owner; 5445 struct nfs4_file *fp = dp->dl_stid.sc_file; 5446 struct nfs4_client *clp = dp->dl_stid.sc_client; 5447 struct nfsd_net *nn; 5448 5449 trace_nfsd_cb_recall(&dp->dl_stid); 5450 5451 dp->dl_recalled = true; 5452 atomic_inc(&clp->cl_delegs_in_recall); 5453 if (try_to_expire_client(clp)) { 5454 nn = net_generic(clp->net, nfsd_net_id); 5455 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); 5456 } 5457 5458 /* 5459 * We don't want the locks code to timeout the lease for us; 5460 * we'll remove it ourself if a delegation isn't returned 5461 * in time: 5462 */ 5463 fl->fl_break_time = 0; 5464 5465 fp->fi_had_conflict = true; 5466 nfsd_break_one_deleg(dp); 5467 return false; 5468 } 5469 5470 /** 5471 * nfsd_breaker_owns_lease - Check if lease conflict was resolved 5472 * @fl: Lock state to check 5473 * 5474 * Return values: 5475 * %true: Lease conflict was resolved 5476 * %false: Lease conflict was not resolved. 5477 */ 5478 static bool nfsd_breaker_owns_lease(struct file_lease *fl) 5479 { 5480 struct nfs4_delegation *dl = fl->c.flc_owner; 5481 struct svc_rqst *rqst; 5482 struct nfs4_client *clp; 5483 5484 rqst = nfsd_current_rqst(); 5485 if (!nfsd_v4client(rqst)) 5486 return false; 5487 clp = *(rqst->rq_lease_breaker); 5488 return dl->dl_stid.sc_client == clp; 5489 } 5490 5491 static int 5492 nfsd_change_deleg_cb(struct file_lease *onlist, int arg, 5493 struct list_head *dispose) 5494 { 5495 struct nfs4_delegation *dp = (struct nfs4_delegation *) onlist->c.flc_owner; 5496 struct nfs4_client *clp = dp->dl_stid.sc_client; 5497 5498 if (arg & F_UNLCK) { 5499 if (dp->dl_recalled) 5500 atomic_dec(&clp->cl_delegs_in_recall); 5501 return lease_modify(onlist, arg, dispose); 5502 } else 5503 return -EAGAIN; 5504 } 5505 5506 static const struct lease_manager_operations nfsd_lease_mng_ops = { 5507 .lm_breaker_owns_lease = nfsd_breaker_owns_lease, 5508 .lm_break = nfsd_break_deleg_cb, 5509 .lm_change = nfsd_change_deleg_cb, 5510 }; 5511 5512 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid) 5513 { 5514 if (nfsd4_has_session(cstate)) 5515 return nfs_ok; 5516 if (seqid == so->so_seqid - 1) 5517 return nfserr_replay_me; 5518 if (seqid == so->so_seqid) 5519 return nfs_ok; 5520 return nfserr_bad_seqid; 5521 } 5522 5523 static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions, 5524 struct nfsd_net *nn) 5525 { 5526 struct nfs4_client *found; 5527 5528 spin_lock(&nn->client_lock); 5529 found = find_confirmed_client(clid, sessions, nn); 5530 if (found) 5531 atomic_inc(&found->cl_rpc_users); 5532 spin_unlock(&nn->client_lock); 5533 return found; 5534 } 5535 5536 static __be32 set_client(clientid_t *clid, 5537 struct nfsd4_compound_state *cstate, 5538 struct nfsd_net *nn) 5539 { 5540 if (cstate->clp) { 5541 if (!same_clid(&cstate->clp->cl_clientid, clid)) 5542 return nfserr_stale_clientid; 5543 return nfs_ok; 5544 } 5545 if (STALE_CLIENTID(clid, nn)) 5546 return nfserr_stale_clientid; 5547 /* 5548 * We're in the 4.0 case (otherwise the SEQUENCE op would have 5549 * set cstate->clp), so session = false: 5550 */ 5551 cstate->clp = lookup_clientid(clid, false, nn); 5552 if (!cstate->clp) 5553 return nfserr_expired; 5554 return nfs_ok; 5555 } 5556 5557 __be32 5558 nfsd4_process_open1(struct nfsd4_compound_state *cstate, 5559 struct nfsd4_open *open, struct nfsd_net *nn) 5560 { 5561 
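	/*
	 * Phase one of OPEN processing: find the client and openowner,
	 * and pre-allocate everything the later phases might need (file,
	 * open stateid, odstate), so that once the file has been created
	 * we are unlikely to fail for lack of memory.
	 */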
clientid_t *clientid = &open->op_clientid; 5562 struct nfs4_client *clp = NULL; 5563 unsigned int strhashval; 5564 struct nfs4_openowner *oo = NULL; 5565 __be32 status; 5566 5567 /* 5568 * In case we need it later, after we've already created the 5569 * file and don't want to risk a further failure: 5570 */ 5571 open->op_file = nfsd4_alloc_file(); 5572 if (open->op_file == NULL) 5573 return nfserr_jukebox; 5574 5575 status = set_client(clientid, cstate, nn); 5576 if (status) 5577 return status; 5578 clp = cstate->clp; 5579 5580 strhashval = ownerstr_hashval(&open->op_owner); 5581 retry: 5582 oo = find_or_alloc_open_stateowner(strhashval, open, cstate); 5583 open->op_openowner = oo; 5584 if (!oo) 5585 return nfserr_jukebox; 5586 if (nfsd4_cstate_assign_replay(cstate, &oo->oo_owner) == -EAGAIN) { 5587 nfs4_put_stateowner(&oo->oo_owner); 5588 goto retry; 5589 } 5590 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid); 5591 if (status) 5592 return status; 5593 5594 open->op_stp = nfs4_alloc_open_stateid(clp); 5595 if (!open->op_stp) 5596 return nfserr_jukebox; 5597 5598 if (nfsd4_has_session(cstate) && 5599 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) { 5600 open->op_odstate = alloc_clnt_odstate(clp); 5601 if (!open->op_odstate) 5602 return nfserr_jukebox; 5603 } 5604 5605 return nfs_ok; 5606 } 5607 5608 static inline __be32 5609 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) 5610 { 5611 if (!(flags & RD_STATE) && deleg_is_read(dp->dl_type)) 5612 return nfserr_openmode; 5613 else 5614 return nfs_ok; 5615 } 5616 5617 static int share_access_to_flags(u32 share_access) 5618 { 5619 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE; 5620 } 5621 5622 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, 5623 stateid_t *s) 5624 { 5625 struct nfs4_stid *ret; 5626 5627 ret = find_stateid_by_type(cl, s, SC_TYPE_DELEG, SC_STATUS_REVOKED); 5628 if (!ret) 5629 return NULL; 5630 return delegstateid(ret); 5631 } 5632 5633 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open) 5634 { 5635 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR || 5636 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH; 5637 } 5638 5639 static __be32 5640 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open, 5641 struct nfs4_delegation **dp) 5642 { 5643 int flags; 5644 __be32 status = nfserr_bad_stateid; 5645 struct nfs4_delegation *deleg; 5646 5647 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid); 5648 if (deleg == NULL) 5649 goto out; 5650 if (deleg->dl_stid.sc_status & SC_STATUS_ADMIN_REVOKED) { 5651 nfs4_put_stid(&deleg->dl_stid); 5652 status = nfserr_admin_revoked; 5653 goto out; 5654 } 5655 if (deleg->dl_stid.sc_status & SC_STATUS_REVOKED) { 5656 nfs4_put_stid(&deleg->dl_stid); 5657 nfsd40_drop_revoked_stid(cl, &open->op_delegate_stateid); 5658 status = nfserr_deleg_revoked; 5659 goto out; 5660 } 5661 flags = share_access_to_flags(open->op_share_access); 5662 status = nfs4_check_delegmode(deleg, flags); 5663 if (status) { 5664 nfs4_put_stid(&deleg->dl_stid); 5665 goto out; 5666 } 5667 *dp = deleg; 5668 out: 5669 if (!nfsd4_is_deleg_cur(open)) 5670 return nfs_ok; 5671 if (status) 5672 return status; 5673 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 5674 return nfs_ok; 5675 } 5676 5677 static inline int nfs4_access_to_access(u32 nfs4_access) 5678 { 5679 int flags = 0; 5680 5681 if (nfs4_access & NFS4_SHARE_ACCESS_READ) 5682 flags |= NFSD_MAY_READ; 5683 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE) 5684 flags |= 
NFSD_MAY_WRITE; 5685 return flags; 5686 } 5687 5688 static inline __be32 5689 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh, 5690 struct nfsd4_open *open) 5691 { 5692 struct iattr iattr = { 5693 .ia_valid = ATTR_SIZE, 5694 .ia_size = 0, 5695 }; 5696 struct nfsd_attrs attrs = { 5697 .na_iattr = &iattr, 5698 }; 5699 if (!open->op_truncate) 5700 return 0; 5701 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE)) 5702 return nfserr_inval; 5703 return nfsd_setattr(rqstp, fh, &attrs, NULL); 5704 } 5705 5706 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp, 5707 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, 5708 struct nfsd4_open *open, bool new_stp) 5709 { 5710 struct nfsd_file *nf = NULL; 5711 __be32 status; 5712 int oflag = nfs4_access_to_omode(open->op_share_access); 5713 int access = nfs4_access_to_access(open->op_share_access); 5714 unsigned char old_access_bmap, old_deny_bmap; 5715 5716 spin_lock(&fp->fi_lock); 5717 5718 /* 5719 * Are we trying to set a deny mode that would conflict with 5720 * current access? 5721 */ 5722 status = nfs4_file_check_deny(fp, open->op_share_deny); 5723 if (status != nfs_ok) { 5724 if (status != nfserr_share_denied) { 5725 spin_unlock(&fp->fi_lock); 5726 goto out; 5727 } 5728 if (nfs4_resolve_deny_conflicts_locked(fp, new_stp, 5729 stp, open->op_share_deny, false)) 5730 status = nfserr_jukebox; 5731 spin_unlock(&fp->fi_lock); 5732 goto out; 5733 } 5734 5735 /* set access to the file */ 5736 status = nfs4_file_get_access(fp, open->op_share_access); 5737 if (status != nfs_ok) { 5738 if (status != nfserr_share_denied) { 5739 spin_unlock(&fp->fi_lock); 5740 goto out; 5741 } 5742 if (nfs4_resolve_deny_conflicts_locked(fp, new_stp, 5743 stp, open->op_share_access, true)) 5744 status = nfserr_jukebox; 5745 spin_unlock(&fp->fi_lock); 5746 goto out; 5747 } 5748 5749 /* Set access bits in stateid */ 5750 old_access_bmap = stp->st_access_bmap; 5751 set_access(open->op_share_access, stp); 5752 5753 /* Set new deny mask */ 5754 old_deny_bmap = stp->st_deny_bmap; 5755 set_deny(open->op_share_deny, stp); 5756 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH); 5757 5758 if (!fp->fi_fds[oflag]) { 5759 spin_unlock(&fp->fi_lock); 5760 5761 status = nfsd_file_acquire_opened(rqstp, cur_fh, access, 5762 open->op_filp, &nf); 5763 if (status != nfs_ok) 5764 goto out_put_access; 5765 5766 spin_lock(&fp->fi_lock); 5767 if (!fp->fi_fds[oflag]) { 5768 fp->fi_fds[oflag] = nf; 5769 nf = NULL; 5770 } 5771 } 5772 spin_unlock(&fp->fi_lock); 5773 if (nf) 5774 nfsd_file_put(nf); 5775 5776 status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode, 5777 access)); 5778 if (status) 5779 goto out_put_access; 5780 5781 status = nfsd4_truncate(rqstp, cur_fh, open); 5782 if (status) 5783 goto out_put_access; 5784 out: 5785 return status; 5786 out_put_access: 5787 stp->st_access_bmap = old_access_bmap; 5788 nfs4_file_put_access(fp, open->op_share_access); 5789 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp); 5790 goto out; 5791 } 5792 5793 static __be32 5794 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, 5795 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, 5796 struct nfsd4_open *open) 5797 { 5798 __be32 status; 5799 unsigned char old_deny_bmap = stp->st_deny_bmap; 5800 5801 if (!test_access(open->op_share_access, stp)) 5802 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open, false); 5803 5804 /* test and set deny mode */ 5805 spin_lock(&fp->fi_lock); 5806 status = nfs4_file_check_deny(fp, 
open->op_share_deny); 5807 switch (status) { 5808 case nfs_ok: 5809 set_deny(open->op_share_deny, stp); 5810 fp->fi_share_deny |= 5811 (open->op_share_deny & NFS4_SHARE_DENY_BOTH); 5812 break; 5813 case nfserr_share_denied: 5814 if (nfs4_resolve_deny_conflicts_locked(fp, false, 5815 stp, open->op_share_deny, false)) 5816 status = nfserr_jukebox; 5817 break; 5818 } 5819 spin_unlock(&fp->fi_lock); 5820 5821 if (status != nfs_ok) 5822 return status; 5823 5824 status = nfsd4_truncate(rqstp, cur_fh, open); 5825 if (status != nfs_ok) 5826 reset_union_bmap_deny(old_deny_bmap, stp); 5827 return status; 5828 } 5829 5830 /* Should we give out recallable state?: */ 5831 static bool nfsd4_cb_channel_good(struct nfs4_client *clp) 5832 { 5833 if (clp->cl_cb_state == NFSD4_CB_UP) 5834 return true; 5835 /* 5836 * In the sessions case, since we don't have to establish a 5837 * separate connection for callbacks, we assume it's OK 5838 * until we hear otherwise: 5839 */ 5840 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; 5841 } 5842 5843 static struct file_lease *nfs4_alloc_init_lease(struct nfs4_delegation *dp) 5844 { 5845 struct file_lease *fl; 5846 5847 fl = locks_alloc_lease(); 5848 if (!fl) 5849 return NULL; 5850 fl->fl_lmops = &nfsd_lease_mng_ops; 5851 fl->c.flc_flags = FL_DELEG; 5852 fl->c.flc_type = deleg_is_read(dp->dl_type) ? F_RDLCK : F_WRLCK; 5853 fl->c.flc_owner = (fl_owner_t)dp; 5854 fl->c.flc_pid = current->tgid; 5855 fl->c.flc_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file; 5856 return fl; 5857 } 5858 5859 static int nfsd4_check_conflicting_opens(struct nfs4_client *clp, 5860 struct nfs4_file *fp) 5861 { 5862 struct nfs4_ol_stateid *st; 5863 struct file *f = fp->fi_deleg_file->nf_file; 5864 struct inode *ino = file_inode(f); 5865 int writes; 5866 5867 writes = atomic_read(&ino->i_writecount); 5868 if (!writes) 5869 return 0; 5870 /* 5871 * There could be multiple filehandles (hence multiple 5872 * nfs4_files) referencing this file, but that's not too 5873 * common; let's just give up in that case rather than 5874 * trying to go look up all the clients using that other 5875 * nfs4_file as well: 5876 */ 5877 if (fp->fi_aliased) 5878 return -EAGAIN; 5879 /* 5880 * If there's a close in progress, make sure that we see it 5881 * clear any fi_fds[] entries before we see it decrement 5882 * i_writecount: 5883 */ 5884 smp_mb__after_atomic(); 5885 5886 if (fp->fi_fds[O_WRONLY]) 5887 writes--; 5888 if (fp->fi_fds[O_RDWR]) 5889 writes--; 5890 if (writes > 0) 5891 return -EAGAIN; /* There may be non-NFSv4 writers */ 5892 /* 5893 * It's possible there are non-NFSv4 write opens in progress, 5894 * but if they haven't incremented i_writecount yet then they 5895 * also haven't called break lease yet; so, they'll break this 5896 * lease soon enough. So, all that's left to check for is NFSv4 5897 * opens: 5898 */ 5899 spin_lock(&fp->fi_lock); 5900 list_for_each_entry(st, &fp->fi_stateids, st_perfile) { 5901 if (st->st_openstp == NULL /* it's an open */ && 5902 access_permit_write(st) && 5903 st->st_stid.sc_client != clp) { 5904 spin_unlock(&fp->fi_lock); 5905 return -EAGAIN; 5906 } 5907 } 5908 spin_unlock(&fp->fi_lock); 5909 /* 5910 * There's a small chance that we could be racing with another 5911 * NFSv4 open. However, any open that hasn't added itself to 5912 * the fi_stateids list also hasn't called break_lease yet; so, 5913 * they'll break this lease soon enough. 
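 *
 * A rough sketch of the two possible interleavings (illustrative
 * only):
 *
 *   racing OPEN                        this function
 *   -----------                        -------------
 *   hash stp on fp->fi_stateids       kernel_setlease() completed
 *   call break_lease()                scan fp->fi_stateids above
 *
 * Either the racing open hashes its stateid in time for the scan
 * above to see it, or its subsequent break_lease() finds the lease
 * we just installed and triggers a recall.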
5914 */ 5915 return 0; 5916 } 5917 5918 /* 5919 * It's possible that, between opening the dentry and setting the delegation, 5920 * the file has been renamed or unlinked. Redo the lookup to verify that this 5921 * hasn't happened. 5922 */ 5923 static int 5924 nfsd4_verify_deleg_dentry(struct nfsd4_open *open, struct nfs4_file *fp, 5925 struct svc_fh *parent) 5926 { 5927 struct svc_export *exp; 5928 struct dentry *child; 5929 __be32 err; 5930 5931 err = nfsd_lookup_dentry(open->op_rqstp, parent, 5932 open->op_fname, open->op_fnamelen, 5933 &exp, &child); 5934 5935 if (err) 5936 return -EAGAIN; 5937 5938 exp_put(exp); 5939 dput(child); 5940 if (child != file_dentry(fp->fi_deleg_file->nf_file)) 5941 return -EAGAIN; 5942 5943 return 0; 5944 } 5945 5946 /* 5947 * We avoid breaking delegations held by a client due to its own activity, but 5948 * clearing setuid/setgid bits on a write is an implicit activity and the client 5949 * may not notice and continue using the old mode. Avoid giving out a delegation 5950 * on setuid/setgid files when the client is requesting an open for write. 5951 */ 5952 static int 5953 nfsd4_verify_setuid_write(struct nfsd4_open *open, struct nfsd_file *nf) 5954 { 5955 struct inode *inode = file_inode(nf->nf_file); 5956 5957 if ((open->op_share_access & NFS4_SHARE_ACCESS_WRITE) && 5958 (inode->i_mode & (S_ISUID|S_ISGID))) 5959 return -EAGAIN; 5960 return 0; 5961 } 5962 5963 #ifdef CONFIG_NFSD_V4_DELEG_TIMESTAMPS 5964 static bool nfsd4_want_deleg_timestamps(const struct nfsd4_open *open) 5965 { 5966 return open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS; 5967 } 5968 #else /* CONFIG_NFSD_V4_DELEG_TIMESTAMPS */ 5969 static bool nfsd4_want_deleg_timestamps(const struct nfsd4_open *open) 5970 { 5971 return false; 5972 } 5973 #endif /* CONFIG_NFSD_V4_DELEG_TIMESTAMPS */ 5974 5975 static struct nfs4_delegation * 5976 nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, 5977 struct svc_fh *parent) 5978 { 5979 bool deleg_ts = nfsd4_want_deleg_timestamps(open); 5980 struct nfs4_client *clp = stp->st_stid.sc_client; 5981 struct nfs4_file *fp = stp->st_stid.sc_file; 5982 struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate; 5983 struct nfs4_delegation *dp; 5984 struct nfsd_file *nf = NULL; 5985 struct file_lease *fl; 5986 int status = 0; 5987 u32 dl_type; 5988 5989 /* 5990 * The fi_had_conflict and nfs4_delegation_exists checks 5991 * here are just optimizations; we'll need to recheck them at 5992 * the end: 5993 */ 5994 if (fp->fi_had_conflict) 5995 return ERR_PTR(-EAGAIN); 5996 5997 /* 5998 * Try for a write delegation first. RFC8881 section 10.4 says: 5999 * 6000 * "An OPEN_DELEGATE_WRITE delegation allows the client to handle, 6001 * on its own, all opens." 6002 * 6003 * Furthermore, the client can use a write delegation for most READ 6004 * operations as well, so we require an O_RDWR file here. 6005 * 6006 * Offer a write delegation in the case of a BOTH open, and ensure 6007 * we get the O_RDWR descriptor. 6008 */ 6009 if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) == NFS4_SHARE_ACCESS_BOTH) { 6010 nf = find_rw_file(fp); 6011 dl_type = deleg_ts ? OPEN_DELEGATE_WRITE_ATTRS_DELEG : OPEN_DELEGATE_WRITE; 6012 } 6013 6014 /* 6015 * If the file is being opened O_RDONLY or we couldn't get an O_RDWR 6016 * file for some reason, then try for a read delegation instead. 6017 */ 6018 if (!nf && (open->op_share_access & NFS4_SHARE_ACCESS_READ)) { 6019 nf = find_readable_file(fp); 6020 dl_type = deleg_ts ?
OPEN_DELEGATE_READ_ATTRS_DELEG : OPEN_DELEGATE_READ; 6021 } 6022 6023 if (!nf) 6024 return ERR_PTR(-EAGAIN); 6025 6026 /* 6027 * File delegations and associated locks cannot be recovered if the 6028 * export is from an NFS proxy server. 6029 */ 6030 if (exportfs_cannot_lock(nf->nf_file->f_path.mnt->mnt_sb->s_export_op)) { 6031 nfsd_file_put(nf); 6032 return ERR_PTR(-EOPNOTSUPP); 6033 } 6034 6035 spin_lock(&state_lock); 6036 spin_lock(&fp->fi_lock); 6037 if (nfs4_delegation_exists(clp, fp)) 6038 status = -EAGAIN; 6039 else if (nfsd4_verify_setuid_write(open, nf)) 6040 status = -EAGAIN; 6041 else if (!fp->fi_deleg_file) { 6042 fp->fi_deleg_file = nf; 6043 /* increment early to prevent fi_deleg_file from being 6044 * cleared */ 6045 fp->fi_delegees = 1; 6046 nf = NULL; 6047 } else 6048 fp->fi_delegees++; 6049 spin_unlock(&fp->fi_lock); 6050 spin_unlock(&state_lock); 6051 if (nf) 6052 nfsd_file_put(nf); 6053 if (status) 6054 return ERR_PTR(status); 6055 6056 status = -ENOMEM; 6057 dp = alloc_init_deleg(clp, fp, odstate, dl_type); 6058 if (!dp) 6059 goto out_delegees; 6060 6061 fl = nfs4_alloc_init_lease(dp); 6062 if (!fl) 6063 goto out_clnt_odstate; 6064 6065 status = kernel_setlease(fp->fi_deleg_file->nf_file, 6066 fl->c.flc_type, &fl, NULL); 6067 if (fl) 6068 locks_free_lease(fl); 6069 if (status) 6070 goto out_clnt_odstate; 6071 6072 if (parent) { 6073 status = nfsd4_verify_deleg_dentry(open, fp, parent); 6074 if (status) 6075 goto out_unlock; 6076 } 6077 6078 status = nfsd4_check_conflicting_opens(clp, fp); 6079 if (status) 6080 goto out_unlock; 6081 6082 /* 6083 * Now that the deleg is set, check again to ensure that nothing 6084 * raced in and changed the mode while we weren't looking. 6085 */ 6086 status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file); 6087 if (status) 6088 goto out_unlock; 6089 6090 status = -EAGAIN; 6091 if (fp->fi_had_conflict) 6092 goto out_unlock; 6093 6094 spin_lock(&state_lock); 6095 spin_lock(&clp->cl_lock); 6096 spin_lock(&fp->fi_lock); 6097 status = hash_delegation_locked(dp, fp); 6098 spin_unlock(&fp->fi_lock); 6099 spin_unlock(&clp->cl_lock); 6100 spin_unlock(&state_lock); 6101 6102 if (status) 6103 goto out_unlock; 6104 6105 return dp; 6106 out_unlock: 6107 kernel_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp); 6108 out_clnt_odstate: 6109 put_clnt_odstate(dp->dl_clnt_odstate); 6110 nfs4_put_stid(&dp->dl_stid); 6111 out_delegees: 6112 put_deleg_file(fp); 6113 return ERR_PTR(status); 6114 } 6115 6116 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status) 6117 { 6118 open->op_delegate_type = OPEN_DELEGATE_NONE_EXT; 6119 if (status == -EAGAIN) 6120 open->op_why_no_deleg = WND4_CONTENTION; 6121 else { 6122 open->op_why_no_deleg = WND4_RESOURCE; 6123 switch (open->op_deleg_want) { 6124 case OPEN4_SHARE_ACCESS_WANT_READ_DELEG: 6125 case OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG: 6126 case OPEN4_SHARE_ACCESS_WANT_ANY_DELEG: 6127 break; 6128 case OPEN4_SHARE_ACCESS_WANT_CANCEL: 6129 open->op_why_no_deleg = WND4_CANCELLED; 6130 break; 6131 case OPEN4_SHARE_ACCESS_WANT_NO_DELEG: 6132 WARN_ON_ONCE(1); 6133 } 6134 } 6135 } 6136 6137 static bool 6138 nfs4_delegation_stat(struct nfs4_delegation *dp, struct svc_fh *currentfh, 6139 struct kstat *stat) 6140 { 6141 struct nfsd_file *nf = find_rw_file(dp->dl_stid.sc_file); 6142 struct path path; 6143 int rc; 6144 6145 if (!nf) 6146 return false; 6147 6148 path.mnt = currentfh->fh_export->ex_path.mnt; 6149 path.dentry = file_dentry(nf->nf_file); 6150 6151 rc = vfs_getattr(&path, stat, 6152 
(STATX_MODE | STATX_SIZE | STATX_CTIME | STATX_CHANGE_COOKIE), 6153 AT_STATX_SYNC_AS_STAT); 6154 6155 nfsd_file_put(nf); 6156 return rc == 0; 6157 } 6158 6159 /* 6160 * The Linux NFS server does not offer write delegations to NFSv4.0 6161 * clients in order to avoid conflicts between write delegations and 6162 * GETATTRs requesting CHANGE or SIZE attributes. 6163 * 6164 * With NFSv4.1 and later minorversions, the SEQUENCE operation that 6165 * begins each COMPOUND contains a client ID. Delegation recall can 6166 * be avoided when the server recognizes the client sending a 6167 * GETATTR also holds write delegation it conflicts with. 6168 * 6169 * However, the NFSv4.0 protocol does not enable a server to 6170 * determine that a GETATTR originated from the client holding the 6171 * conflicting delegation versus coming from some other client. Per 6172 * RFC 7530 Section 16.7.5, the server must recall or send a 6173 * CB_GETATTR even when the GETATTR originates from the client that 6174 * holds the conflicting delegation. 6175 * 6176 * An NFSv4.0 client can trigger a pathological situation if it 6177 * always sends a DELEGRETURN preceded by a conflicting GETATTR in 6178 * the same COMPOUND. COMPOUND execution will always stop at the 6179 * GETATTR and the DELEGRETURN will never get executed. The server 6180 * eventually revokes the delegation, which can result in loss of 6181 * open or lock state. 6182 */ 6183 static void 6184 nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp, 6185 struct svc_fh *currentfh) 6186 { 6187 struct nfs4_openowner *oo = openowner(stp->st_stateowner); 6188 bool deleg_ts = nfsd4_want_deleg_timestamps(open); 6189 struct nfs4_client *clp = stp->st_stid.sc_client; 6190 struct svc_fh *parent = NULL; 6191 struct nfs4_delegation *dp; 6192 struct kstat stat; 6193 int status = 0; 6194 int cb_up; 6195 6196 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client); 6197 open->op_recall = false; 6198 switch (open->op_claim_type) { 6199 case NFS4_OPEN_CLAIM_PREVIOUS: 6200 if (!cb_up) 6201 open->op_recall = true; 6202 break; 6203 case NFS4_OPEN_CLAIM_NULL: 6204 parent = currentfh; 6205 fallthrough; 6206 case NFS4_OPEN_CLAIM_FH: 6207 /* 6208 * Let's not give out any delegations till everyone's 6209 * had the chance to reclaim theirs, *and* until 6210 * NLM locks have all been reclaimed: 6211 */ 6212 if (locks_in_grace(clp->net)) 6213 goto out_no_deleg; 6214 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) 6215 goto out_no_deleg; 6216 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE && 6217 !clp->cl_minorversion) 6218 goto out_no_deleg; 6219 break; 6220 default: 6221 goto out_no_deleg; 6222 } 6223 dp = nfs4_set_delegation(open, stp, parent); 6224 if (IS_ERR(dp)) 6225 goto out_no_deleg; 6226 6227 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid)); 6228 6229 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) { 6230 if (!nfs4_delegation_stat(dp, currentfh, &stat)) { 6231 nfs4_put_stid(&dp->dl_stid); 6232 destroy_delegation(dp); 6233 goto out_no_deleg; 6234 } 6235 open->op_delegate_type = deleg_ts ? OPEN_DELEGATE_WRITE_ATTRS_DELEG : 6236 OPEN_DELEGATE_WRITE; 6237 dp->dl_cb_fattr.ncf_cur_fsize = stat.size; 6238 dp->dl_cb_fattr.ncf_initial_cinfo = nfsd4_change_attribute(&stat); 6239 trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid); 6240 } else { 6241 open->op_delegate_type = deleg_ts ? 
OPEN_DELEGATE_READ_ATTRS_DELEG : 6242 OPEN_DELEGATE_READ; 6243 trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid); 6244 } 6245 nfs4_put_stid(&dp->dl_stid); 6246 return; 6247 out_no_deleg: 6248 open->op_delegate_type = OPEN_DELEGATE_NONE; 6249 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS && 6250 open->op_delegate_type != OPEN_DELEGATE_NONE) { 6251 dprintk("NFSD: WARNING: refusing delegation reclaim\n"); 6252 open->op_recall = true; 6253 } 6254 6255 /* 4.1 client asking for a delegation? */ 6256 if (open->op_deleg_want) 6257 nfsd4_open_deleg_none_ext(open, status); 6258 return; 6259 } 6260 6261 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open, 6262 struct nfs4_delegation *dp) 6263 { 6264 if (deleg_is_write(dp->dl_type)) { 6265 if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_READ_DELEG) { 6266 open->op_delegate_type = OPEN_DELEGATE_NONE_EXT; 6267 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE; 6268 } else if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_WRITE_DELEG) { 6269 open->op_delegate_type = OPEN_DELEGATE_NONE_EXT; 6270 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE; 6271 } 6272 } 6273 /* Otherwise the client must be confused wanting a delegation 6274 * it already has, therefore we don't return 6275 * OPEN_DELEGATE_NONE_EXT and reason. 6276 */ 6277 } 6278 6279 /* Are we returning only a delegation stateid? */ 6280 static bool open_xor_delegation(struct nfsd4_open *open) 6281 { 6282 if (!(open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_OPEN_XOR_DELEGATION)) 6283 return false; 6284 /* Did we actually get a delegation? */ 6285 if (!deleg_is_read(open->op_delegate_type) && !deleg_is_write(open->op_delegate_type)) 6286 return false; 6287 return true; 6288 } 6289 6290 /** 6291 * nfsd4_process_open2 - finish open processing 6292 * @rqstp: the RPC transaction being executed 6293 * @current_fh: NFSv4 COMPOUND's current filehandle 6294 * @open: OPEN arguments 6295 * 6296 * If successful, (1) truncate the file if open->op_truncate was 6297 * set, (2) set open->op_stateid, (3) set open->op_delegation. 6298 * 6299 * Returns %nfs_ok on success; otherwise an nfs4stat value in 6300 * network byte order is returned. 6301 */ 6302 __be32 6303 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) 6304 { 6305 struct nfsd4_compoundres *resp = rqstp->rq_resp; 6306 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client; 6307 struct nfs4_file *fp = NULL; 6308 struct nfs4_ol_stateid *stp = NULL; 6309 struct nfs4_delegation *dp = NULL; 6310 __be32 status; 6311 bool new_stp = false; 6312 6313 /* 6314 * Lookup file; if found, lookup stateid and check open request, 6315 * and check for delegations in the process of being recalled. 6316 * If not found, create the nfs4_file struct 6317 */ 6318 fp = nfsd4_file_hash_insert(open->op_file, current_fh); 6319 if (unlikely(!fp)) 6320 return nfserr_jukebox; 6321 if (fp != open->op_file) { 6322 status = nfs4_check_deleg(cl, open, &dp); 6323 if (status) 6324 goto out; 6325 stp = nfsd4_find_and_lock_existing_open(fp, open); 6326 } else { 6327 open->op_file = NULL; 6328 status = nfserr_bad_stateid; 6329 if (nfsd4_is_deleg_cur(open)) 6330 goto out; 6331 } 6332 6333 if (!stp) { 6334 stp = init_open_stateid(fp, open); 6335 if (!stp) { 6336 status = nfserr_jukebox; 6337 goto out; 6338 } 6339 6340 if (!open->op_stp) 6341 new_stp = true; 6342 } 6343 6344 /* 6345 * OPEN the file, or upgrade an existing OPEN. 6346 * If truncate fails, the OPEN fails. 6347 * 6348 * stp is already locked. 
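 * (Both init_open_stateid() and nfsd4_find_and_lock_existing_open()
 * return with st_mutex held, which is why every path below must
 * eventually call mutex_unlock().)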
6349 */ 6350 if (!new_stp) { 6351 /* Stateid was found, this is an OPEN upgrade */ 6352 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open); 6353 if (status) { 6354 mutex_unlock(&stp->st_mutex); 6355 goto out; 6356 } 6357 } else { 6358 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open, true); 6359 if (status) { 6360 release_open_stateid(stp); 6361 mutex_unlock(&stp->st_mutex); 6362 goto out; 6363 } 6364 6365 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp, 6366 open->op_odstate); 6367 if (stp->st_clnt_odstate == open->op_odstate) 6368 open->op_odstate = NULL; 6369 } 6370 6371 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid); 6372 mutex_unlock(&stp->st_mutex); 6373 6374 if (nfsd4_has_session(&resp->cstate)) { 6375 if (open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_NO_DELEG) { 6376 open->op_delegate_type = OPEN_DELEGATE_NONE_EXT; 6377 open->op_why_no_deleg = WND4_NOT_WANTED; 6378 goto nodeleg; 6379 } 6380 } 6381 6382 /* 6383 * Attempt to hand out a delegation. No error return, because the 6384 * OPEN succeeds even if we fail. 6385 */ 6386 nfs4_open_delegation(open, stp, &resp->cstate.current_fh); 6387 6388 /* 6389 * If there is an existing open stateid, it must be updated and 6390 * returned. Only respect WANT_OPEN_XOR_DELEGATION when a new 6391 * open stateid would have to be created. 6392 */ 6393 if (new_stp && open_xor_delegation(open)) { 6394 memcpy(&open->op_stateid, &zero_stateid, sizeof(open->op_stateid)); 6395 open->op_rflags |= OPEN4_RESULT_NO_OPEN_STATEID; 6396 release_open_stateid(stp); 6397 } 6398 nodeleg: 6399 status = nfs_ok; 6400 trace_nfsd_open(&stp->st_stid.sc_stateid); 6401 out: 6402 /* 4.1 client trying to upgrade/downgrade delegation? */ 6403 if (open->op_delegate_type == OPEN_DELEGATE_NONE && dp && 6404 open->op_deleg_want) 6405 nfsd4_deleg_xgrade_none_ext(open, dp); 6406 6407 if (fp) 6408 put_nfs4_file(fp); 6409 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS) 6410 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; 6411 /* 6412 * To finish the open response, we just need to set the rflags. 
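 * LOCKTYPE_POSIX is always advertised; MAY_NOTIFY_LOCK is only
 * meaningful to sessions (v4.1+) clients; CONFIRM is demanded only
 * of still-unconfirmed v4.0 openowners.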
6413 */ 6414 open->op_rflags |= NFS4_OPEN_RESULT_LOCKTYPE_POSIX; 6415 if (nfsd4_has_session(&resp->cstate)) 6416 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK; 6417 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED)) 6418 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM; 6419 6420 if (dp) 6421 nfs4_put_stid(&dp->dl_stid); 6422 if (stp) 6423 nfs4_put_stid(&stp->st_stid); 6424 6425 return status; 6426 } 6427 6428 void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate, 6429 struct nfsd4_open *open) 6430 { 6431 if (open->op_openowner) 6432 nfs4_put_stateowner(&open->op_openowner->oo_owner); 6433 if (open->op_file) 6434 kmem_cache_free(file_slab, open->op_file); 6435 if (open->op_stp) 6436 nfs4_put_stid(&open->op_stp->st_stid); 6437 if (open->op_odstate) 6438 kmem_cache_free(odstate_slab, open->op_odstate); 6439 } 6440 6441 __be32 6442 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 6443 union nfsd4_op_u *u) 6444 { 6445 clientid_t *clid = &u->renew; 6446 struct nfs4_client *clp; 6447 __be32 status; 6448 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 6449 6450 trace_nfsd_clid_renew(clid); 6451 status = set_client(clid, cstate, nn); 6452 if (status) 6453 return status; 6454 clp = cstate->clp; 6455 if (!list_empty(&clp->cl_delegations) 6456 && clp->cl_cb_state != NFSD4_CB_UP) 6457 return nfserr_cb_path_down; 6458 return nfs_ok; 6459 } 6460 6461 void 6462 nfsd4_end_grace(struct nfsd_net *nn) 6463 { 6464 /* do nothing if grace period already ended */ 6465 if (nn->grace_ended) 6466 return; 6467 6468 trace_nfsd_grace_complete(nn); 6469 nn->grace_ended = true; 6470 /* 6471 * If the server goes down again right now, an NFSv4 6472 * client will still be allowed to reclaim after it comes back up, 6473 * even if it hasn't yet had a chance to reclaim state this time. 6474 * 6475 */ 6476 nfsd4_record_grace_done(nn); 6477 /* 6478 * At this point, NFSv4 clients can still reclaim. But if the 6479 * server crashes, any that have not yet reclaimed will be out 6480 * of luck on the next boot. 6481 * 6482 * (NFSv4.1+ clients are considered to have reclaimed once they 6483 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to 6484 * have reclaimed after their first OPEN.) 6485 */ 6486 locks_end_grace(&nn->nfsd4_manager); 6487 /* 6488 * At this point, and once lockd and/or any other containers 6489 * exit their grace period, further reclaims will fail and 6490 * regular locking can resume. 6491 */ 6492 } 6493 6494 /* 6495 * If we've waited a lease period but there are still clients trying to 6496 * reclaim, wait a little longer to give them a chance to finish. 
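 *
 * A worked example, assuming the default 90-second lease: the grace
 * period would normally end at boot_time + 90s, but while clients
 * are still reclaiming it is extended, at most until
 * double_grace_period_end = boot_time + 180s (see below).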
6497 */ 6498 static bool clients_still_reclaiming(struct nfsd_net *nn) 6499 { 6500 time64_t double_grace_period_end = nn->boot_time + 6501 2 * nn->nfsd4_lease; 6502 6503 if (nn->track_reclaim_completes && 6504 atomic_read(&nn->nr_reclaim_complete) == 6505 nn->reclaim_str_hashtbl_size) 6506 return false; 6507 if (!nn->somebody_reclaimed) 6508 return false; 6509 nn->somebody_reclaimed = false; 6510 /* 6511 * If we've given them *two* lease times to reclaim, and they're 6512 * still not done, give up: 6513 */ 6514 if (ktime_get_boottime_seconds() > double_grace_period_end) 6515 return false; 6516 return true; 6517 } 6518 6519 struct laundry_time { 6520 time64_t cutoff; 6521 time64_t new_timeo; 6522 }; 6523 6524 static bool state_expired(struct laundry_time *lt, time64_t last_refresh) 6525 { 6526 time64_t time_remaining; 6527 6528 if (last_refresh < lt->cutoff) 6529 return true; 6530 time_remaining = last_refresh - lt->cutoff; 6531 lt->new_timeo = min(lt->new_timeo, time_remaining); 6532 return false; 6533 } 6534 6535 #ifdef CONFIG_NFSD_V4_2_INTER_SSC 6536 void nfsd4_ssc_init_umount_work(struct nfsd_net *nn) 6537 { 6538 spin_lock_init(&nn->nfsd_ssc_lock); 6539 INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list); 6540 init_waitqueue_head(&nn->nfsd_ssc_waitq); 6541 } 6542 6543 /* 6544 * This is called when nfsd is being shut down, after all inter_ssc 6545 * cleanup is done, to destroy the ssc delayed unmount list. 6546 */ 6547 static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn) 6548 { 6549 struct nfsd4_ssc_umount_item *ni = NULL; 6550 struct nfsd4_ssc_umount_item *tmp; 6551 6552 spin_lock(&nn->nfsd_ssc_lock); 6553 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) { 6554 list_del(&ni->nsui_list); 6555 spin_unlock(&nn->nfsd_ssc_lock); 6556 mntput(ni->nsui_vfsmount); 6557 kfree(ni); 6558 spin_lock(&nn->nfsd_ssc_lock); 6559 } 6560 spin_unlock(&nn->nfsd_ssc_lock); 6561 } 6562 6563 static void nfsd4_ssc_expire_umount(struct nfsd_net *nn) 6564 { 6565 bool do_wakeup = false; 6566 struct nfsd4_ssc_umount_item *ni = NULL; 6567 struct nfsd4_ssc_umount_item *tmp; 6568 6569 spin_lock(&nn->nfsd_ssc_lock); 6570 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) { 6571 if (time_after(jiffies, ni->nsui_expire)) { 6572 if (refcount_read(&ni->nsui_refcnt) > 1) 6573 continue; 6574 6575 /* mark as being unmounted */ 6576 ni->nsui_busy = true; 6577 spin_unlock(&nn->nfsd_ssc_lock); 6578 mntput(ni->nsui_vfsmount); 6579 spin_lock(&nn->nfsd_ssc_lock); 6580 6581 /* waiters need to restart from the beginning of the list */ 6582 list_del(&ni->nsui_list); 6583 kfree(ni); 6584 6585 /* wake up ssc_connect waiters */ 6586 do_wakeup = true; 6587 continue; 6588 } 6589 break; 6590 } 6591 if (do_wakeup) 6592 wake_up_all(&nn->nfsd_ssc_waitq); 6593 spin_unlock(&nn->nfsd_ssc_lock); 6594 } 6595 #endif 6596 6597 /* Check if any lock belonging to this lockowner has any blockers */ 6598 static bool 6599 nfs4_lockowner_has_blockers(struct nfs4_lockowner *lo) 6600 { 6601 struct file_lock_context *ctx; 6602 struct nfs4_ol_stateid *stp; 6603 struct nfs4_file *nf; 6604 6605 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) { 6606 nf = stp->st_stid.sc_file; 6607 ctx = locks_inode_context(nf->fi_inode); 6608 if (!ctx) 6609 continue; 6610 if (locks_owner_has_blockers(ctx, lo)) 6611 return true; 6612 } 6613 return false; 6614 } 6615 6616 static bool 6617 nfs4_anylock_blockers(struct nfs4_client *clp) 6618 { 6619 int i; 6620 struct nfs4_stateowner *so; 6621 struct nfs4_lockowner *lo; 6622 6623 if
(atomic_read(&clp->cl_delegs_in_recall)) 6624 return true; 6625 spin_lock(&clp->cl_lock); 6626 for (i = 0; i < OWNER_HASH_SIZE; i++) { 6627 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[i], 6628 so_strhash) { 6629 if (so->so_is_open_owner) 6630 continue; 6631 lo = lockowner(so); 6632 if (nfs4_lockowner_has_blockers(lo)) { 6633 spin_unlock(&clp->cl_lock); 6634 return true; 6635 } 6636 } 6637 } 6638 spin_unlock(&clp->cl_lock); 6639 return false; 6640 } 6641 6642 static void 6643 nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist, 6644 struct laundry_time *lt) 6645 { 6646 unsigned int maxreap, reapcnt = 0; 6647 struct list_head *pos, *next; 6648 struct nfs4_client *clp; 6649 6650 maxreap = (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) ? 6651 NFSD_CLIENT_MAX_TRIM_PER_RUN : 0; 6652 INIT_LIST_HEAD(reaplist); 6653 spin_lock(&nn->client_lock); 6654 list_for_each_safe(pos, next, &nn->client_lru) { 6655 clp = list_entry(pos, struct nfs4_client, cl_lru); 6656 if (clp->cl_state == NFSD4_EXPIRABLE) 6657 goto exp_client; 6658 if (!state_expired(lt, clp->cl_time)) 6659 break; 6660 if (!atomic_read(&clp->cl_rpc_users)) { 6661 if (clp->cl_state == NFSD4_ACTIVE) 6662 atomic_inc(&nn->nfsd_courtesy_clients); 6663 clp->cl_state = NFSD4_COURTESY; 6664 } 6665 if (!client_has_state(clp)) 6666 goto exp_client; 6667 if (!nfs4_anylock_blockers(clp)) 6668 if (reapcnt >= maxreap) 6669 continue; 6670 exp_client: 6671 if (!mark_client_expired_locked(clp)) { 6672 list_add(&clp->cl_lru, reaplist); 6673 reapcnt++; 6674 } 6675 } 6676 spin_unlock(&nn->client_lock); 6677 } 6678 6679 static void 6680 nfs4_get_courtesy_client_reaplist(struct nfsd_net *nn, 6681 struct list_head *reaplist) 6682 { 6683 unsigned int maxreap = 0, reapcnt = 0; 6684 struct list_head *pos, *next; 6685 struct nfs4_client *clp; 6686 6687 maxreap = NFSD_CLIENT_MAX_TRIM_PER_RUN; 6688 INIT_LIST_HEAD(reaplist); 6689 6690 spin_lock(&nn->client_lock); 6691 list_for_each_safe(pos, next, &nn->client_lru) { 6692 clp = list_entry(pos, struct nfs4_client, cl_lru); 6693 if (clp->cl_state == NFSD4_ACTIVE) 6694 break; 6695 if (reapcnt >= maxreap) 6696 break; 6697 if (!mark_client_expired_locked(clp)) { 6698 list_add(&clp->cl_lru, reaplist); 6699 reapcnt++; 6700 } 6701 } 6702 spin_unlock(&nn->client_lock); 6703 } 6704 6705 static void 6706 nfs4_process_client_reaplist(struct list_head *reaplist) 6707 { 6708 struct list_head *pos, *next; 6709 struct nfs4_client *clp; 6710 6711 list_for_each_safe(pos, next, reaplist) { 6712 clp = list_entry(pos, struct nfs4_client, cl_lru); 6713 trace_nfsd_clid_purged(&clp->cl_clientid); 6714 list_del_init(&clp->cl_lru); 6715 expire_client(clp); 6716 } 6717 } 6718 6719 static void nfs40_clean_admin_revoked(struct nfsd_net *nn, 6720 struct laundry_time *lt) 6721 { 6722 struct nfs4_client *clp; 6723 6724 spin_lock(&nn->client_lock); 6725 if (nn->nfs40_last_revoke == 0 || 6726 nn->nfs40_last_revoke > lt->cutoff) { 6727 spin_unlock(&nn->client_lock); 6728 return; 6729 } 6730 nn->nfs40_last_revoke = 0; 6731 6732 retry: 6733 list_for_each_entry(clp, &nn->client_lru, cl_lru) { 6734 unsigned long id, tmp; 6735 struct nfs4_stid *stid; 6736 6737 if (atomic_read(&clp->cl_admin_revoked) == 0) 6738 continue; 6739 6740 spin_lock(&clp->cl_lock); 6741 idr_for_each_entry_ul(&clp->cl_stateids, stid, tmp, id) 6742 if (stid->sc_status & SC_STATUS_ADMIN_REVOKED) { 6743 refcount_inc(&stid->sc_count); 6744 spin_unlock(&nn->client_lock); 6745 /* this function drops ->cl_lock */ 6746 nfsd4_drop_revoked_stid(stid); 6747 
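/*
 * Both nn->client_lock and clp->cl_lock were dropped above, so
 * the client_lru list and the stateid table may have changed
 * under us; drop our reference and restart the walk from the top.
 */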
nfs4_put_stid(stid); 6748 spin_lock(&nn->client_lock); 6749 goto retry; 6750 } 6751 spin_unlock(&clp->cl_lock); 6752 } 6753 spin_unlock(&nn->client_lock); 6754 } 6755 6756 static time64_t 6757 nfs4_laundromat(struct nfsd_net *nn) 6758 { 6759 struct nfs4_openowner *oo; 6760 struct nfs4_delegation *dp; 6761 struct nfs4_ol_stateid *stp; 6762 struct nfsd4_blocked_lock *nbl; 6763 struct list_head *pos, *next, reaplist; 6764 struct laundry_time lt = { 6765 .cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease, 6766 .new_timeo = nn->nfsd4_lease 6767 }; 6768 struct nfs4_cpntf_state *cps; 6769 copy_stateid_t *cps_t; 6770 int i; 6771 6772 if (clients_still_reclaiming(nn)) { 6773 lt.new_timeo = 0; 6774 goto out; 6775 } 6776 nfsd4_end_grace(nn); 6777 6778 spin_lock(&nn->s2s_cp_lock); 6779 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) { 6780 cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid); 6781 if (cps->cp_stateid.cs_type == NFS4_COPYNOTIFY_STID && 6782 state_expired(<, cps->cpntf_time)) 6783 _free_cpntf_state_locked(nn, cps); 6784 } 6785 spin_unlock(&nn->s2s_cp_lock); 6786 nfsd4_async_copy_reaper(nn); 6787 nfs4_get_client_reaplist(nn, &reaplist, <); 6788 nfs4_process_client_reaplist(&reaplist); 6789 6790 nfs40_clean_admin_revoked(nn, <); 6791 6792 spin_lock(&state_lock); 6793 list_for_each_safe(pos, next, &nn->del_recall_lru) { 6794 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 6795 if (!state_expired(<, dp->dl_time)) 6796 break; 6797 refcount_inc(&dp->dl_stid.sc_count); 6798 unhash_delegation_locked(dp, SC_STATUS_REVOKED); 6799 list_add(&dp->dl_recall_lru, &reaplist); 6800 } 6801 spin_unlock(&state_lock); 6802 while (!list_empty(&reaplist)) { 6803 dp = list_first_entry(&reaplist, struct nfs4_delegation, 6804 dl_recall_lru); 6805 list_del_init(&dp->dl_recall_lru); 6806 revoke_delegation(dp); 6807 } 6808 6809 spin_lock(&nn->client_lock); 6810 while (!list_empty(&nn->close_lru)) { 6811 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner, 6812 oo_close_lru); 6813 if (!state_expired(<, oo->oo_time)) 6814 break; 6815 list_del_init(&oo->oo_close_lru); 6816 stp = oo->oo_last_closed_stid; 6817 oo->oo_last_closed_stid = NULL; 6818 spin_unlock(&nn->client_lock); 6819 nfs4_put_stid(&stp->st_stid); 6820 spin_lock(&nn->client_lock); 6821 } 6822 spin_unlock(&nn->client_lock); 6823 6824 /* 6825 * It's possible for a client to try and acquire an already held lock 6826 * that is being held for a long time, and then lose interest in it. 6827 * So, we clean out any un-revisited request after a lease period 6828 * under the assumption that the client is no longer interested. 6829 * 6830 * RFC5661, sec. 9.6 states that the client must not rely on getting 6831 * notifications and must continue to poll for locks, even when the 6832 * server supports them. Thus this shouldn't lead to clients blocking 6833 * indefinitely once the lock does become free. 
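 *
 * (nbl->nbl_time is stamped when the blocked lock is queued, so
 * state_expired() below reaps only requests that have gone
 * unrevisited for at least a full lease period.)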
6834 */ 6835 BUG_ON(!list_empty(&reaplist)); 6836 spin_lock(&nn->blocked_locks_lock); 6837 while (!list_empty(&nn->blocked_locks_lru)) { 6838 nbl = list_first_entry(&nn->blocked_locks_lru, 6839 struct nfsd4_blocked_lock, nbl_lru); 6840 if (!state_expired(<, nbl->nbl_time)) 6841 break; 6842 list_move(&nbl->nbl_lru, &reaplist); 6843 list_del_init(&nbl->nbl_list); 6844 } 6845 spin_unlock(&nn->blocked_locks_lock); 6846 6847 while (!list_empty(&reaplist)) { 6848 nbl = list_first_entry(&reaplist, 6849 struct nfsd4_blocked_lock, nbl_lru); 6850 list_del_init(&nbl->nbl_lru); 6851 free_blocked_lock(nbl); 6852 } 6853 #ifdef CONFIG_NFSD_V4_2_INTER_SSC 6854 /* service the server-to-server copy delayed unmount list */ 6855 nfsd4_ssc_expire_umount(nn); 6856 #endif 6857 if (atomic_long_read(&num_delegations) >= max_delegations) 6858 deleg_reaper(nn); 6859 out: 6860 return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT); 6861 } 6862 6863 static void laundromat_main(struct work_struct *); 6864 6865 static void 6866 laundromat_main(struct work_struct *laundry) 6867 { 6868 time64_t t; 6869 struct delayed_work *dwork = to_delayed_work(laundry); 6870 struct nfsd_net *nn = container_of(dwork, struct nfsd_net, 6871 laundromat_work); 6872 6873 t = nfs4_laundromat(nn); 6874 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ); 6875 } 6876 6877 static void 6878 courtesy_client_reaper(struct nfsd_net *nn) 6879 { 6880 struct list_head reaplist; 6881 6882 nfs4_get_courtesy_client_reaplist(nn, &reaplist); 6883 nfs4_process_client_reaplist(&reaplist); 6884 } 6885 6886 static void 6887 deleg_reaper(struct nfsd_net *nn) 6888 { 6889 struct list_head *pos, *next; 6890 struct nfs4_client *clp; 6891 6892 spin_lock(&nn->client_lock); 6893 list_for_each_safe(pos, next, &nn->client_lru) { 6894 clp = list_entry(pos, struct nfs4_client, cl_lru); 6895 6896 if (clp->cl_state != NFSD4_ACTIVE) 6897 continue; 6898 if (list_empty(&clp->cl_delegations)) 6899 continue; 6900 if (atomic_read(&clp->cl_delegs_in_recall)) 6901 continue; 6902 if (test_and_set_bit(NFSD4_CALLBACK_RUNNING, &clp->cl_ra->ra_cb.cb_flags)) 6903 continue; 6904 if (ktime_get_boottime_seconds() - clp->cl_ra_time < 5) 6905 continue; 6906 if (clp->cl_cb_state != NFSD4_CB_UP) 6907 continue; 6908 6909 /* release in nfsd4_cb_recall_any_release */ 6910 kref_get(&clp->cl_nfsdfs.cl_ref); 6911 clp->cl_ra_time = ktime_get_boottime_seconds(); 6912 clp->cl_ra->ra_keep = 0; 6913 clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG) | 6914 BIT(RCA4_TYPE_MASK_WDATA_DLG); 6915 trace_nfsd_cb_recall_any(clp->cl_ra); 6916 nfsd4_run_cb(&clp->cl_ra->ra_cb); 6917 } 6918 spin_unlock(&nn->client_lock); 6919 } 6920 6921 static void 6922 nfsd4_state_shrinker_worker(struct work_struct *work) 6923 { 6924 struct nfsd_net *nn = container_of(work, struct nfsd_net, 6925 nfsd_shrinker_work); 6926 6927 courtesy_client_reaper(nn); 6928 deleg_reaper(nn); 6929 } 6930 6931 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp) 6932 { 6933 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle)) 6934 return nfserr_bad_stateid; 6935 return nfs_ok; 6936 } 6937 6938 static 6939 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags) 6940 { 6941 __be32 status = nfserr_openmode; 6942 6943 /* For lock stateid's, we test the parent open, not the lock: */ 6944 if (stp->st_openstp) 6945 stp = stp->st_openstp; 6946 if ((flags & WR_STATE) && !access_permit_write(stp)) 6947 goto out; 6948 if ((flags & RD_STATE) && !access_permit_read(stp)) 6949 goto out; 6950 status = 
nfs_ok; 6951 out: 6952 return status; 6953 } 6954 6955 static inline __be32 6956 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags) 6957 { 6958 if (ONE_STATEID(stateid) && (flags & RD_STATE)) 6959 return nfs_ok; 6960 else if (opens_in_grace(net)) { 6961 /* Answer in remaining cases depends on existence of 6962 * conflicting state; so we must wait out the grace period. */ 6963 return nfserr_grace; 6964 } else if (flags & WR_STATE) 6965 return nfs4_share_conflict(current_fh, 6966 NFS4_SHARE_DENY_WRITE); 6967 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */ 6968 return nfs4_share_conflict(current_fh, 6969 NFS4_SHARE_DENY_READ); 6970 } 6971 6972 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session) 6973 { 6974 /* 6975 * When sessions are used the stateid generation number is ignored 6976 * when it is zero. 6977 */ 6978 if (has_session && in->si_generation == 0) 6979 return nfs_ok; 6980 6981 if (in->si_generation == ref->si_generation) 6982 return nfs_ok; 6983 6984 /* If the client sends us a stateid from the future, it's buggy: */ 6985 if (nfsd4_stateid_generation_after(in, ref)) 6986 return nfserr_bad_stateid; 6987 /* 6988 * However, we could see a stateid from the past, even from a 6989 * non-buggy client. For example, if the client sends a lock 6990 * while some IO is outstanding, the lock may bump si_generation 6991 * while the IO is still in flight. The client could avoid that 6992 * situation by waiting for responses on all the IO requests, 6993 * but better performance may result in retrying IO that 6994 * receives an old_stateid error if requests are rarely 6995 * reordered in flight: 6996 */ 6997 return nfserr_old_stateid; 6998 } 6999 7000 static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session) 7001 { 7002 __be32 ret; 7003 7004 spin_lock(&s->sc_lock); 7005 ret = nfsd4_verify_open_stid(s); 7006 if (ret == nfs_ok) 7007 ret = check_stateid_generation(in, &s->sc_stateid, has_session); 7008 spin_unlock(&s->sc_lock); 7009 if (ret == nfserr_admin_revoked) 7010 nfsd40_drop_revoked_stid(s->sc_client, 7011 &s->sc_stateid); 7012 return ret; 7013 } 7014 7015 static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols) 7016 { 7017 if (ols->st_stateowner->so_is_open_owner && 7018 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) 7019 return nfserr_bad_stateid; 7020 return nfs_ok; 7021 } 7022 7023 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) 7024 { 7025 struct nfs4_stid *s; 7026 __be32 status = nfserr_bad_stateid; 7027 7028 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || 7029 CLOSE_STATEID(stateid)) 7030 return status; 7031 spin_lock(&cl->cl_lock); 7032 s = find_stateid_locked(cl, stateid); 7033 if (!s) 7034 goto out_unlock; 7035 status = nfsd4_stid_check_stateid_generation(stateid, s, 1); 7036 if (status) 7037 goto out_unlock; 7038 status = nfsd4_verify_open_stid(s); 7039 if (status) 7040 goto out_unlock; 7041 7042 switch (s->sc_type) { 7043 case SC_TYPE_DELEG: 7044 status = nfs_ok; 7045 break; 7046 case SC_TYPE_OPEN: 7047 case SC_TYPE_LOCK: 7048 status = nfsd4_check_openowner_confirmed(openlockstateid(s)); 7049 break; 7050 default: 7051 printk("unknown stateid type %x\n", s->sc_type); 7052 status = nfserr_bad_stateid; 7053 } 7054 out_unlock: 7055 spin_unlock(&cl->cl_lock); 7056 if (status == nfserr_admin_revoked) 7057 nfsd40_drop_revoked_stid(cl, stateid); 7058 return status; 7059 } 7060 7061 __be32 7062 
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, 7063 stateid_t *stateid, 7064 unsigned short typemask, unsigned short statusmask, 7065 struct nfs4_stid **s, struct nfsd_net *nn) 7066 { 7067 __be32 status; 7068 struct nfs4_stid *stid; 7069 bool return_revoked = false; 7070 7071 /* 7072 * Only return revoked delegations if explicitly asked. 7073 * Otherwise we report revoked or bad_stateid status. 7074 */ 7075 if (statusmask & SC_STATUS_REVOKED) 7076 return_revoked = true; 7077 if (typemask & SC_TYPE_DELEG) 7078 /* Always allow REVOKED for DELEG so we can 7079 * return the appropriate error. 7080 */ 7081 statusmask |= SC_STATUS_REVOKED; 7082 7083 statusmask |= SC_STATUS_ADMIN_REVOKED | SC_STATUS_FREEABLE; 7084 7085 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || 7086 CLOSE_STATEID(stateid)) 7087 return nfserr_bad_stateid; 7088 status = set_client(&stateid->si_opaque.so_clid, cstate, nn); 7089 if (status == nfserr_stale_clientid) { 7090 if (cstate->session) 7091 return nfserr_bad_stateid; 7092 return nfserr_stale_stateid; 7093 } 7094 if (status) 7095 return status; 7096 stid = find_stateid_by_type(cstate->clp, stateid, typemask, statusmask); 7097 if (!stid) 7098 return nfserr_bad_stateid; 7099 if ((stid->sc_status & SC_STATUS_REVOKED) && !return_revoked) { 7100 nfs4_put_stid(stid); 7101 return nfserr_deleg_revoked; 7102 } 7103 if (stid->sc_status & SC_STATUS_ADMIN_REVOKED) { 7104 nfsd40_drop_revoked_stid(cstate->clp, stateid); 7105 nfs4_put_stid(stid); 7106 return nfserr_admin_revoked; 7107 } 7108 *s = stid; 7109 return nfs_ok; 7110 } 7111 7112 static struct nfsd_file * 7113 nfs4_find_file(struct nfs4_stid *s, int flags) 7114 { 7115 struct nfsd_file *ret = NULL; 7116 7117 if (!s || s->sc_status) 7118 return NULL; 7119 7120 switch (s->sc_type) { 7121 case SC_TYPE_DELEG: 7122 spin_lock(&s->sc_file->fi_lock); 7123 ret = nfsd_file_get(s->sc_file->fi_deleg_file); 7124 spin_unlock(&s->sc_file->fi_lock); 7125 break; 7126 case SC_TYPE_OPEN: 7127 case SC_TYPE_LOCK: 7128 if (flags & RD_STATE) 7129 ret = find_readable_file(s->sc_file); 7130 else 7131 ret = find_writeable_file(s->sc_file); 7132 } 7133 7134 return ret; 7135 } 7136 7137 static __be32 7138 nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags) 7139 { 7140 __be32 status; 7141 7142 status = nfsd4_check_openowner_confirmed(ols); 7143 if (status) 7144 return status; 7145 return nfs4_check_openmode(ols, flags); 7146 } 7147 7148 static __be32 7149 nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s, 7150 struct nfsd_file **nfp, int flags) 7151 { 7152 int acc = (flags & RD_STATE) ?
NFSD_MAY_READ : NFSD_MAY_WRITE; 7153 struct nfsd_file *nf; 7154 __be32 status; 7155 7156 nf = nfs4_find_file(s, flags); 7157 if (nf) { 7158 status = nfsd_permission(&rqstp->rq_cred, 7159 fhp->fh_export, fhp->fh_dentry, 7160 acc | NFSD_MAY_OWNER_OVERRIDE); 7161 if (status) { 7162 nfsd_file_put(nf); 7163 goto out; 7164 } 7165 } else { 7166 status = nfsd_file_acquire(rqstp, fhp, acc, &nf); 7167 if (status) 7168 return status; 7169 } 7170 *nfp = nf; 7171 out: 7172 return status; 7173 } 7174 static void 7175 _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps) 7176 { 7177 WARN_ON_ONCE(cps->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID); 7178 if (!refcount_dec_and_test(&cps->cp_stateid.cs_count)) 7179 return; 7180 list_del(&cps->cp_list); 7181 idr_remove(&nn->s2s_cp_stateids, 7182 cps->cp_stateid.cs_stid.si_opaque.so_id); 7183 kfree(cps); 7184 } 7185 /* 7186 * A READ from an inter server to server COPY will have a 7187 * copy stateid. Look up the copy notify stateid from the 7188 * idr structure and take a reference on it. 7189 */ 7190 __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st, 7191 struct nfs4_client *clp, 7192 struct nfs4_cpntf_state **cps) 7193 { 7194 copy_stateid_t *cps_t; 7195 struct nfs4_cpntf_state *state = NULL; 7196 7197 if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id) 7198 return nfserr_bad_stateid; 7199 spin_lock(&nn->s2s_cp_lock); 7200 cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id); 7201 if (cps_t) { 7202 state = container_of(cps_t, struct nfs4_cpntf_state, 7203 cp_stateid); 7204 if (state->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID) { 7205 state = NULL; 7206 goto unlock; 7207 } 7208 if (!clp) 7209 refcount_inc(&state->cp_stateid.cs_count); 7210 else 7211 _free_cpntf_state_locked(nn, state); 7212 } 7213 unlock: 7214 spin_unlock(&nn->s2s_cp_lock); 7215 if (!state) 7216 return nfserr_bad_stateid; 7217 if (!clp) 7218 *cps = state; 7219 return 0; 7220 } 7221 7222 static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st, 7223 struct nfs4_stid **stid) 7224 { 7225 __be32 status; 7226 struct nfs4_cpntf_state *cps = NULL; 7227 struct nfs4_client *found; 7228 7229 status = manage_cpntf_state(nn, st, NULL, &cps); 7230 if (status) 7231 return status; 7232 7233 cps->cpntf_time = ktime_get_boottime_seconds(); 7234 7235 status = nfserr_expired; 7236 found = lookup_clientid(&cps->cp_p_clid, true, nn); 7237 if (!found) 7238 goto out; 7239 7240 *stid = find_stateid_by_type(found, &cps->cp_p_stateid, 7241 SC_TYPE_DELEG|SC_TYPE_OPEN|SC_TYPE_LOCK, 7242 0); 7243 if (*stid) 7244 status = nfs_ok; 7245 else 7246 status = nfserr_bad_stateid; 7247 7248 put_client_renew(found); 7249 out: 7250 nfs4_put_cpntf_state(nn, cps); 7251 return status; 7252 } 7253 7254 void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps) 7255 { 7256 spin_lock(&nn->s2s_cp_lock); 7257 _free_cpntf_state_locked(nn, cps); 7258 spin_unlock(&nn->s2s_cp_lock); 7259 } 7260 7261 /** 7262 * nfs4_preprocess_stateid_op - find and prep stateid for an operation 7263 * @rqstp: incoming request from client 7264 * @cstate: current compound state 7265 * @fhp: filehandle associated with requested stateid 7266 * @stateid: stateid (provided by client) 7267 * @flags: flags describing type of operation to be done 7268 * @nfp: optional nfsd_file return pointer (may be NULL) 7269 * @cstid: optional returned nfs4_stid pointer (may be NULL) 7270 * 7271 * Given info from the client, look up a nfs4_stid for the operation. 
On 7272 * success, it returns a reference to the nfs4_stid and/or the nfsd_file 7273 * associated with it. 7274 */ 7275 __be32 7276 nfs4_preprocess_stateid_op(struct svc_rqst *rqstp, 7277 struct nfsd4_compound_state *cstate, struct svc_fh *fhp, 7278 stateid_t *stateid, int flags, struct nfsd_file **nfp, 7279 struct nfs4_stid **cstid) 7280 { 7281 struct net *net = SVC_NET(rqstp); 7282 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 7283 struct nfs4_stid *s = NULL; 7284 __be32 status; 7285 7286 if (nfp) 7287 *nfp = NULL; 7288 7289 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) { 7290 status = check_special_stateids(net, fhp, stateid, flags); 7291 goto done; 7292 } 7293 7294 status = nfsd4_lookup_stateid(cstate, stateid, 7295 SC_TYPE_DELEG|SC_TYPE_OPEN|SC_TYPE_LOCK, 7296 0, &s, nn); 7297 if (status == nfserr_bad_stateid) 7298 status = find_cpntf_state(nn, stateid, &s); 7299 if (status) 7300 return status; 7301 status = nfsd4_stid_check_stateid_generation(stateid, s, 7302 nfsd4_has_session(cstate)); 7303 if (status) 7304 goto out; 7305 7306 switch (s->sc_type) { 7307 case SC_TYPE_DELEG: 7308 status = nfs4_check_delegmode(delegstateid(s), flags); 7309 break; 7310 case SC_TYPE_OPEN: 7311 case SC_TYPE_LOCK: 7312 status = nfs4_check_olstateid(openlockstateid(s), flags); 7313 break; 7314 } 7315 if (status) 7316 goto out; 7317 status = nfs4_check_fh(fhp, s); 7318 7319 done: 7320 if (status == nfs_ok && nfp) 7321 status = nfs4_check_file(rqstp, fhp, s, nfp, flags); 7322 out: 7323 if (s) { 7324 if (!status && cstid) 7325 *cstid = s; 7326 else 7327 nfs4_put_stid(s); 7328 } 7329 return status; 7330 } 7331 7332 /* 7333 * Test if the stateid is valid 7334 */ 7335 __be32 7336 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 7337 union nfsd4_op_u *u) 7338 { 7339 struct nfsd4_test_stateid *test_stateid = &u->test_stateid; 7340 struct nfsd4_test_stateid_id *stateid; 7341 struct nfs4_client *cl = cstate->clp; 7342 7343 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list) 7344 stateid->ts_id_status = 7345 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid); 7346 7347 return nfs_ok; 7348 } 7349 7350 static __be32 7351 nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s) 7352 { 7353 struct nfs4_ol_stateid *stp = openlockstateid(s); 7354 __be32 ret; 7355 7356 ret = nfsd4_lock_ol_stateid(stp); 7357 if (ret) 7358 goto out_put_stid; 7359 7360 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); 7361 if (ret) 7362 goto out; 7363 7364 ret = nfserr_locks_held; 7365 if (check_for_locks(stp->st_stid.sc_file, 7366 lockowner(stp->st_stateowner))) 7367 goto out; 7368 7369 release_lock_stateid(stp); 7370 ret = nfs_ok; 7371 7372 out: 7373 mutex_unlock(&stp->st_mutex); 7374 out_put_stid: 7375 nfs4_put_stid(s); 7376 return ret; 7377 } 7378 7379 __be32 7380 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 7381 union nfsd4_op_u *u) 7382 { 7383 struct nfsd4_free_stateid *free_stateid = &u->free_stateid; 7384 stateid_t *stateid = &free_stateid->fr_stateid; 7385 struct nfs4_stid *s; 7386 struct nfs4_delegation *dp; 7387 struct nfs4_client *cl = cstate->clp; 7388 __be32 ret = nfserr_bad_stateid; 7389 7390 spin_lock(&cl->cl_lock); 7391 s = find_stateid_locked(cl, stateid); 7392 if (!s || s->sc_status & SC_STATUS_CLOSED) 7393 goto out_unlock; 7394 if (s->sc_status & SC_STATUS_ADMIN_REVOKED) { 7395 nfsd4_drop_revoked_stid(s); 7396 ret = nfs_ok; 7397 goto out; 7398 } 7399 spin_lock(&s->sc_lock); 7400 switch (s->sc_type) { 7401 
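/*
 * What FREE_STATEID may free depends on the stateid type: a
 * delegation is freed here only if it has already been revoked,
 * an open stateid must be CLOSEd instead, and a lock stateid is
 * freed only once no locks are held (checked in
 * nfsd4_free_lock_stateid()).
 */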
case SC_TYPE_DELEG: 7402 if (s->sc_status & SC_STATUS_REVOKED) { 7403 s->sc_status |= SC_STATUS_CLOSED; 7404 spin_unlock(&s->sc_lock); 7405 dp = delegstateid(s); 7406 if (s->sc_status & SC_STATUS_FREEABLE) 7407 list_del_init(&dp->dl_recall_lru); 7408 s->sc_status |= SC_STATUS_FREED; 7409 spin_unlock(&cl->cl_lock); 7410 nfs4_put_stid(s); 7411 ret = nfs_ok; 7412 goto out; 7413 } 7414 ret = nfserr_locks_held; 7415 break; 7416 case SC_TYPE_OPEN: 7417 ret = check_stateid_generation(stateid, &s->sc_stateid, 1); 7418 if (ret) 7419 break; 7420 ret = nfserr_locks_held; 7421 break; 7422 case SC_TYPE_LOCK: 7423 spin_unlock(&s->sc_lock); 7424 refcount_inc(&s->sc_count); 7425 spin_unlock(&cl->cl_lock); 7426 ret = nfsd4_free_lock_stateid(stateid, s); 7427 goto out; 7428 } 7429 spin_unlock(&s->sc_lock); 7430 out_unlock: 7431 spin_unlock(&cl->cl_lock); 7432 out: 7433 return ret; 7434 } 7435 7436 static inline int 7437 setlkflg(int type) 7438 { 7439 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ? 7440 RD_STATE : WR_STATE; 7441 } 7442 7443 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp) 7444 { 7445 struct svc_fh *current_fh = &cstate->current_fh; 7446 struct nfs4_stateowner *sop = stp->st_stateowner; 7447 __be32 status; 7448 7449 status = nfsd4_check_seqid(cstate, sop, seqid); 7450 if (status) 7451 return status; 7452 status = nfsd4_lock_ol_stateid(stp); 7453 if (status != nfs_ok) 7454 return status; 7455 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); 7456 if (status == nfs_ok) 7457 status = nfs4_check_fh(current_fh, &stp->st_stid); 7458 if (status != nfs_ok) 7459 mutex_unlock(&stp->st_mutex); 7460 return status; 7461 } 7462 7463 /** 7464 * nfs4_preprocess_seqid_op - find and prep an ol_stateid for a seqid-morphing op 7465 * @cstate: compound state 7466 * @seqid: seqid (provided by client) 7467 * @stateid: stateid (provided by client) 7468 * @typemask: mask of allowable types for this operation 7469 * @statusmask: mask of allowed states: 0 or SC_STATUS_CLOSED 7470 * @stpp: return pointer for the stateid found 7471 * @nn: net namespace for request 7472 * 7473 * Given a stateid+seqid from a client, look up an nfs4_ol_stateid and 7474 * return it in @stpp. On an nfs_ok return, the returned stateid will 7475 * have its st_mutex locked.
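 * On any other return, no reference is held and *stpp is left NULL.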
7476 */ 7477 static __be32 7478 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, 7479 stateid_t *stateid, 7480 unsigned short typemask, unsigned short statusmask, 7481 struct nfs4_ol_stateid **stpp, 7482 struct nfsd_net *nn) 7483 { 7484 __be32 status; 7485 struct nfs4_stid *s; 7486 struct nfs4_ol_stateid *stp = NULL; 7487 7488 trace_nfsd_preprocess(seqid, stateid); 7489 7490 *stpp = NULL; 7491 retry: 7492 status = nfsd4_lookup_stateid(cstate, stateid, 7493 typemask, statusmask, &s, nn); 7494 if (status) 7495 return status; 7496 stp = openlockstateid(s); 7497 if (nfsd4_cstate_assign_replay(cstate, stp->st_stateowner) == -EAGAIN) { 7498 nfs4_put_stateowner(stp->st_stateowner); 7499 goto retry; 7500 } 7501 7502 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp); 7503 if (!status) 7504 *stpp = stp; 7505 else 7506 nfs4_put_stid(&stp->st_stid); 7507 return status; 7508 } 7509 7510 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, 7511 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn) 7512 { 7513 __be32 status; 7514 struct nfs4_openowner *oo; 7515 struct nfs4_ol_stateid *stp; 7516 7517 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid, 7518 SC_TYPE_OPEN, 0, &stp, nn); 7519 if (status) 7520 return status; 7521 oo = openowner(stp->st_stateowner); 7522 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) { 7523 mutex_unlock(&stp->st_mutex); 7524 nfs4_put_stid(&stp->st_stid); 7525 return nfserr_bad_stateid; 7526 } 7527 *stpp = stp; 7528 return nfs_ok; 7529 } 7530 7531 __be32 7532 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 7533 union nfsd4_op_u *u) 7534 { 7535 struct nfsd4_open_confirm *oc = &u->open_confirm; 7536 __be32 status; 7537 struct nfs4_openowner *oo; 7538 struct nfs4_ol_stateid *stp; 7539 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 7540 7541 dprintk("NFSD: nfsd4_open_confirm on file %pd\n", 7542 cstate->current_fh.fh_dentry); 7543 7544 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); 7545 if (status) 7546 return status; 7547 7548 status = nfs4_preprocess_seqid_op(cstate, 7549 oc->oc_seqid, &oc->oc_req_stateid, 7550 SC_TYPE_OPEN, 0, &stp, nn); 7551 if (status) 7552 goto out; 7553 oo = openowner(stp->st_stateowner); 7554 status = nfserr_bad_stateid; 7555 if (oo->oo_flags & NFS4_OO_CONFIRMED) { 7556 mutex_unlock(&stp->st_mutex); 7557 goto put_stateid; 7558 } 7559 oo->oo_flags |= NFS4_OO_CONFIRMED; 7560 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid); 7561 mutex_unlock(&stp->st_mutex); 7562 trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid); 7563 nfsd4_client_record_create(oo->oo_owner.so_client); 7564 status = nfs_ok; 7565 put_stateid: 7566 nfs4_put_stid(&stp->st_stid); 7567 out: 7568 nfsd4_bump_seqid(cstate, status); 7569 return status; 7570 } 7571 7572 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access) 7573 { 7574 if (!test_access(access, stp)) 7575 return; 7576 nfs4_file_put_access(stp->st_stid.sc_file, access); 7577 clear_access(access, stp); 7578 } 7579 7580 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access) 7581 { 7582 switch (to_access) { 7583 case NFS4_SHARE_ACCESS_READ: 7584 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE); 7585 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); 7586 break; 7587 case NFS4_SHARE_ACCESS_WRITE: 7588 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ); 7589 
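/* dropping to write-only also gives up any BOTH access */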
nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH); 7590 break; 7591 case NFS4_SHARE_ACCESS_BOTH: 7592 break; 7593 default: 7594 WARN_ON_ONCE(1); 7595 } 7596 } 7597 7598 __be32 7599 nfsd4_open_downgrade(struct svc_rqst *rqstp, 7600 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) 7601 { 7602 struct nfsd4_open_downgrade *od = &u->open_downgrade; 7603 __be32 status; 7604 struct nfs4_ol_stateid *stp; 7605 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 7606 7607 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n", 7608 cstate->current_fh.fh_dentry); 7609 7610 /* We don't yet support WANT bits: */ 7611 if (od->od_deleg_want) 7612 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__, 7613 od->od_deleg_want); 7614 7615 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid, 7616 &od->od_stateid, &stp, nn); 7617 if (status) 7618 goto out; 7619 status = nfserr_inval; 7620 if (!test_access(od->od_share_access, stp)) { 7621 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n", 7622 stp->st_access_bmap, od->od_share_access); 7623 goto put_stateid; 7624 } 7625 if (!test_deny(od->od_share_deny, stp)) { 7626 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n", 7627 stp->st_deny_bmap, od->od_share_deny); 7628 goto put_stateid; 7629 } 7630 nfs4_stateid_downgrade(stp, od->od_share_access); 7631 reset_union_bmap_deny(od->od_share_deny, stp); 7632 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid); 7633 status = nfs_ok; 7634 put_stateid: 7635 mutex_unlock(&stp->st_mutex); 7636 nfs4_put_stid(&stp->st_stid); 7637 out: 7638 nfsd4_bump_seqid(cstate, status); 7639 return status; 7640 } 7641 7642 static bool nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) 7643 { 7644 struct nfs4_client *clp = s->st_stid.sc_client; 7645 bool unhashed; 7646 LIST_HEAD(reaplist); 7647 struct nfs4_ol_stateid *stp; 7648 7649 spin_lock(&clp->cl_lock); 7650 unhashed = unhash_open_stateid(s, &reaplist); 7651 7652 if (clp->cl_minorversion) { 7653 if (unhashed) 7654 put_ol_stateid_locked(s, &reaplist); 7655 spin_unlock(&clp->cl_lock); 7656 list_for_each_entry(stp, &reaplist, st_locks) 7657 nfs4_free_cpntf_statelist(clp->net, &stp->st_stid); 7658 free_ol_stateid_reaplist(&reaplist); 7659 return false; 7660 } else { 7661 spin_unlock(&clp->cl_lock); 7662 free_ol_stateid_reaplist(&reaplist); 7663 return unhashed; 7664 } 7665 } 7666 7667 /* 7668 * nfs4_unlock_state() called after encode 7669 */ 7670 __be32 7671 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 7672 union nfsd4_op_u *u) 7673 { 7674 struct nfsd4_close *close = &u->close; 7675 __be32 status; 7676 struct nfs4_ol_stateid *stp; 7677 struct net *net = SVC_NET(rqstp); 7678 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 7679 bool need_move_to_close_list; 7680 7681 dprintk("NFSD: nfsd4_close on file %pd\n", 7682 cstate->current_fh.fh_dentry); 7683 7684 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid, 7685 &close->cl_stateid, 7686 SC_TYPE_OPEN, SC_STATUS_CLOSED, 7687 &stp, nn); 7688 nfsd4_bump_seqid(cstate, status); 7689 if (status) 7690 goto out; 7691 7692 spin_lock(&stp->st_stid.sc_client->cl_lock); 7693 stp->st_stid.sc_status |= SC_STATUS_CLOSED; 7694 spin_unlock(&stp->st_stid.sc_client->cl_lock); 7695 7696 /* 7697 * Technically we don't _really_ have to increment or copy it, since 7698 * it should just be gone after this operation and we clobber the 7699 * copied value below, but we continue to do so here just to ensure 7700 * that 
racing ops see that there was a state change. 7701 */ 7702 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid); 7703 7704 need_move_to_close_list = nfsd4_close_open_stateid(stp); 7705 mutex_unlock(&stp->st_mutex); 7706 if (need_move_to_close_list) 7707 move_to_close_lru(stp, net); 7708 7709 /* v4.1+ suggests that we send a special stateid in here, since the 7710 * clients should just ignore this anyway. Since this is not useful 7711 * for v4.0 clients either, we set it to the special close_stateid 7712 * universally. 7713 * 7714 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5 7715 */ 7716 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid)); 7717 7718 /* put reference from nfs4_preprocess_seqid_op */ 7719 nfs4_put_stid(&stp->st_stid); 7720 out: 7721 return status; 7722 } 7723 7724 __be32 7725 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 7726 union nfsd4_op_u *u) 7727 { 7728 struct nfsd4_delegreturn *dr = &u->delegreturn; 7729 struct nfs4_delegation *dp; 7730 stateid_t *stateid = &dr->dr_stateid; 7731 struct nfs4_stid *s; 7732 __be32 status; 7733 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 7734 7735 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 7736 return status; 7737 7738 status = nfsd4_lookup_stateid(cstate, stateid, SC_TYPE_DELEG, SC_STATUS_REVOKED, &s, nn); 7739 if (status) 7740 goto out; 7741 dp = delegstateid(s); 7742 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate)); 7743 if (status) 7744 goto put_stateid; 7745 7746 trace_nfsd_deleg_return(stateid); 7747 destroy_delegation(dp); 7748 smp_mb__after_atomic(); 7749 wake_up_var(d_inode(cstate->current_fh.fh_dentry)); 7750 put_stateid: 7751 nfs4_put_stid(&dp->dl_stid); 7752 out: 7753 return status; 7754 } 7755 7756 /* last octet in a range */ 7757 static inline u64 7758 last_byte_offset(u64 start, u64 len) 7759 { 7760 u64 end; 7761 7762 WARN_ON_ONCE(!len); 7763 end = start + len; 7764 return end > start ? end - 1: NFS4_MAX_UINT64; 7765 } 7766 7767 /* 7768 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that 7769 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th 7770 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit 7771 * locking, this prevents us from being completely protocol-compliant. The 7772 * real solution to this problem is to start using unsigned file offsets in 7773 * the VFS, but this is a very deep change! 
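 *
 * Illustrative example: a request to lock a range starting at offset 2^63
 * presents an lk_offset that does not fit in the signed loff_t fields of
 * struct file_lock; after assignment it appears as a negative value, and
 * nfs4_transform_lock_offset() below simply clamps such range ends to
 * OFFSET_MAX instead of failing the request.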
7774 */ 7775 static inline void 7776 nfs4_transform_lock_offset(struct file_lock *lock) 7777 { 7778 if (lock->fl_start < 0) 7779 lock->fl_start = OFFSET_MAX; 7780 if (lock->fl_end < 0) 7781 lock->fl_end = OFFSET_MAX; 7782 } 7783 7784 static fl_owner_t 7785 nfsd4_lm_get_owner(fl_owner_t owner) 7786 { 7787 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner; 7788 7789 nfs4_get_stateowner(&lo->lo_owner); 7790 return owner; 7791 } 7792 7793 static void 7794 nfsd4_lm_put_owner(fl_owner_t owner) 7795 { 7796 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner; 7797 7798 if (lo) 7799 nfs4_put_stateowner(&lo->lo_owner); 7800 } 7801 7802 /* return pointer to struct nfs4_client if client is expirable */ 7803 static bool 7804 nfsd4_lm_lock_expirable(struct file_lock *cfl) 7805 { 7806 struct nfs4_lockowner *lo = (struct nfs4_lockowner *) cfl->c.flc_owner; 7807 struct nfs4_client *clp = lo->lo_owner.so_client; 7808 struct nfsd_net *nn; 7809 7810 if (try_to_expire_client(clp)) { 7811 nn = net_generic(clp->net, nfsd_net_id); 7812 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0); 7813 return true; 7814 } 7815 return false; 7816 } 7817 7818 /* schedule laundromat to run immediately and wait for it to complete */ 7819 static void 7820 nfsd4_lm_expire_lock(void) 7821 { 7822 flush_workqueue(laundry_wq); 7823 } 7824 7825 static void 7826 nfsd4_lm_notify(struct file_lock *fl) 7827 { 7828 struct nfs4_lockowner *lo = (struct nfs4_lockowner *) fl->c.flc_owner; 7829 struct net *net = lo->lo_owner.so_client->net; 7830 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 7831 struct nfsd4_blocked_lock *nbl = container_of(fl, 7832 struct nfsd4_blocked_lock, nbl_lock); 7833 bool queue = false; 7834 7835 /* An empty list means that something else is going to be using it */ 7836 spin_lock(&nn->blocked_locks_lock); 7837 if (!list_empty(&nbl->nbl_list)) { 7838 list_del_init(&nbl->nbl_list); 7839 list_del_init(&nbl->nbl_lru); 7840 queue = true; 7841 } 7842 spin_unlock(&nn->blocked_locks_lock); 7843 7844 if (queue) { 7845 trace_nfsd_cb_notify_lock(lo, nbl); 7846 nfsd4_try_run_cb(&nbl->nbl_cb); 7847 } 7848 } 7849 7850 static const struct lock_manager_operations nfsd_posix_mng_ops = { 7851 .lm_mod_owner = THIS_MODULE, 7852 .lm_notify = nfsd4_lm_notify, 7853 .lm_get_owner = nfsd4_lm_get_owner, 7854 .lm_put_owner = nfsd4_lm_put_owner, 7855 .lm_lock_expirable = nfsd4_lm_lock_expirable, 7856 .lm_expire_lock = nfsd4_lm_expire_lock, 7857 }; 7858 7859 static inline void 7860 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny) 7861 { 7862 struct nfs4_lockowner *lo; 7863 7864 if (fl->fl_lmops == &nfsd_posix_mng_ops) { 7865 lo = (struct nfs4_lockowner *) fl->c.flc_owner; 7866 xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner, 7867 GFP_KERNEL); 7868 if (!deny->ld_owner.data) 7869 /* We just don't care that much */ 7870 goto nevermind; 7871 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid; 7872 } else { 7873 nevermind: 7874 deny->ld_owner.len = 0; 7875 deny->ld_owner.data = NULL; 7876 deny->ld_clientid.cl_boot = 0; 7877 deny->ld_clientid.cl_id = 0; 7878 } 7879 deny->ld_start = fl->fl_start; 7880 deny->ld_length = NFS4_MAX_UINT64; 7881 if (fl->fl_end != NFS4_MAX_UINT64) 7882 deny->ld_length = fl->fl_end - fl->fl_start + 1; 7883 deny->ld_type = NFS4_READ_LT; 7884 if (fl->c.flc_type != F_RDLCK) 7885 deny->ld_type = NFS4_WRITE_LT; 7886 } 7887 7888 static struct nfs4_lockowner * 7889 find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner) 7890 { 7891 unsigned int 
strhashval = ownerstr_hashval(owner); 7892 struct nfs4_stateowner *so; 7893 7894 lockdep_assert_held(&clp->cl_lock); 7895 7896 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval], 7897 so_strhash) { 7898 if (so->so_is_open_owner) 7899 continue; 7900 if (same_owner_str(so, owner)) 7901 return lockowner(nfs4_get_stateowner(so)); 7902 } 7903 return NULL; 7904 } 7905 7906 static struct nfs4_lockowner * 7907 find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner) 7908 { 7909 struct nfs4_lockowner *lo; 7910 7911 spin_lock(&clp->cl_lock); 7912 lo = find_lockowner_str_locked(clp, owner); 7913 spin_unlock(&clp->cl_lock); 7914 return lo; 7915 } 7916 7917 static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop) 7918 { 7919 unhash_lockowner_locked(lockowner(sop)); 7920 } 7921 7922 static void nfs4_free_lockowner(struct nfs4_stateowner *sop) 7923 { 7924 struct nfs4_lockowner *lo = lockowner(sop); 7925 7926 kmem_cache_free(lockowner_slab, lo); 7927 } 7928 7929 static const struct nfs4_stateowner_operations lockowner_ops = { 7930 .so_unhash = nfs4_unhash_lockowner, 7931 .so_free = nfs4_free_lockowner, 7932 }; 7933 7934 /* 7935 * Alloc a lock owner structure. 7936 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has 7937 * occurred. 7938 * 7939 * strhashval = ownerstr_hashval 7940 */ 7941 static struct nfs4_lockowner * 7942 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, 7943 struct nfs4_ol_stateid *open_stp, 7944 struct nfsd4_lock *lock) 7945 { 7946 struct nfs4_lockowner *lo, *ret; 7947 7948 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp); 7949 if (!lo) 7950 return NULL; 7951 INIT_LIST_HEAD(&lo->lo_blocked); 7952 INIT_LIST_HEAD(&lo->lo_owner.so_stateids); 7953 lo->lo_owner.so_is_open_owner = 0; 7954 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid; 7955 lo->lo_owner.so_ops = &lockowner_ops; 7956 spin_lock(&clp->cl_lock); 7957 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner); 7958 if (ret == NULL) { 7959 list_add(&lo->lo_owner.so_strhash, 7960 &clp->cl_ownerstr_hashtbl[strhashval]); 7961 ret = lo; 7962 } else 7963 nfs4_free_stateowner(&lo->lo_owner); 7964 7965 spin_unlock(&clp->cl_lock); 7966 return ret; 7967 } 7968 7969 static struct nfs4_ol_stateid * 7970 find_lock_stateid(const struct nfs4_lockowner *lo, 7971 const struct nfs4_ol_stateid *ost) 7972 { 7973 struct nfs4_ol_stateid *lst; 7974 7975 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock); 7976 7977 /* If ost is not hashed, ost->st_locks will not be valid */ 7978 if (!nfs4_ol_stateid_unhashed(ost)) 7979 list_for_each_entry(lst, &ost->st_locks, st_locks) { 7980 if (lst->st_stateowner == &lo->lo_owner) { 7981 refcount_inc(&lst->st_stid.sc_count); 7982 return lst; 7983 } 7984 } 7985 return NULL; 7986 } 7987 7988 static struct nfs4_ol_stateid * 7989 init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, 7990 struct nfs4_file *fp, struct inode *inode, 7991 struct nfs4_ol_stateid *open_stp) 7992 { 7993 struct nfs4_client *clp = lo->lo_owner.so_client; 7994 struct nfs4_ol_stateid *retstp; 7995 7996 mutex_init(&stp->st_mutex); 7997 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX); 7998 retry: 7999 spin_lock(&clp->cl_lock); 8000 if (nfs4_ol_stateid_unhashed(open_stp)) 8001 goto out_close; 8002 retstp = find_lock_stateid(lo, open_stp); 8003 if (retstp) 8004 goto out_found; 8005 refcount_inc(&stp->st_stid.sc_count); 8006 stp->st_stid.sc_type = SC_TYPE_LOCK; 8007 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); 8008 
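	/*
	 * The new lock stateid holds its own references on the lockowner
	 * (taken above) and on the nfs4_file (taken just below); both are
	 * released again when the stateid is eventually torn down.
	 */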
get_nfs4_file(fp); 8009 stp->st_stid.sc_file = fp; 8010 stp->st_access_bmap = 0; 8011 stp->st_deny_bmap = open_stp->st_deny_bmap; 8012 stp->st_openstp = open_stp; 8013 spin_lock(&fp->fi_lock); 8014 list_add(&stp->st_locks, &open_stp->st_locks); 8015 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids); 8016 list_add(&stp->st_perfile, &fp->fi_stateids); 8017 spin_unlock(&fp->fi_lock); 8018 spin_unlock(&clp->cl_lock); 8019 return stp; 8020 out_found: 8021 spin_unlock(&clp->cl_lock); 8022 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) { 8023 nfs4_put_stid(&retstp->st_stid); 8024 goto retry; 8025 } 8026 /* To keep mutex tracking happy */ 8027 mutex_unlock(&stp->st_mutex); 8028 return retstp; 8029 out_close: 8030 spin_unlock(&clp->cl_lock); 8031 mutex_unlock(&stp->st_mutex); 8032 return NULL; 8033 } 8034 8035 static struct nfs4_ol_stateid * 8036 find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi, 8037 struct inode *inode, struct nfs4_ol_stateid *ost, 8038 bool *new) 8039 { 8040 struct nfs4_stid *ns = NULL; 8041 struct nfs4_ol_stateid *lst; 8042 struct nfs4_openowner *oo = openowner(ost->st_stateowner); 8043 struct nfs4_client *clp = oo->oo_owner.so_client; 8044 8045 *new = false; 8046 spin_lock(&clp->cl_lock); 8047 lst = find_lock_stateid(lo, ost); 8048 spin_unlock(&clp->cl_lock); 8049 if (lst != NULL) { 8050 if (nfsd4_lock_ol_stateid(lst) == nfs_ok) 8051 goto out; 8052 nfs4_put_stid(&lst->st_stid); 8053 } 8054 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid); 8055 if (ns == NULL) 8056 return NULL; 8057 8058 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost); 8059 if (lst == openlockstateid(ns)) 8060 *new = true; 8061 else 8062 nfs4_put_stid(ns); 8063 out: 8064 return lst; 8065 } 8066 8067 static int 8068 check_lock_length(u64 offset, u64 length) 8069 { 8070 return ((length == 0) || ((length != NFS4_MAX_UINT64) && 8071 (length > ~offset))); 8072 } 8073 8074 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access) 8075 { 8076 struct nfs4_file *fp = lock_stp->st_stid.sc_file; 8077 8078 lockdep_assert_held(&fp->fi_lock); 8079 8080 if (test_access(access, lock_stp)) 8081 return; 8082 __nfs4_file_get_access(fp, access); 8083 set_access(access, lock_stp); 8084 } 8085 8086 static __be32 8087 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, 8088 struct nfs4_ol_stateid *ost, 8089 struct nfsd4_lock *lock, 8090 struct nfs4_ol_stateid **plst, bool *new) 8091 { 8092 __be32 status; 8093 struct nfs4_file *fi = ost->st_stid.sc_file; 8094 struct nfs4_openowner *oo = openowner(ost->st_stateowner); 8095 struct nfs4_client *cl = oo->oo_owner.so_client; 8096 struct inode *inode = d_inode(cstate->current_fh.fh_dentry); 8097 struct nfs4_lockowner *lo; 8098 struct nfs4_ol_stateid *lst; 8099 unsigned int strhashval; 8100 8101 lo = find_lockowner_str(cl, &lock->lk_new_owner); 8102 if (!lo) { 8103 strhashval = ownerstr_hashval(&lock->lk_new_owner); 8104 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock); 8105 if (lo == NULL) 8106 return nfserr_jukebox; 8107 } else { 8108 /* with an existing lockowner, seqids must be the same */ 8109 status = nfserr_bad_seqid; 8110 if (!cstate->minorversion && 8111 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid) 8112 goto out; 8113 } 8114 8115 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new); 8116 if (lst == NULL) { 8117 status = nfserr_jukebox; 8118 goto out; 8119 } 8120 8121 status = nfs_ok; 8122 *plst = lst; 8123 out: 8124 nfs4_put_stateowner(&lo->lo_owner); 8125 return 
status; 8126 } 8127 8128 /* 8129 * LOCK operation 8130 */ 8131 __be32 8132 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 8133 union nfsd4_op_u *u) 8134 { 8135 struct nfsd4_lock *lock = &u->lock; 8136 struct nfs4_openowner *open_sop = NULL; 8137 struct nfs4_lockowner *lock_sop = NULL; 8138 struct nfs4_ol_stateid *lock_stp = NULL; 8139 struct nfs4_ol_stateid *open_stp = NULL; 8140 struct nfs4_file *fp; 8141 struct nfsd_file *nf = NULL; 8142 struct nfsd4_blocked_lock *nbl = NULL; 8143 struct file_lock *file_lock = NULL; 8144 struct file_lock *conflock = NULL; 8145 __be32 status = 0; 8146 int lkflg; 8147 int err; 8148 bool new = false; 8149 unsigned char type; 8150 unsigned int flags = FL_POSIX; 8151 struct net *net = SVC_NET(rqstp); 8152 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 8153 8154 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n", 8155 (long long) lock->lk_offset, 8156 (long long) lock->lk_length); 8157 8158 if (check_lock_length(lock->lk_offset, lock->lk_length)) 8159 return nfserr_inval; 8160 8161 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0); 8162 if (status != nfs_ok) 8163 return status; 8164 if (exportfs_cannot_lock(cstate->current_fh.fh_dentry->d_sb->s_export_op)) { 8165 status = nfserr_notsupp; 8166 goto out; 8167 } 8168 8169 if (lock->lk_is_new) { 8170 if (nfsd4_has_session(cstate)) 8171 /* See rfc 5661 18.10.3: given clientid is ignored: */ 8172 memcpy(&lock->lk_new_clientid, 8173 &cstate->clp->cl_clientid, 8174 sizeof(clientid_t)); 8175 8176 /* validate and update open stateid and open seqid */ 8177 status = nfs4_preprocess_confirmed_seqid_op(cstate, 8178 lock->lk_new_open_seqid, 8179 &lock->lk_new_open_stateid, 8180 &open_stp, nn); 8181 if (status) 8182 goto out; 8183 mutex_unlock(&open_stp->st_mutex); 8184 open_sop = openowner(open_stp->st_stateowner); 8185 status = nfserr_bad_stateid; 8186 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid, 8187 &lock->lk_new_clientid)) 8188 goto out; 8189 status = lookup_or_create_lock_state(cstate, open_stp, lock, 8190 &lock_stp, &new); 8191 } else { 8192 status = nfs4_preprocess_seqid_op(cstate, 8193 lock->lk_old_lock_seqid, 8194 &lock->lk_old_lock_stateid, 8195 SC_TYPE_LOCK, 0, &lock_stp, 8196 nn); 8197 } 8198 if (status) 8199 goto out; 8200 lock_sop = lockowner(lock_stp->st_stateowner); 8201 8202 lkflg = setlkflg(lock->lk_type); 8203 status = nfs4_check_openmode(lock_stp, lkflg); 8204 if (status) 8205 goto out; 8206 8207 status = nfserr_grace; 8208 if (locks_in_grace(net) && !lock->lk_reclaim) 8209 goto out; 8210 status = nfserr_no_grace; 8211 if (!locks_in_grace(net) && lock->lk_reclaim) 8212 goto out; 8213 8214 if (lock->lk_reclaim) 8215 flags |= FL_RECLAIM; 8216 8217 fp = lock_stp->st_stid.sc_file; 8218 switch (lock->lk_type) { 8219 case NFS4_READW_LT: 8220 fallthrough; 8221 case NFS4_READ_LT: 8222 spin_lock(&fp->fi_lock); 8223 nf = find_readable_file_locked(fp); 8224 if (nf) 8225 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ); 8226 spin_unlock(&fp->fi_lock); 8227 type = F_RDLCK; 8228 break; 8229 case NFS4_WRITEW_LT: 8230 fallthrough; 8231 case NFS4_WRITE_LT: 8232 spin_lock(&fp->fi_lock); 8233 nf = find_writeable_file_locked(fp); 8234 if (nf) 8235 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE); 8236 spin_unlock(&fp->fi_lock); 8237 type = F_WRLCK; 8238 break; 8239 default: 8240 status = nfserr_inval; 8241 goto out; 8242 } 8243 8244 if (!nf) { 8245 status = nfserr_openmode; 8246 goto out; 8247 } 8248 8249 if (lock->lk_type & (NFS4_READW_LT | NFS4_WRITEW_LT) && 8250 
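	    /*
	     * Blocking lock variants (READW/WRITEW) may only sleep when the
	     * client is NFSv4.1+ (so a CB_NOTIFY_LOCK callback can wake it)
	     * and the underlying filesystem can handle asynchronous lock
	     * requests.
	     */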
nfsd4_has_session(cstate) && 8251 locks_can_async_lock(nf->nf_file->f_op)) 8252 flags |= FL_SLEEP; 8253 8254 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn); 8255 if (!nbl) { 8256 dprintk("NFSD: %s: unable to allocate block!\n", __func__); 8257 status = nfserr_jukebox; 8258 goto out; 8259 } 8260 8261 file_lock = &nbl->nbl_lock; 8262 file_lock->c.flc_type = type; 8263 file_lock->c.flc_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner)); 8264 file_lock->c.flc_pid = current->tgid; 8265 file_lock->c.flc_file = nf->nf_file; 8266 file_lock->c.flc_flags = flags; 8267 file_lock->fl_lmops = &nfsd_posix_mng_ops; 8268 file_lock->fl_start = lock->lk_offset; 8269 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length); 8270 nfs4_transform_lock_offset(file_lock); 8271 8272 conflock = locks_alloc_lock(); 8273 if (!conflock) { 8274 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 8275 status = nfserr_jukebox; 8276 goto out; 8277 } 8278 8279 if (flags & FL_SLEEP) { 8280 nbl->nbl_time = ktime_get_boottime_seconds(); 8281 spin_lock(&nn->blocked_locks_lock); 8282 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); 8283 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru); 8284 kref_get(&nbl->nbl_kref); 8285 spin_unlock(&nn->blocked_locks_lock); 8286 } 8287 8288 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock); 8289 switch (err) { 8290 case 0: /* success! */ 8291 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid); 8292 status = 0; 8293 if (lock->lk_reclaim) 8294 nn->somebody_reclaimed = true; 8295 break; 8296 case FILE_LOCK_DEFERRED: 8297 kref_put(&nbl->nbl_kref, free_nbl); 8298 nbl = NULL; 8299 fallthrough; 8300 case -EAGAIN: /* conflock holds conflicting lock */ 8301 status = nfserr_denied; 8302 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); 8303 nfs4_set_lock_denied(conflock, &lock->lk_denied); 8304 break; 8305 case -EDEADLK: 8306 status = nfserr_deadlock; 8307 break; 8308 default: 8309 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err); 8310 status = nfserrno(err); 8311 break; 8312 } 8313 out: 8314 if (nbl) { 8315 /* dequeue it if we queued it before */ 8316 if (flags & FL_SLEEP) { 8317 spin_lock(&nn->blocked_locks_lock); 8318 if (!list_empty(&nbl->nbl_list) && 8319 !list_empty(&nbl->nbl_lru)) { 8320 list_del_init(&nbl->nbl_list); 8321 list_del_init(&nbl->nbl_lru); 8322 kref_put(&nbl->nbl_kref, free_nbl); 8323 } 8324 /* nbl can use one of lists to be linked to reaplist */ 8325 spin_unlock(&nn->blocked_locks_lock); 8326 } 8327 free_blocked_lock(nbl); 8328 } 8329 if (nf) 8330 nfsd_file_put(nf); 8331 if (lock_stp) { 8332 /* Bump seqid manually if the 4.0 replay owner is openowner */ 8333 if (cstate->replay_owner && 8334 cstate->replay_owner != &lock_sop->lo_owner && 8335 seqid_mutating_err(ntohl(status))) 8336 lock_sop->lo_owner.so_seqid++; 8337 8338 /* 8339 * If this is a new, never-before-used stateid, and we are 8340 * returning an error, then just go ahead and release it. 
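		 * The client can only have learned of this stateid from a
		 * successful reply, so unhashing it on failure cannot
		 * invalidate state the client is still using.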
8341 */ 8342 if (status && new) 8343 release_lock_stateid(lock_stp); 8344 8345 mutex_unlock(&lock_stp->st_mutex); 8346 8347 nfs4_put_stid(&lock_stp->st_stid); 8348 } 8349 if (open_stp) 8350 nfs4_put_stid(&open_stp->st_stid); 8351 nfsd4_bump_seqid(cstate, status); 8352 if (conflock) 8353 locks_free_lock(conflock); 8354 return status; 8355 } 8356 8357 void nfsd4_lock_release(union nfsd4_op_u *u) 8358 { 8359 struct nfsd4_lock *lock = &u->lock; 8360 struct nfsd4_lock_denied *deny = &lock->lk_denied; 8361 8362 kfree(deny->ld_owner.data); 8363 } 8364 8365 /* 8366 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN, 8367 * so we do a temporary open here just to get an open file to pass to 8368 * vfs_test_lock. 8369 */ 8370 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock) 8371 { 8372 struct nfsd_file *nf; 8373 struct inode *inode; 8374 __be32 err; 8375 8376 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf); 8377 if (err) 8378 return err; 8379 inode = fhp->fh_dentry->d_inode; 8380 inode_lock(inode); /* to block new leases till after test_lock: */ 8381 err = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ)); 8382 if (err) 8383 goto out; 8384 lock->c.flc_file = nf->nf_file; 8385 err = nfserrno(vfs_test_lock(nf->nf_file, lock)); 8386 lock->c.flc_file = NULL; 8387 out: 8388 inode_unlock(inode); 8389 nfsd_file_put(nf); 8390 return err; 8391 } 8392 8393 /* 8394 * LOCKT operation 8395 */ 8396 __be32 8397 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 8398 union nfsd4_op_u *u) 8399 { 8400 struct nfsd4_lockt *lockt = &u->lockt; 8401 struct file_lock *file_lock = NULL; 8402 struct nfs4_lockowner *lo = NULL; 8403 __be32 status; 8404 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 8405 8406 if (locks_in_grace(SVC_NET(rqstp))) 8407 return nfserr_grace; 8408 8409 if (check_lock_length(lockt->lt_offset, lockt->lt_length)) 8410 return nfserr_inval; 8411 8412 if (!nfsd4_has_session(cstate)) { 8413 status = set_client(&lockt->lt_clientid, cstate, nn); 8414 if (status) 8415 goto out; 8416 } 8417 8418 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0))) 8419 goto out; 8420 8421 file_lock = locks_alloc_lock(); 8422 if (!file_lock) { 8423 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 8424 status = nfserr_jukebox; 8425 goto out; 8426 } 8427 8428 switch (lockt->lt_type) { 8429 case NFS4_READ_LT: 8430 case NFS4_READW_LT: 8431 file_lock->c.flc_type = F_RDLCK; 8432 break; 8433 case NFS4_WRITE_LT: 8434 case NFS4_WRITEW_LT: 8435 file_lock->c.flc_type = F_WRLCK; 8436 break; 8437 default: 8438 dprintk("NFSD: nfs4_lockt: bad lock type!\n"); 8439 status = nfserr_inval; 8440 goto out; 8441 } 8442 8443 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner); 8444 if (lo) 8445 file_lock->c.flc_owner = (fl_owner_t)lo; 8446 file_lock->c.flc_pid = current->tgid; 8447 file_lock->c.flc_flags = FL_POSIX; 8448 8449 file_lock->fl_start = lockt->lt_offset; 8450 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length); 8451 8452 nfs4_transform_lock_offset(file_lock); 8453 8454 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock); 8455 if (status) 8456 goto out; 8457 8458 if (file_lock->c.flc_type != F_UNLCK) { 8459 status = nfserr_denied; 8460 nfs4_set_lock_denied(file_lock, &lockt->lt_denied); 8461 } 8462 out: 8463 if (lo) 8464 nfs4_put_stateowner(&lo->lo_owner); 8465 if (file_lock) 8466 locks_free_lock(file_lock); 8467 return status; 8468 } 8469 8470 void 
nfsd4_lockt_release(union nfsd4_op_u *u) 8471 { 8472 struct nfsd4_lockt *lockt = &u->lockt; 8473 struct nfsd4_lock_denied *deny = &lockt->lt_denied; 8474 8475 kfree(deny->ld_owner.data); 8476 } 8477 8478 __be32 8479 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, 8480 union nfsd4_op_u *u) 8481 { 8482 struct nfsd4_locku *locku = &u->locku; 8483 struct nfs4_ol_stateid *stp; 8484 struct nfsd_file *nf = NULL; 8485 struct file_lock *file_lock = NULL; 8486 __be32 status; 8487 int err; 8488 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 8489 8490 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n", 8491 (long long) locku->lu_offset, 8492 (long long) locku->lu_length); 8493 8494 if (check_lock_length(locku->lu_offset, locku->lu_length)) 8495 return nfserr_inval; 8496 8497 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid, 8498 &locku->lu_stateid, SC_TYPE_LOCK, 0, 8499 &stp, nn); 8500 if (status) 8501 goto out; 8502 nf = find_any_file(stp->st_stid.sc_file); 8503 if (!nf) { 8504 status = nfserr_lock_range; 8505 goto put_stateid; 8506 } 8507 if (exportfs_cannot_lock(nf->nf_file->f_path.mnt->mnt_sb->s_export_op)) { 8508 status = nfserr_notsupp; 8509 goto put_file; 8510 } 8511 8512 file_lock = locks_alloc_lock(); 8513 if (!file_lock) { 8514 dprintk("NFSD: %s: unable to allocate lock!\n", __func__); 8515 status = nfserr_jukebox; 8516 goto put_file; 8517 } 8518 8519 file_lock->c.flc_type = F_UNLCK; 8520 file_lock->c.flc_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner)); 8521 file_lock->c.flc_pid = current->tgid; 8522 file_lock->c.flc_file = nf->nf_file; 8523 file_lock->c.flc_flags = FL_POSIX; 8524 file_lock->fl_lmops = &nfsd_posix_mng_ops; 8525 file_lock->fl_start = locku->lu_offset; 8526 8527 file_lock->fl_end = last_byte_offset(locku->lu_offset, 8528 locku->lu_length); 8529 nfs4_transform_lock_offset(file_lock); 8530 8531 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL); 8532 if (err) { 8533 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n"); 8534 goto out_nfserr; 8535 } 8536 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid); 8537 put_file: 8538 nfsd_file_put(nf); 8539 put_stateid: 8540 mutex_unlock(&stp->st_mutex); 8541 nfs4_put_stid(&stp->st_stid); 8542 out: 8543 nfsd4_bump_seqid(cstate, status); 8544 if (file_lock) 8545 locks_free_lock(file_lock); 8546 return status; 8547 8548 out_nfserr: 8549 status = nfserrno(err); 8550 goto put_file; 8551 } 8552 8553 /* 8554 * returns 8555 * true: locks held by lockowner 8556 * false: no locks held by lockowner 8557 */ 8558 static bool 8559 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner) 8560 { 8561 struct file_lock *fl; 8562 int status = false; 8563 struct nfsd_file *nf; 8564 struct inode *inode; 8565 struct file_lock_context *flctx; 8566 8567 spin_lock(&fp->fi_lock); 8568 nf = find_any_file_locked(fp); 8569 if (!nf) { 8570 /* Any valid lock stateid should have some sort of access */ 8571 WARN_ON_ONCE(1); 8572 goto out; 8573 } 8574 8575 inode = file_inode(nf->nf_file); 8576 flctx = locks_inode_context(inode); 8577 8578 if (flctx && !list_empty_careful(&flctx->flc_posix)) { 8579 spin_lock(&flctx->flc_lock); 8580 for_each_file_lock(fl, &flctx->flc_posix) { 8581 if (fl->c.flc_owner == (fl_owner_t)lowner) { 8582 status = true; 8583 break; 8584 } 8585 } 8586 spin_unlock(&flctx->flc_lock); 8587 } 8588 out: 8589 spin_unlock(&fp->fi_lock); 8590 return status; 8591 } 8592 8593 /** 8594 * nfsd4_release_lockowner - process NFSv4.0 RELEASE_LOCKOWNER operations 
8595 * @rqstp: RPC transaction 8596 * @cstate: NFSv4 COMPOUND state 8597 * @u: RELEASE_LOCKOWNER arguments 8598 * 8599 * Check if there are any locks still held and if not, free the lockowner 8600 * and any lock state that is owned. 8601 * 8602 * Return values: 8603 * %nfs_ok: lockowner released or not found 8604 * %nfserr_locks_held: lockowner still in use 8605 * %nfserr_stale_clientid: clientid no longer active 8606 * %nfserr_expired: clientid not recognized 8607 */ 8608 __be32 8609 nfsd4_release_lockowner(struct svc_rqst *rqstp, 8610 struct nfsd4_compound_state *cstate, 8611 union nfsd4_op_u *u) 8612 { 8613 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner; 8614 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 8615 clientid_t *clid = &rlockowner->rl_clientid; 8616 struct nfs4_ol_stateid *stp; 8617 struct nfs4_lockowner *lo; 8618 struct nfs4_client *clp; 8619 LIST_HEAD(reaplist); 8620 __be32 status; 8621 8622 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n", 8623 clid->cl_boot, clid->cl_id); 8624 8625 status = set_client(clid, cstate, nn); 8626 if (status) 8627 return status; 8628 clp = cstate->clp; 8629 8630 spin_lock(&clp->cl_lock); 8631 lo = find_lockowner_str_locked(clp, &rlockowner->rl_owner); 8632 if (!lo) { 8633 spin_unlock(&clp->cl_lock); 8634 return nfs_ok; 8635 } 8636 8637 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) { 8638 if (check_for_locks(stp->st_stid.sc_file, lo)) { 8639 spin_unlock(&clp->cl_lock); 8640 nfs4_put_stateowner(&lo->lo_owner); 8641 return nfserr_locks_held; 8642 } 8643 } 8644 unhash_lockowner_locked(lo); 8645 while (!list_empty(&lo->lo_owner.so_stateids)) { 8646 stp = list_first_entry(&lo->lo_owner.so_stateids, 8647 struct nfs4_ol_stateid, 8648 st_perstateowner); 8649 unhash_lock_stateid(stp); 8650 put_ol_stateid_locked(stp, &reaplist); 8651 } 8652 spin_unlock(&clp->cl_lock); 8653 8654 free_ol_stateid_reaplist(&reaplist); 8655 remove_blocked_locks(lo); 8656 nfs4_put_stateowner(&lo->lo_owner); 8657 return nfs_ok; 8658 } 8659 8660 static inline struct nfs4_client_reclaim * 8661 alloc_reclaim(void) 8662 { 8663 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL); 8664 } 8665 8666 bool 8667 nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn) 8668 { 8669 struct nfs4_client_reclaim *crp; 8670 8671 crp = nfsd4_find_reclaim_client(name, nn); 8672 return (crp && crp->cr_clp); 8673 } 8674 8675 /* 8676 * failure => all reset bets are off, nfserr_no_grace... 8677 * 8678 * The caller is responsible for freeing name.data if NULL is returned (it 8679 * will be freed in nfs4_remove_reclaim_record in the normal case). 
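 *
 * On success the returned record takes ownership of both name.data and
 * princhash.data; nfs4_remove_reclaim_record() kfree()s them when the
 * record is removed.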
8680 */ 8681 struct nfs4_client_reclaim * 8682 nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash, 8683 struct nfsd_net *nn) 8684 { 8685 unsigned int strhashval; 8686 struct nfs4_client_reclaim *crp; 8687 8688 crp = alloc_reclaim(); 8689 if (crp) { 8690 strhashval = clientstr_hashval(name); 8691 INIT_LIST_HEAD(&crp->cr_strhash); 8692 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]); 8693 crp->cr_name.data = name.data; 8694 crp->cr_name.len = name.len; 8695 crp->cr_princhash.data = princhash.data; 8696 crp->cr_princhash.len = princhash.len; 8697 crp->cr_clp = NULL; 8698 nn->reclaim_str_hashtbl_size++; 8699 } 8700 return crp; 8701 } 8702 8703 void 8704 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn) 8705 { 8706 list_del(&crp->cr_strhash); 8707 kfree(crp->cr_name.data); 8708 kfree(crp->cr_princhash.data); 8709 kfree(crp); 8710 nn->reclaim_str_hashtbl_size--; 8711 } 8712 8713 void 8714 nfs4_release_reclaim(struct nfsd_net *nn) 8715 { 8716 struct nfs4_client_reclaim *crp = NULL; 8717 int i; 8718 8719 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 8720 while (!list_empty(&nn->reclaim_str_hashtbl[i])) { 8721 crp = list_entry(nn->reclaim_str_hashtbl[i].next, 8722 struct nfs4_client_reclaim, cr_strhash); 8723 nfs4_remove_reclaim_record(crp, nn); 8724 } 8725 } 8726 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size); 8727 } 8728 8729 /* 8730 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */ 8731 struct nfs4_client_reclaim * 8732 nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn) 8733 { 8734 unsigned int strhashval; 8735 struct nfs4_client_reclaim *crp = NULL; 8736 8737 strhashval = clientstr_hashval(name); 8738 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) { 8739 if (compare_blob(&crp->cr_name, &name) == 0) { 8740 return crp; 8741 } 8742 } 8743 return NULL; 8744 } 8745 8746 __be32 8747 nfs4_check_open_reclaim(struct nfs4_client *clp) 8748 { 8749 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags)) 8750 return nfserr_no_grace; 8751 8752 if (nfsd4_client_record_check(clp)) 8753 return nfserr_reclaim_bad; 8754 8755 return nfs_ok; 8756 } 8757 8758 /* 8759 * Since the lifetime of a delegation isn't limited to that of an open, a 8760 * client may quite reasonably hang on to a delegation as long as it has 8761 * the inode cached. This becomes an obvious problem the first time a 8762 * client's inode cache approaches the size of the server's total memory. 8763 * 8764 * For now we avoid this problem by imposing a hard limit on the number 8765 * of delegations, which varies according to the server's memory size. 8766 */ 8767 static void 8768 set_max_delegations(void) 8769 { 8770 /* 8771 * Allow at most 4 delegations per megabyte of RAM. Quick 8772 * estimates suggest that in the worst case (where every delegation 8773 * is for a different inode), a delegation could take about 1.5K, 8774 * giving a worst case usage of about 6% of memory. 
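	 *
	 * Worked example (assuming 4K pages, PAGE_SHIFT == 12): the shift
	 * below is 20 - 2 - 12 = 6, i.e. one delegation per 64 free pages.
	 * A megabyte is 256 such pages, so 256 / 64 = 4 delegations per MB,
	 * matching the figure above.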
8775 */ 8776 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT); 8777 } 8778 8779 static int nfs4_state_create_net(struct net *net) 8780 { 8781 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 8782 int i; 8783 8784 nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE, 8785 sizeof(struct list_head), 8786 GFP_KERNEL); 8787 if (!nn->conf_id_hashtbl) 8788 goto err; 8789 nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE, 8790 sizeof(struct list_head), 8791 GFP_KERNEL); 8792 if (!nn->unconf_id_hashtbl) 8793 goto err_unconf_id; 8794 nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE, 8795 sizeof(struct list_head), 8796 GFP_KERNEL); 8797 if (!nn->sessionid_hashtbl) 8798 goto err_sessionid; 8799 8800 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 8801 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]); 8802 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]); 8803 } 8804 for (i = 0; i < SESSION_HASH_SIZE; i++) 8805 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]); 8806 nn->conf_name_tree = RB_ROOT; 8807 nn->unconf_name_tree = RB_ROOT; 8808 nn->boot_time = ktime_get_real_seconds(); 8809 nn->grace_ended = false; 8810 nn->nfsd4_manager.block_opens = true; 8811 INIT_LIST_HEAD(&nn->nfsd4_manager.list); 8812 INIT_LIST_HEAD(&nn->client_lru); 8813 INIT_LIST_HEAD(&nn->close_lru); 8814 INIT_LIST_HEAD(&nn->del_recall_lru); 8815 spin_lock_init(&nn->client_lock); 8816 spin_lock_init(&nn->s2s_cp_lock); 8817 idr_init(&nn->s2s_cp_stateids); 8818 atomic_set(&nn->pending_async_copies, 0); 8819 8820 spin_lock_init(&nn->blocked_locks_lock); 8821 INIT_LIST_HEAD(&nn->blocked_locks_lru); 8822 8823 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); 8824 INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker); 8825 get_net(net); 8826 8827 nn->nfsd_client_shrinker = shrinker_alloc(0, "nfsd-client"); 8828 if (!nn->nfsd_client_shrinker) 8829 goto err_shrinker; 8830 8831 nn->nfsd_client_shrinker->scan_objects = nfsd4_state_shrinker_scan; 8832 nn->nfsd_client_shrinker->count_objects = nfsd4_state_shrinker_count; 8833 nn->nfsd_client_shrinker->private_data = nn; 8834 8835 shrinker_register(nn->nfsd_client_shrinker); 8836 8837 return 0; 8838 8839 err_shrinker: 8840 put_net(net); 8841 kfree(nn->sessionid_hashtbl); 8842 err_sessionid: 8843 kfree(nn->unconf_id_hashtbl); 8844 err_unconf_id: 8845 kfree(nn->conf_id_hashtbl); 8846 err: 8847 return -ENOMEM; 8848 } 8849 8850 static void 8851 nfs4_state_destroy_net(struct net *net) 8852 { 8853 int i; 8854 struct nfs4_client *clp = NULL; 8855 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 8856 8857 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 8858 while (!list_empty(&nn->conf_id_hashtbl[i])) { 8859 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); 8860 destroy_client(clp); 8861 } 8862 } 8863 8864 WARN_ON(!list_empty(&nn->blocked_locks_lru)); 8865 8866 for (i = 0; i < CLIENT_HASH_SIZE; i++) { 8867 while (!list_empty(&nn->unconf_id_hashtbl[i])) { 8868 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash); 8869 destroy_client(clp); 8870 } 8871 } 8872 8873 kfree(nn->sessionid_hashtbl); 8874 kfree(nn->unconf_id_hashtbl); 8875 kfree(nn->conf_id_hashtbl); 8876 put_net(net); 8877 } 8878 8879 int 8880 nfs4_state_start_net(struct net *net) 8881 { 8882 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 8883 int ret; 8884 8885 ret = nfs4_state_create_net(net); 8886 if (ret) 8887 return ret; 8888 locks_start_grace(net, &nn->nfsd4_manager); 8889 nfsd4_client_tracking_init(net); 8890 if (nn->track_reclaim_completes && 
nn->reclaim_str_hashtbl_size == 0) 8891 goto skip_grace; 8892 printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n", 8893 nn->nfsd4_grace, net->ns.inum); 8894 trace_nfsd_grace_start(nn); 8895 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ); 8896 return 0; 8897 8898 skip_grace: 8899 printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n", 8900 net->ns.inum); 8901 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ); 8902 nfsd4_end_grace(nn); 8903 return 0; 8904 } 8905 8906 /* initialization to perform when the nfsd service is started: */ 8907 int 8908 nfs4_state_start(void) 8909 { 8910 int ret; 8911 8912 ret = rhltable_init(&nfs4_file_rhltable, &nfs4_file_rhash_params); 8913 if (ret) 8914 return ret; 8915 8916 nfsd_slot_shrinker = shrinker_alloc(0, "nfsd-DRC-slot"); 8917 if (!nfsd_slot_shrinker) { 8918 rhltable_destroy(&nfs4_file_rhltable); 8919 return -ENOMEM; 8920 } 8921 nfsd_slot_shrinker->count_objects = nfsd_slot_count; 8922 nfsd_slot_shrinker->scan_objects = nfsd_slot_scan; 8923 shrinker_register(nfsd_slot_shrinker); 8924 8925 set_max_delegations(); 8926 return 0; 8927 } 8928 8929 void 8930 nfs4_state_shutdown_net(struct net *net) 8931 { 8932 struct nfs4_delegation *dp = NULL; 8933 struct list_head *pos, *next, reaplist; 8934 struct nfsd_net *nn = net_generic(net, nfsd_net_id); 8935 8936 shrinker_free(nn->nfsd_client_shrinker); 8937 cancel_work_sync(&nn->nfsd_shrinker_work); 8938 cancel_delayed_work_sync(&nn->laundromat_work); 8939 locks_end_grace(&nn->nfsd4_manager); 8940 8941 INIT_LIST_HEAD(&reaplist); 8942 spin_lock(&state_lock); 8943 list_for_each_safe(pos, next, &nn->del_recall_lru) { 8944 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 8945 unhash_delegation_locked(dp, SC_STATUS_CLOSED); 8946 list_add(&dp->dl_recall_lru, &reaplist); 8947 } 8948 spin_unlock(&state_lock); 8949 list_for_each_safe(pos, next, &reaplist) { 8950 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); 8951 list_del_init(&dp->dl_recall_lru); 8952 destroy_unhashed_deleg(dp); 8953 } 8954 8955 nfsd4_client_tracking_exit(net); 8956 nfs4_state_destroy_net(net); 8957 #ifdef CONFIG_NFSD_V4_2_INTER_SSC 8958 nfsd4_ssc_shutdown_umount(nn); 8959 #endif 8960 } 8961 8962 void 8963 nfs4_state_shutdown(void) 8964 { 8965 rhltable_destroy(&nfs4_file_rhltable); 8966 shrinker_free(nfsd_slot_shrinker); 8967 } 8968 8969 static void 8970 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) 8971 { 8972 if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) && 8973 CURRENT_STATEID(stateid)) 8974 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t)); 8975 } 8976 8977 static void 8978 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid) 8979 { 8980 if (cstate->minorversion) { 8981 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t)); 8982 SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG); 8983 } 8984 } 8985 8986 void 8987 clear_current_stateid(struct nfsd4_compound_state *cstate) 8988 { 8989 CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG); 8990 } 8991 8992 /* 8993 * functions to set current state id 8994 */ 8995 void 8996 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, 8997 union nfsd4_op_u *u) 8998 { 8999 put_stateid(cstate, &u->open_downgrade.od_stateid); 9000 } 9001 9002 void 9003 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, 9004 union nfsd4_op_u *u) 9005 { 9006 put_stateid(cstate, &u->open.op_stateid); 9007 } 9008 9009 void 9010 
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, 9011 union nfsd4_op_u *u) 9012 { 9013 put_stateid(cstate, &u->close.cl_stateid); 9014 } 9015 9016 void 9017 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, 9018 union nfsd4_op_u *u) 9019 { 9020 put_stateid(cstate, &u->lock.lk_resp_stateid); 9021 } 9022 9023 /* 9024 * functions to consume current state id 9025 */ 9026 9027 void 9028 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, 9029 union nfsd4_op_u *u) 9030 { 9031 get_stateid(cstate, &u->open_downgrade.od_stateid); 9032 } 9033 9034 void 9035 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, 9036 union nfsd4_op_u *u) 9037 { 9038 get_stateid(cstate, &u->delegreturn.dr_stateid); 9039 } 9040 9041 void 9042 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, 9043 union nfsd4_op_u *u) 9044 { 9045 get_stateid(cstate, &u->free_stateid.fr_stateid); 9046 } 9047 9048 void 9049 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, 9050 union nfsd4_op_u *u) 9051 { 9052 get_stateid(cstate, &u->setattr.sa_stateid); 9053 } 9054 9055 void 9056 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, 9057 union nfsd4_op_u *u) 9058 { 9059 get_stateid(cstate, &u->close.cl_stateid); 9060 } 9061 9062 void 9063 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, 9064 union nfsd4_op_u *u) 9065 { 9066 get_stateid(cstate, &u->locku.lu_stateid); 9067 } 9068 9069 void 9070 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, 9071 union nfsd4_op_u *u) 9072 { 9073 get_stateid(cstate, &u->read.rd_stateid); 9074 } 9075 9076 void 9077 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, 9078 union nfsd4_op_u *u) 9079 { 9080 get_stateid(cstate, &u->write.wr_stateid); 9081 } 9082 9083 /** 9084 * set_cb_time - vet and set the timespec for a cb_getattr update 9085 * @cb: timestamp from the CB_GETATTR response 9086 * @orig: original timestamp in the inode 9087 * @now: current time 9088 * 9089 * Given a timestamp in a CB_GETATTR response, check it against the 9090 * current timestamp in the inode and the current time. Returns true 9091 * if the inode's timestamp needs to be updated, and false otherwise. 9092 * @cb may also be changed if the timestamp needs to be clamped. 9093 */ 9094 static bool set_cb_time(struct timespec64 *cb, const struct timespec64 *orig, 9095 const struct timespec64 *now) 9096 { 9097 9098 /* 9099 * "When the time presented is before the original time, then the 9100 * update is ignored." Also no need to update if there is no change. 9101 */ 9102 if (timespec64_compare(cb, orig) <= 0) 9103 return false; 9104 9105 /* 9106 * "When the time presented is in the future, the server can either 9107 * clamp the new time to the current time, or it may 9108 * return NFS4ERR_DELAY to the client, allowing it to retry." 
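	 *
	 * This implementation takes the first option: a timestamp in the
	 * future is clamped to @now just below rather than returning
	 * NFS4ERR_DELAY.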
9109 */ 9110 if (timespec64_compare(cb, now) > 0) { 9111 /* clamp it */ 9112 *cb = *now; 9113 } 9114 9115 return true; 9116 } 9117 9118 static int cb_getattr_update_times(struct dentry *dentry, struct nfs4_delegation *dp) 9119 { 9120 struct inode *inode = d_inode(dentry); 9121 struct timespec64 now = current_time(inode); 9122 struct nfs4_cb_fattr *ncf = &dp->dl_cb_fattr; 9123 struct iattr attrs = { }; 9124 int ret; 9125 9126 if (deleg_attrs_deleg(dp->dl_type)) { 9127 struct timespec64 atime = inode_get_atime(inode); 9128 struct timespec64 mtime = inode_get_mtime(inode); 9129 9130 attrs.ia_atime = ncf->ncf_cb_atime; 9131 attrs.ia_mtime = ncf->ncf_cb_mtime; 9132 9133 if (set_cb_time(&attrs.ia_atime, &atime, &now)) 9134 attrs.ia_valid |= ATTR_ATIME | ATTR_ATIME_SET; 9135 9136 if (set_cb_time(&attrs.ia_mtime, &mtime, &now)) { 9137 attrs.ia_valid |= ATTR_CTIME | ATTR_MTIME | ATTR_MTIME_SET; 9138 attrs.ia_ctime = attrs.ia_mtime; 9139 } 9140 } else { 9141 attrs.ia_valid |= ATTR_MTIME | ATTR_CTIME; 9142 attrs.ia_mtime = attrs.ia_ctime = now; 9143 } 9144 9145 if (!attrs.ia_valid) 9146 return 0; 9147 9148 attrs.ia_valid |= ATTR_DELEG; 9149 inode_lock(inode); 9150 ret = notify_change(&nop_mnt_idmap, dentry, &attrs, NULL); 9151 inode_unlock(inode); 9152 return ret; 9153 } 9154 9155 /** 9156 * nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict 9157 * @rqstp: RPC transaction context 9158 * @dentry: dentry of inode to be checked for a conflict 9159 * @pdp: returned WRITE delegation, if one was found 9160 * 9161 * This function is called when there is a conflict between a write 9162 * delegation and a change/size GETATTR from another client. The server 9163 * must either use the CB_GETATTR to get the current values of the 9164 * attributes from the client that holds the delegation or recall the 9165 * delegation before replying to the GETATTR. See RFC 8881 section 9166 * 18.7.4. 9167 * 9168 * Returns 0 if there is no conflict; otherwise an nfs_stat 9169 * code is returned. If @pdp is set to a non-NULL value, then the 9170 * caller must put the reference. 
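 *
 * The conflict is resolved either by asking the delegation holder for the
 * current attributes via CB_GETATTR and, if the file was modified, folding
 * the returned timestamps into the inode, or by breaking the lease
 * (recalling the delegation) when the client does not respond.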
9171 */ 9172 __be32 9173 nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry, 9174 struct nfs4_delegation **pdp) 9175 { 9176 __be32 status; 9177 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); 9178 struct file_lock_context *ctx; 9179 struct nfs4_delegation *dp = NULL; 9180 struct file_lease *fl; 9181 struct nfs4_cb_fattr *ncf; 9182 struct inode *inode = d_inode(dentry); 9183 9184 ctx = locks_inode_context(inode); 9185 if (!ctx) 9186 return nfs_ok; 9187 9188 #define NON_NFSD_LEASE ((void *)1) 9189 9190 spin_lock(&ctx->flc_lock); 9191 for_each_file_lock(fl, &ctx->flc_lease) { 9192 if (fl->c.flc_flags == FL_LAYOUT) 9193 continue; 9194 if (fl->c.flc_type == F_WRLCK) { 9195 if (fl->fl_lmops == &nfsd_lease_mng_ops) 9196 dp = fl->c.flc_owner; 9197 else 9198 dp = NON_NFSD_LEASE; 9199 } 9200 break; 9201 } 9202 if (dp == NULL || dp == NON_NFSD_LEASE || 9203 dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) { 9204 spin_unlock(&ctx->flc_lock); 9205 if (dp == NON_NFSD_LEASE) { 9206 status = nfserrno(nfsd_open_break_lease(inode, 9207 NFSD_MAY_READ)); 9208 if (status != nfserr_jukebox || 9209 !nfsd_wait_for_delegreturn(rqstp, inode)) 9210 return status; 9211 } 9212 return 0; 9213 } 9214 9215 nfsd_stats_wdeleg_getattr_inc(nn); 9216 refcount_inc(&dp->dl_stid.sc_count); 9217 ncf = &dp->dl_cb_fattr; 9218 nfs4_cb_getattr(&dp->dl_cb_fattr); 9219 spin_unlock(&ctx->flc_lock); 9220 9221 wait_on_bit_timeout(&ncf->ncf_getattr.cb_flags, NFSD4_CALLBACK_RUNNING, 9222 TASK_UNINTERRUPTIBLE, NFSD_CB_GETATTR_TIMEOUT); 9223 if (ncf->ncf_cb_status) { 9224 /* Recall delegation only if client didn't respond */ 9225 status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ)); 9226 if (status != nfserr_jukebox || 9227 !nfsd_wait_for_delegreturn(rqstp, inode)) 9228 goto out_status; 9229 } 9230 if (!ncf->ncf_file_modified && 9231 (ncf->ncf_initial_cinfo != ncf->ncf_cb_change || 9232 ncf->ncf_cur_fsize != ncf->ncf_cb_fsize)) 9233 ncf->ncf_file_modified = true; 9234 if (ncf->ncf_file_modified) { 9235 int err; 9236 9237 /* 9238 * Per section 10.4.3 of RFC 8881, the server would 9239 * not update the file's metadata with the client's 9240 * modified size 9241 */ 9242 err = cb_getattr_update_times(dentry, dp); 9243 if (err) { 9244 status = nfserrno(err); 9245 goto out_status; 9246 } 9247 ncf->ncf_cur_fsize = ncf->ncf_cb_fsize; 9248 *pdp = dp; 9249 return nfs_ok; 9250 } 9251 status = nfs_ok; 9252 out_status: 9253 nfs4_put_stid(&dp->dl_stid); 9254 return status; 9255 } 9256