1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* AFS cell and server record management
3  *
4  * Copyright (C) 2002, 2017 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 
8 #include <linux/slab.h>
9 #include <linux/key.h>
10 #include <linux/ctype.h>
11 #include <linux/dns_resolver.h>
12 #include <linux/sched.h>
13 #include <linux/inet.h>
14 #include <linux/namei.h>
15 #include <keys/rxrpc-type.h>
16 #include "internal.h"
17 
18 static unsigned __read_mostly afs_cell_gc_delay = 10;
19 static unsigned __read_mostly afs_cell_min_ttl = 10 * 60;
20 static unsigned __read_mostly afs_cell_max_ttl = 24 * 60 * 60;
21 static atomic_t cell_debug_id;
22 
23 static void afs_cell_timer(struct timer_list *timer);
24 static void afs_destroy_cell_work(struct work_struct *work);
25 static void afs_manage_cell_work(struct work_struct *work);
26 
27 static void afs_dec_cells_outstanding(struct afs_net *net)
28 {
29 	if (atomic_dec_and_test(&net->cells_outstanding))
30 		wake_up_var(&net->cells_outstanding);
31 }
32 
/* Advance a cell's lifecycle state and wake anyone waiting on the state
 * (e.g. afs_lookup_cell()).  The release ensures that all changes made to the
 * cell are visible before the new state is observed.
 */
static void afs_set_cell_state(struct afs_cell *cell, enum afs_cell_state state)
{
	smp_store_release(&cell->state, state); /* Commit cell changes before state */
	smp_wmb(); /* Set cell state before task state */
	wake_up_var(&cell->state);
}
39 
40 /*
41  * Look up and get an activation reference on a cell record.  The caller must
42  * hold net->cells_lock at least read-locked.
43  */
static struct afs_cell *afs_find_cell_locked(struct afs_net *net,
					     const char *name, unsigned int namesz,
					     enum afs_cell_trace reason)
{
	struct afs_cell *cell = NULL;
	struct rb_node *p;
	int n;

	_enter("%*.*s", namesz, namesz, name);

	if (name && namesz == 0)
		return ERR_PTR(-EINVAL);
	if (namesz > AFS_MAXCELLNAME)
		return ERR_PTR(-ENAMETOOLONG);

	/* A NULL name means the workstation cell, if one has been configured. */
	if (!name) {
		cell = rcu_dereference_protected(net->ws_cell,
						 lockdep_is_held(&net->cells_lock));
		if (!cell)
			return ERR_PTR(-EDESTADDRREQ);
		goto found;
	}

	/* Binary-search the cell tree; cells are ordered case-insensitively
	 * by name and then by name length (must match the insertion
	 * comparison in afs_lookup_cell()).
	 */
	p = net->cells.rb_node;
	while (p) {
		cell = rb_entry(p, struct afs_cell, net_node);

		n = strncasecmp(cell->name, name,
				min_t(size_t, cell->name_len, namesz));
		if (n == 0)
			n = cell->name_len - namesz;
		if (n < 0)
			p = p->rb_left;
		else if (n > 0)
			p = p->rb_right;
		else
			goto found;
	}

	return ERR_PTR(-ENOENT);

found:
	/* Bump both the refcount and the activation count. */
	return afs_use_cell(cell, reason);
}
88 
89 /*
90  * Look up and get an activation reference on a cell record.
91  */
92 struct afs_cell *afs_find_cell(struct afs_net *net,
93 			       const char *name, unsigned int namesz,
94 			       enum afs_cell_trace reason)
95 {
96 	struct afs_cell *cell;
97 
98 	down_read(&net->cells_lock);
99 	cell = afs_find_cell_locked(net, name, namesz, reason);
100 	up_read(&net->cells_lock);
101 	return cell;
102 }
103 
104 /*
105  * Set up a cell record and fill in its name, VL server address list and
106  * allocate an anonymous key
107  */
108 static struct afs_cell *afs_alloc_cell(struct afs_net *net,
109 				       const char *name, unsigned int namelen,
110 				       const char *addresses)
111 {
112 	struct afs_vlserver_list *vllist = NULL;
113 	struct afs_cell *cell;
114 	int i, ret;
115 
116 	ASSERT(name);
117 	if (namelen == 0)
118 		return ERR_PTR(-EINVAL);
119 	if (namelen > AFS_MAXCELLNAME) {
120 		_leave(" = -ENAMETOOLONG");
121 		return ERR_PTR(-ENAMETOOLONG);
122 	}
123 
124 	/* Prohibit cell names that contain unprintable chars, '/' and '@' or
125 	 * that begin with a dot.  This also precludes "@cell".
126 	 */
127 	if (name[0] == '.')
128 		return ERR_PTR(-EINVAL);
129 	for (i = 0; i < namelen; i++) {
130 		char ch = name[i];
131 		if (!isprint(ch) || ch == '/' || ch == '@')
132 			return ERR_PTR(-EINVAL);
133 	}
134 
135 	_enter("%*.*s,%s", namelen, namelen, name, addresses);
136 
137 	cell = kzalloc(sizeof(struct afs_cell), GFP_KERNEL);
138 	if (!cell) {
139 		_leave(" = -ENOMEM");
140 		return ERR_PTR(-ENOMEM);
141 	}
142 
143 	cell->name = kmalloc(1 + namelen + 1, GFP_KERNEL);
144 	if (!cell->name) {
145 		kfree(cell);
146 		return ERR_PTR(-ENOMEM);
147 	}
148 
149 	cell->name[0] = '.';
150 	cell->name++;
151 	cell->name_len = namelen;
152 	for (i = 0; i < namelen; i++)
153 		cell->name[i] = tolower(name[i]);
154 	cell->name[i] = 0;
155 
156 	cell->net = net;
157 	refcount_set(&cell->ref, 1);
158 	atomic_set(&cell->active, 0);
159 	INIT_WORK(&cell->destroyer, afs_destroy_cell_work);
160 	INIT_WORK(&cell->manager, afs_manage_cell_work);
161 	timer_setup(&cell->management_timer, afs_cell_timer, 0);
162 	init_rwsem(&cell->vs_lock);
163 	cell->volumes = RB_ROOT;
164 	INIT_HLIST_HEAD(&cell->proc_volumes);
165 	seqlock_init(&cell->volume_lock);
166 	cell->fs_servers = RB_ROOT;
167 	init_rwsem(&cell->fs_lock);
168 	rwlock_init(&cell->vl_servers_lock);
169 	cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);
170 
171 	/* Provide a VL server list, filling it in if we were given a list of
172 	 * addresses to use.
173 	 */
174 	if (addresses) {
175 		vllist = afs_parse_text_addrs(net,
176 					      addresses, strlen(addresses), ':',
177 					      VL_SERVICE, AFS_VL_PORT);
178 		if (IS_ERR(vllist)) {
179 			ret = PTR_ERR(vllist);
180 			goto parse_failed;
181 		}
182 
183 		vllist->source = DNS_RECORD_FROM_CONFIG;
184 		vllist->status = DNS_LOOKUP_NOT_DONE;
185 		cell->dns_expiry = TIME64_MAX;
186 	} else {
187 		ret = -ENOMEM;
188 		vllist = afs_alloc_vlserver_list(0);
189 		if (!vllist)
190 			goto error;
191 		vllist->source = DNS_RECORD_UNAVAILABLE;
192 		vllist->status = DNS_LOOKUP_NOT_DONE;
193 		cell->dns_expiry = ktime_get_real_seconds();
194 	}
195 
196 	rcu_assign_pointer(cell->vl_servers, vllist);
197 
198 	cell->dns_source = vllist->source;
199 	cell->dns_status = vllist->status;
200 	smp_store_release(&cell->dns_lookup_count, 1); /* vs source/status */
201 	atomic_inc(&net->cells_outstanding);
202 	ret = idr_alloc_cyclic(&net->cells_dyn_ino, cell,
203 			       2, INT_MAX / 2, GFP_KERNEL);
204 	if (ret < 0)
205 		goto error;
206 	cell->dynroot_ino = ret;
207 	cell->debug_id = atomic_inc_return(&cell_debug_id);
208 
209 	trace_afs_cell(cell->debug_id, 1, 0, afs_cell_trace_alloc);
210 
211 	_leave(" = %p", cell);
212 	return cell;
213 
214 parse_failed:
215 	if (ret == -EINVAL)
216 		printk(KERN_ERR "kAFS: bad VL server IP address\n");
217 error:
218 	afs_put_vlserverlist(cell->net, vllist);
219 	kfree(cell->name - 1);
220 	kfree(cell);
221 	_leave(" = %d", ret);
222 	return ERR_PTR(ret);
223 }
224 
225 /*
226  * afs_lookup_cell - Look up or create a cell record.
227  * @net:	The network namespace
228  * @name:	The name of the cell.
229  * @namesz:	The strlen of the cell name.
230  * @vllist:	A colon/comma separated list of numeric IP addresses or NULL.
231  * @excl:	T if an error should be given if the cell name already exists.
232  * @trace:	The reason to be logged if the lookup is successful.
233  *
234  * Look up a cell record by name and query the DNS for VL server addresses if
235  * needed.  Note that that actual DNS query is punted off to the manager thread
236  * so that this function can return immediately if interrupted whilst allowing
237  * cell records to be shared even if not yet fully constructed.
238  */
struct afs_cell *afs_lookup_cell(struct afs_net *net,
				 const char *name, unsigned int namesz,
				 const char *vllist, bool excl,
				 enum afs_cell_trace trace)
{
	struct afs_cell *cell, *candidate, *cursor;
	struct rb_node *parent, **pp;
	enum afs_cell_state state;
	int ret, n;

	_enter("%s,%s", name, vllist);

	/* Fast path: unless exclusivity was requested, the cell may already
	 * exist, in which case just wait for it to finish setting up.
	 */
	if (!excl) {
		cell = afs_find_cell(net, name, namesz, trace);
		if (!IS_ERR(cell))
			goto wait_for_cell;
	}

	/* Assume we're probably going to create a cell and preallocate and
	 * mostly set up a candidate record.  We can then use this to stash the
	 * name, the net namespace and VL server addresses.
	 *
	 * We also want to do this before we hold any locks as it may involve
	 * upcalling to userspace to make DNS queries.
	 */
	candidate = afs_alloc_cell(net, name, namesz, vllist);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	/* Find the insertion point and check to see if someone else added a
	 * cell whilst we were allocating.
	 */
	down_write(&net->cells_lock);

	pp = &net->cells.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent, struct afs_cell, net_node);

		/* Same ordering as afs_find_cell_locked(): case-insensitive
		 * name, then name length.
		 */
		n = strncasecmp(cursor->name, name,
				min_t(size_t, cursor->name_len, namesz));
		if (n == 0)
			n = cursor->name_len - namesz;
		if (n < 0)
			pp = &(*pp)->rb_left;
		else if (n > 0)
			pp = &(*pp)->rb_right;
		else
			goto cell_already_exists;
	}

	/* Publish the new cell in the tree (with an activation ref) and hand
	 * it to the manager worker to look up and activate.
	 */
	cell = candidate;
	candidate = NULL;
	afs_use_cell(cell, trace);
	rb_link_node_rcu(&cell->net_node, parent, pp);
	rb_insert_color(&cell->net_node, &net->cells);
	up_write(&net->cells_lock);

	afs_queue_cell(cell, afs_cell_trace_queue_new);

wait_for_cell:
	/* Wait for the cell to reach a settled state (ACTIVE or DEAD). */
	_debug("wait_for_cell");
	state = smp_load_acquire(&cell->state); /* vs error */
	if (state != AFS_CELL_ACTIVE &&
	    state != AFS_CELL_DEAD) {
		afs_see_cell(cell, afs_cell_trace_wait);
		wait_var_event(&cell->state,
			       ({
				       state = smp_load_acquire(&cell->state); /* vs error */
				       state == AFS_CELL_ACTIVE || state == AFS_CELL_DEAD;
			       }));
	}

	/* Check the state obtained from the wait check. */
	if (state == AFS_CELL_DEAD) {
		ret = cell->error;
		goto error;
	}

	_leave(" = %p [cell]", cell);
	return cell;

cell_already_exists:
	_debug("cell exists");
	cell = cursor;
	if (excl) {
		ret = -EEXIST;
	} else {
		afs_use_cell(cursor, trace);
		ret = 0;
	}
	up_write(&net->cells_lock);
	/* Ditch the candidate we ended up not needing. */
	if (candidate)
		afs_put_cell(candidate, afs_cell_trace_put_candidate);
	if (ret == 0)
		goto wait_for_cell;
	goto error_noput;
error:
	/* Drop the activation ref taken by the lookup/insert above. */
	afs_unuse_cell(cell, afs_cell_trace_unuse_lookup_error);
error_noput:
	_leave(" = %d [error]", ret);
	return ERR_PTR(ret);
}
345 
346 /*
347  * set the root cell information
348  * - can be called with a module parameter string
349  * - can be called from a write to /proc/fs/afs/rootcell
350  */
int afs_cell_init(struct afs_net *net, const char *rootcell)
{
	struct afs_cell *old_root, *new_root;
	const char *cp, *vllist;
	size_t len;

	_enter("");

	if (!rootcell) {
		/* module is loaded with no parameters, or built statically.
		 * - in the future we might initialize cell DB here.
		 */
		_leave(" = 0 [no root]");
		return 0;
	}

	/* The parameter looks like "<cellname>[:<vlserver-addrs>]". */
	cp = strchr(rootcell, ':');
	if (!cp) {
		_debug("kAFS: no VL server IP addresses specified");
		vllist = NULL;
		len = strlen(rootcell);
	} else {
		vllist = cp + 1;
		len = cp - rootcell;
	}

	/* Reject obviously malformed names: empty, leading or trailing dot,
	 * embedded '/' or ".." within the cell-name portion.
	 */
	if (len == 0 || !rootcell[0] || rootcell[0] == '.' || rootcell[len - 1] == '.')
		return -EINVAL;
	if (memchr(rootcell, '/', len))
		return -EINVAL;
	cp = strstr(rootcell, "..");
	if (cp && cp < rootcell + len)
		return -EINVAL;

	/* allocate a cell record for the root/workstation cell */
	new_root = afs_lookup_cell(net, rootcell, len, vllist, false,
				   afs_cell_trace_use_lookup_ws);
	if (IS_ERR(new_root)) {
		_leave(" = %ld", PTR_ERR(new_root));
		return PTR_ERR(new_root);
	}

	/* Pin the root cell against garbage collection, but only take the
	 * extra activation ref the first time the NO_GC bit goes on.
	 */
	if (!test_and_set_bit(AFS_CELL_FL_NO_GC, &new_root->flags))
		afs_use_cell(new_root, afs_cell_trace_use_pin);

	/* install the new cell */
	down_write(&net->cells_lock);
	old_root = rcu_replace_pointer(net->ws_cell, new_root,
				       lockdep_is_held(&net->cells_lock));
	up_write(&net->cells_lock);

	/* afs_unuse_cell() tolerates a NULL previous root. */
	afs_unuse_cell(old_root, afs_cell_trace_unuse_ws);
	_leave(" = 0");
	return 0;
}
406 
407 /*
408  * Update a cell's VL server address list from the DNS.
409  */
static int afs_update_cell(struct afs_cell *cell)
{
	struct afs_vlserver_list *vllist, *old = NULL, *p;
	unsigned int min_ttl = READ_ONCE(afs_cell_min_ttl);
	unsigned int max_ttl = READ_ONCE(afs_cell_max_ttl);
	time64_t now, expiry = 0;
	int ret = 0;

	_enter("%s", cell->name);

	vllist = afs_dns_query(cell, &expiry);
	if (IS_ERR(vllist)) {
		ret = PTR_ERR(vllist);

		_debug("%s: fail %d", cell->name, ret);
		if (ret == -ENOMEM)
			goto out_wake;

		/* Substitute an empty list carrying the failure status so
		 * that the cell record reflects the DNS result.
		 */
		vllist = afs_alloc_vlserver_list(0);
		if (!vllist) {
			if (ret >= 0)
				ret = -ENOMEM;
			goto out_wake;
		}

		switch (ret) {
		case -ENODATA:
		case -EDESTADDRREQ:
			vllist->status = DNS_LOOKUP_GOT_NOT_FOUND;
			break;
		case -EAGAIN:
		case -ECONNREFUSED:
			vllist->status = DNS_LOOKUP_GOT_TEMP_FAILURE;
			break;
		default:
			vllist->status = DNS_LOOKUP_GOT_LOCAL_FAILURE;
			break;
		}
	}

	_debug("%s: got list %d %d", cell->name, vllist->source, vllist->status);
	cell->dns_status = vllist->status;

	/* Clamp the record expiry between the configured min and max TTLs. */
	now = ktime_get_real_seconds();
	if (min_ttl > max_ttl)
		max_ttl = min_ttl;
	if (expiry < now + min_ttl)
		expiry = now + min_ttl;
	else if (expiry > now + max_ttl)
		expiry = now + max_ttl;

	_debug("%s: status %d", cell->name, vllist->status);
	if (vllist->source == DNS_RECORD_UNAVAILABLE) {
		switch (vllist->status) {
		case DNS_LOOKUP_GOT_NOT_FOUND:
			/* The DNS said that the cell does not exist or there
			 * weren't any addresses to be had.
			 */
			cell->dns_expiry = expiry;
			break;

		case DNS_LOOKUP_BAD:
		case DNS_LOOKUP_GOT_LOCAL_FAILURE:
		case DNS_LOOKUP_GOT_TEMP_FAILURE:
		case DNS_LOOKUP_GOT_NS_FAILURE:
		default:
			/* Retry failed lookups again fairly soon. */
			cell->dns_expiry = now + 10;
			break;
		}
	} else {
		cell->dns_expiry = expiry;
	}

	/* Replace the VL server list if the new record has servers or the old
	 * record doesn't.
	 */
	write_lock(&cell->vl_servers_lock);
	p = rcu_dereference_protected(cell->vl_servers, true);
	if (vllist->nr_servers > 0 || p->nr_servers == 0) {
		rcu_assign_pointer(cell->vl_servers, vllist);
		cell->dns_source = vllist->source;
		old = p;
	} else {
		/* We're discarding the new list rather than installing it, so
		 * drop our ref on it instead of on the old one - otherwise it
		 * would be leaked.
		 */
		old = vllist;
	}
	write_unlock(&cell->vl_servers_lock);
	afs_put_vlserverlist(cell->net, old);

out_wake:
	/* Bump the lookup generation and wake anyone waiting on the DNS
	 * result.
	 */
	smp_store_release(&cell->dns_lookup_count,
			  cell->dns_lookup_count + 1); /* vs source/status */
	wake_up_var(&cell->dns_lookup_count);
	_leave(" = %d", ret);
	return ret;
}
503 
504 /*
505  * Destroy a cell record
506  */
static void afs_cell_destroy(struct rcu_head *rcu)
{
	struct afs_cell *cell = container_of(rcu, struct afs_cell, rcu);
	struct afs_net *net = cell->net;
	int r;

	_enter("%p{%s}", cell, cell->name);

	/* By this point the last reference has been dropped and an RCU grace
	 * period has elapsed, so nothing else should be looking at the cell.
	 */
	r = refcount_read(&cell->ref);
	ASSERTCMP(r, ==, 0);
	trace_afs_cell(cell->debug_id, r, atomic_read(&cell->active), afs_cell_trace_free);

	afs_put_vlserverlist(net, rcu_access_pointer(cell->vl_servers));
	afs_unuse_cell(cell->alias_of, afs_cell_trace_unuse_alias);
	key_put(cell->anonymous_key);
	idr_remove(&net->cells_dyn_ino, cell->dynroot_ino);
	/* The name allocation starts one byte before cell->name (at the '.'). */
	kfree(cell->name - 1);
	kfree(cell);

	/* Let afs_cell_purge() know there's one less cell to wait for. */
	afs_dec_cells_outstanding(net);
	_leave(" [destroyed]");
}
529 
/* Final teardown, run on the afs workqueue once the last ref is gone: quiesce
 * the management timer and manager work so they can't race with destruction,
 * then free the cell after an RCU grace period.
 */
static void afs_destroy_cell_work(struct work_struct *work)
{
	struct afs_cell *cell = container_of(work, struct afs_cell, destroyer);

	afs_see_cell(cell, afs_cell_trace_destroy);
	timer_delete_sync(&cell->management_timer);
	cancel_work_sync(&cell->manager);
	call_rcu(&cell->rcu, afs_cell_destroy);
}
539 
540 /*
541  * Get a reference on a cell record.
542  */
543 struct afs_cell *afs_get_cell(struct afs_cell *cell, enum afs_cell_trace reason)
544 {
545 	int r;
546 
547 	__refcount_inc(&cell->ref, &r);
548 	trace_afs_cell(cell->debug_id, r + 1, atomic_read(&cell->active), reason);
549 	return cell;
550 }
551 
552 /*
553  * Drop a reference on a cell record.
554  */
void afs_put_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	if (cell) {
		unsigned int debug_id = cell->debug_id;
		unsigned int a;
		bool zero;
		int r;

		/* Snapshot debug_id and active first: if this isn't the last
		 * ref, the cell could be destroyed by another thread as soon
		 * as we let go, so the trace below mustn't deref it.
		 */
		a = atomic_read(&cell->active);
		zero = __refcount_dec_and_test(&cell->ref, &r);
		trace_afs_cell(debug_id, r - 1, a, reason);
		if (zero) {
			/* The final ref must go with no activations left. */
			a = atomic_read(&cell->active);
			WARN(a != 0, "Cell active count %u > 0\n", a);
			WARN_ON(!queue_work(afs_wq, &cell->destroyer));
		}
	}
}
573 
574 /*
575  * Note a cell becoming more active.
576  */
577 struct afs_cell *afs_use_cell(struct afs_cell *cell, enum afs_cell_trace reason)
578 {
579 	int r, a;
580 
581 	__refcount_inc(&cell->ref, &r);
582 	a = atomic_inc_return(&cell->active);
583 	trace_afs_cell(cell->debug_id, r + 1, a, reason);
584 	return cell;
585 }
586 
587 /*
588  * Record a cell becoming less active.  When the active counter reaches 1, it
589  * is scheduled for destruction, but may get reactivated.
590  */
void afs_unuse_cell(struct afs_cell *cell, enum afs_cell_trace reason)
{
	unsigned int debug_id;
	time64_t now, expire_delay;
	bool zero;
	int r, a;

	if (!cell)
		return;

	_enter("%s", cell->name);

	/* Work out how long to leave the cell around before garbage
	 * collection: give it a grace period if it actually has VL servers
	 * recorded.
	 */
	now = ktime_get_real_seconds();
	cell->last_inactive = now;
	expire_delay = 0;
	if (cell->vl_servers->nr_servers)
		expire_delay = afs_cell_gc_delay;

	debug_id = cell->debug_id;
	a = atomic_dec_return(&cell->active);
	if (!a)
		/* 'cell' may now be garbage collected. */
		afs_set_cell_timer(cell, expire_delay);

	/* Drop the ref that went with the activation.  As in afs_put_cell(),
	 * the trace must use the snapshotted debug_id in case we weren't the
	 * last holder.
	 */
	zero = __refcount_dec_and_test(&cell->ref, &r);
	trace_afs_cell(debug_id, r - 1, a, reason);
	if (zero)
		WARN_ON(!queue_work(afs_wq, &cell->destroyer));
}
620 
621 /*
622  * Note that a cell has been seen.
623  */
624 void afs_see_cell(struct afs_cell *cell, enum afs_cell_trace reason)
625 {
626 	int r, a;
627 
628 	r = refcount_read(&cell->ref);
629 	a = atomic_read(&cell->active);
630 	trace_afs_cell(cell->debug_id, r, a, reason);
631 }
632 
633 /*
634  * Queue a cell for management, giving the workqueue a ref to hold.
635  */
636 void afs_queue_cell(struct afs_cell *cell, enum afs_cell_trace reason)
637 {
638 	queue_work(afs_wq, &cell->manager);
639 }
640 
641 /*
642  * Cell-specific management timer.
643  */
644 static void afs_cell_timer(struct timer_list *timer)
645 {
646 	struct afs_cell *cell = container_of(timer, struct afs_cell, management_timer);
647 
648 	afs_see_cell(cell, afs_cell_trace_see_mgmt_timer);
649 	if (refcount_read(&cell->ref) > 0 && cell->net->live)
650 		queue_work(afs_wq, &cell->manager);
651 }
652 
653 /*
654  * Set/reduce the cell timer.
655  */
void afs_set_cell_timer(struct afs_cell *cell, unsigned int delay_secs)
{
	/* timer_reduce() only moves the expiry earlier (or starts an idle
	 * timer), so concurrent callers can't push a pending expiry out.
	 */
	timer_reduce(&cell->management_timer, jiffies + delay_secs * HZ);
}
660 
661 /*
662  * Allocate a key to use as a placeholder for anonymous user security.
663  */
664 static int afs_alloc_anon_key(struct afs_cell *cell)
665 {
666 	struct key *key;
667 	char keyname[4 + AFS_MAXCELLNAME + 1], *cp, *dp;
668 
669 	/* Create a key to represent an anonymous user. */
670 	memcpy(keyname, "afs@", 4);
671 	dp = keyname + 4;
672 	cp = cell->name;
673 	do {
674 		*dp++ = tolower(*cp);
675 	} while (*cp++);
676 
677 	key = rxrpc_get_null_key(keyname);
678 	if (IS_ERR(key))
679 		return PTR_ERR(key);
680 
681 	cell->anonymous_key = key;
682 
683 	_debug("anon key %p{%x}",
684 	       cell->anonymous_key, key_serial(cell->anonymous_key));
685 	return 0;
686 }
687 
688 /*
689  * Activate a cell.
690  */
691 static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
692 {
693 	struct hlist_node **p;
694 	struct afs_cell *pcell;
695 	int ret;
696 
697 	if (!cell->anonymous_key) {
698 		ret = afs_alloc_anon_key(cell);
699 		if (ret < 0)
700 			return ret;
701 	}
702 
703 	ret = afs_proc_cell_setup(cell);
704 	if (ret < 0)
705 		return ret;
706 
707 	mutex_lock(&net->proc_cells_lock);
708 	for (p = &net->proc_cells.first; *p; p = &(*p)->next) {
709 		pcell = hlist_entry(*p, struct afs_cell, proc_link);
710 		if (strcmp(cell->name, pcell->name) < 0)
711 			break;
712 	}
713 
714 	cell->proc_link.pprev = p;
715 	cell->proc_link.next = *p;
716 	rcu_assign_pointer(*p, &cell->proc_link.next);
717 	if (cell->proc_link.next)
718 		cell->proc_link.next->pprev = &cell->proc_link.next;
719 
720 	mutex_unlock(&net->proc_cells_lock);
721 	return 0;
722 }
723 
724 /*
725  * Deactivate a cell.
726  */
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
	_enter("%s", cell->name);

	afs_proc_cell_remove(cell);

	/* Take the cell out of the sorted /proc list; it may never have been
	 * added if activation failed part-way, hence the unhashed check.
	 */
	mutex_lock(&net->proc_cells_lock);
	if (!hlist_unhashed(&cell->proc_link))
		hlist_del_rcu(&cell->proc_link);
	mutex_unlock(&net->proc_cells_lock);

	_leave("");
}
740 
741 static bool afs_has_cell_expired(struct afs_cell *cell, time64_t *_next_manage)
742 {
743 	const struct afs_vlserver_list *vllist;
744 	time64_t expire_at = cell->last_inactive;
745 	time64_t now = ktime_get_real_seconds();
746 
747 	if (atomic_read(&cell->active))
748 		return false;
749 	if (!cell->net->live)
750 		return true;
751 
752 	vllist = rcu_dereference_protected(cell->vl_servers, true);
753 	if (vllist && vllist->nr_servers > 0)
754 		expire_at += afs_cell_gc_delay;
755 
756 	if (expire_at <= now)
757 		return true;
758 	if (expire_at < *_next_manage)
759 		*_next_manage = expire_at;
760 	return false;
761 }
762 
763 /*
764  * Manage a cell record, initialising and destroying it, maintaining its DNS
765  * records.
766  */
static bool afs_manage_cell(struct afs_cell *cell)
{
	struct afs_net *net = cell->net;
	time64_t next_manage = TIME64_MAX;
	int ret;

	_enter("%s", cell->name);

	_debug("state %u", cell->state);
	switch (cell->state) {
	case AFS_CELL_SETTING_UP:
		goto set_up_cell;
	case AFS_CELL_ACTIVE:
		goto cell_is_active;
	case AFS_CELL_REMOVING:
		/* Removal runs inline below and moves straight on to DEAD, so
		 * the manager should never observe this state.
		 */
		WARN_ON_ONCE(1);
		return false;
	case AFS_CELL_DEAD:
		return false;
	default:
		_debug("bad state %u", cell->state);
		WARN_ON_ONCE(1); /* Unhandled state */
		return false;
	}

set_up_cell:
	ret = afs_activate_cell(net, cell);
	if (ret < 0) {
		cell->error = ret;
		goto remove_cell;
	}

	afs_set_cell_state(cell, AFS_CELL_ACTIVE);

cell_is_active:
	if (afs_has_cell_expired(cell, &next_manage))
		goto remove_cell;

	/* Refresh the VL server list from the DNS if someone asked for it. */
	if (test_and_clear_bit(AFS_CELL_FL_DO_LOOKUP, &cell->flags)) {
		ret = afs_update_cell(cell);
		if (ret < 0)
			cell->error = ret;
	}

	/* Schedule the next management pass: immediately if it's already due,
	 * otherwise via the management timer.
	 */
	if (next_manage < TIME64_MAX && cell->net->live) {
		time64_t now = ktime_get_real_seconds();

		if (next_manage - now <= 0)
			afs_queue_cell(cell, afs_cell_trace_queue_again);
		else
			afs_set_cell_timer(cell, next_manage - now);
	}
	_leave(" [done %u]", cell->state);
	return false;

remove_cell:
	down_write(&net->cells_lock);

	/* Recheck under the lock in case the cell got reactivated. */
	if (atomic_read(&cell->active)) {
		up_write(&net->cells_lock);
		goto cell_is_active;
	}

	/* Make sure that the expiring server records are going to see the fact
	 * that the cell is caput.
	 */
	afs_set_cell_state(cell, AFS_CELL_REMOVING);

	afs_deactivate_cell(net, cell);
	afs_purge_servers(cell);

	rb_erase(&cell->net_node, &net->cells);
	afs_see_cell(cell, afs_cell_trace_unuse_delete);
	up_write(&net->cells_lock);

	/* The root volume is pinning the cell */
	afs_put_volume(cell->root_volume, afs_volume_trace_put_cell_root);
	cell->root_volume = NULL;

	afs_set_cell_state(cell, AFS_CELL_DEAD);
	/* Tell afs_manage_cell_work() to drop the final ref. */
	return true;
}
849 
850 static void afs_manage_cell_work(struct work_struct *work)
851 {
852 	struct afs_cell *cell = container_of(work, struct afs_cell, manager);
853 	bool final_put;
854 
855 	afs_see_cell(cell, afs_cell_trace_manage);
856 	final_put = afs_manage_cell(cell);
857 	afs_see_cell(cell, afs_cell_trace_managed);
858 	if (final_put)
859 		afs_put_cell(cell, afs_cell_trace_put_final);
860 }
861 
862 /*
863  * Purge in-memory cell database.
864  */
void afs_cell_purge(struct afs_net *net)
{
	struct afs_cell *ws;
	struct rb_node *cursor;

	_enter("");

	/* Drop the workstation cell's activation ref. */
	down_write(&net->cells_lock);
	ws = rcu_replace_pointer(net->ws_cell, NULL,
				 lockdep_is_held(&net->cells_lock));
	up_write(&net->cells_lock);
	afs_unuse_cell(ws, afs_cell_trace_unuse_ws);

	/* Unpin each remaining cell and kick its manager so the cell gets
	 * garbage collected.
	 */
	_debug("kick cells");
	down_read(&net->cells_lock);
	for (cursor = rb_first(&net->cells); cursor; cursor = rb_next(cursor)) {
		struct afs_cell *cell = rb_entry(cursor, struct afs_cell, net_node);

		afs_see_cell(cell, afs_cell_trace_purge);

		if (test_and_clear_bit(AFS_CELL_FL_NO_GC, &cell->flags))
			afs_unuse_cell(cell, afs_cell_trace_unuse_pin);

		afs_queue_cell(cell, afs_cell_trace_queue_purge);
	}
	up_read(&net->cells_lock);

	/* Wait until every cell has been destroyed (see
	 * afs_dec_cells_outstanding()).
	 */
	_debug("wait");
	wait_var_event(&net->cells_outstanding,
		       !atomic_read(&net->cells_outstanding));
	_leave("");
}
897