// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC virtual connection handler, common bits.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds), and the shorter
 * expiry time applied once the local service has been shut down.
 */
unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;

static void rxrpc_clean_up_connection(struct work_struct *work);
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
					 unsigned long reap_at);

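/*
 * Poke a connection for attention, queuing it on the owning local endpoint
 * for the I/O thread to deal with.  A ref is taken for the attend queue if
 * the connection isn't already queued.
 */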
void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
{
	struct rxrpc_local *local = conn->local;
	bool busy;

	if (WARN_ON_ONCE(!local))
		return;

	spin_lock_irq(&local->lock);
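	/* The connection is already queued for attention if its attend_link
	 * is occupied.
	 */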
	busy = !list_empty(&conn->attend_link);
	if (!busy) {
		rxrpc_get_connection(conn, why);
		list_add_tail(&conn->attend_link, &local->conn_attend_q);
	}
	spin_unlock_irq(&local->lock);
	rxrpc_wake_up_io_thread(local);
}

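/*
 * Connection expiry timer handler.  Poke the connection so that the I/O
 * thread deals with the expiry event.
 */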
static void rxrpc_connection_timer(struct timer_list *timer)
{
	struct rxrpc_connection *conn =
		container_of(timer, struct rxrpc_connection, timer);

	rxrpc_poke_conn(conn, rxrpc_conn_get_poke_timer);
}

/*
 * allocate a new connection
 */
struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
						gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		INIT_LIST_HEAD(&conn->cache_link);
		timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
		INIT_WORK(&conn->processor, rxrpc_process_connection);
		INIT_WORK(&conn->destructor, rxrpc_clean_up_connection);
		INIT_LIST_HEAD(&conn->proc_link);
		INIT_LIST_HEAD(&conn->link);
		INIT_LIST_HEAD(&conn->attend_link);
		mutex_init(&conn->security_lock);
		mutex_init(&conn->tx_data_alloc_lock);
		skb_queue_head_init(&conn->rx_queue);
		conn->rxnet = rxnet;
		conn->security = &rxrpc_no_security;
		rwlock_init(&conn->security_use_lock);
		spin_lock_init(&conn->state_lock);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		conn->idle_timestamp = jiffies;
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}

/*
 * Look up a client connection in the cache by protocol parameters.
 *
 * If successful, a pointer to the connection is returned, but no ref is
 * taken.  NULL is returned if there is no match.
 *
 * The caller must be holding the RCU read lock.
 */
struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *local,
							  struct sockaddr_rxrpc *srx,
							  struct sk_buff *skb)
{
	struct rxrpc_connection *conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_peer *peer;

	_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);

	/* Look up client connections by connection ID alone as their
	 * IDs are unique for this machine.
	 */
	conn = idr_find(&local->conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT);
	if (!conn || refcount_read(&conn->ref) == 0) {
		_debug("no conn");
		goto not_found;
	}

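	/* Make sure the connection belongs to the right epoch and came in on
	 * the right local endpoint.
	 */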
	if (conn->proto.epoch != sp->hdr.epoch ||
	    conn->local != local)
		goto not_found;

	/* The packet must also have come from the peer the connection is
	 * bound to, so compare the full transport address, not just the port.
	 */
	peer = conn->peer;
	switch (srx->transport.family) {
	case AF_INET:
		if (peer->srx.transport.sin.sin_port !=
		    srx->transport.sin.sin_port ||
		    peer->srx.transport.sin.sin_addr.s_addr !=
		    srx->transport.sin.sin_addr.s_addr)
			goto not_found;
		break;
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		if (peer->srx.transport.sin6.sin6_port !=
		    srx->transport.sin6.sin6_port ||
		    memcmp(&peer->srx.transport.sin6.sin6_addr,
			   &srx->transport.sin6.sin6_addr,
			   sizeof(struct in6_addr)) != 0)
			goto not_found;
		break;
#endif
	default:
		BUG();
	}

	_leave(" = %p", conn);
	return conn;

not_found:
	_leave(" = NULL");
	return NULL;
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must release the call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
			     struct rxrpc_call *call)
{
	struct rxrpc_channel *chan =
		&conn->channels[call->cid & RXRPC_CHANNELMASK];

	_enter("%d,%x", conn->debug_id, call->cid);

	if (chan->call == call) {
		/* Save the result of the call so that we can repeat it if necessary
		 * through the channel, whilst disposing of the actual call record.
		 */
		trace_rxrpc_disconnect_call(call);
		switch (call->completion) {
		case RXRPC_CALL_SUCCEEDED:
			chan->last_seq = call->rx_highest_seq;
			chan->last_type = RXRPC_PACKET_TYPE_ACK;
			break;
		case RXRPC_CALL_LOCALLY_ABORTED:
			chan->last_abort = call->abort_code;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		default:
			chan->last_abort = RX_CALL_DEAD;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		}

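		/* Note the ID of the call that just ended so that its final
		 * ACK or ABORT can be retransmitted if need be, then advance
		 * the channel's call ID.
		 */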
		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;
		chan->call = NULL;
	}

	_leave("");
}

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
	rxrpc_see_call(call, rxrpc_call_see_disconnected);

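	/* Propagate the call's slow-start threshold back to the peer record
	 * so that subsequent calls to the same peer can start from it.
	 */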
	call->peer->cong_ssthresh = call->cong_ssthresh;

	if (!hlist_unhashed(&call->error_link)) {
		spin_lock_irq(&call->peer->lock);
		hlist_del_init(&call->error_link);
		spin_unlock_irq(&call->peer->lock);
	}

	if (rxrpc_is_client_call(call)) {
		rxrpc_disconnect_client_call(call->bundle, call);
	} else {
		__rxrpc_disconnect_call(conn, call);
		conn->idle_timestamp = jiffies;
		if (atomic_dec_and_test(&conn->active))
			rxrpc_set_service_reap_timer(conn->rxnet,
						     jiffies + rxrpc_connection_expiry * HZ);
	}

	rxrpc_put_call(call, rxrpc_call_put_io_thread);
}

/*
 * Queue a connection's work processor, getting a ref to pass to the work
 * queue.
 */
void rxrpc_queue_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
{
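	/* An active count below 0 means the connection has been taken out of
	 * service by the reaper, so don't queue it.
	 */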
	if (atomic_read(&conn->active) >= 0 &&
	    rxrpc_queue_work(&conn->processor))
		rxrpc_see_connection(conn, why);
}

/*
 * Note the re-emergence of a connection.
 */
void rxrpc_see_connection(struct rxrpc_connection *conn,
			  enum rxrpc_conn_trace why)
{
	if (conn) {
		int r = refcount_read(&conn->ref);

		trace_rxrpc_conn(conn->debug_id, r, why);
	}
}

/*
 * Get a ref on a connection.
 */
struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *conn,
					      enum rxrpc_conn_trace why)
{
	int r;

	__refcount_inc(&conn->ref, &r);
	trace_rxrpc_conn(conn->debug_id, r + 1, why);
	return conn;
}

/*
 * Try to get a ref on a connection.
 */
struct rxrpc_connection *
rxrpc_get_connection_maybe(struct rxrpc_connection *conn,
			   enum rxrpc_conn_trace why)
{
	int r;

	if (conn) {
		if (__refcount_inc_not_zero(&conn->ref, &r))
			trace_rxrpc_conn(conn->debug_id, r + 1, why);
		else
			conn = NULL;
	}
	return conn;
}

/*
 * Set the service connection reap timer.
 */
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
					 unsigned long reap_at)
{
	if (rxnet->live)
		timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
}

/*
 * Free a connection record once the RCU grace period has expired.
 */
static void rxrpc_rcu_free_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);
	struct rxrpc_net *rxnet = conn->rxnet;

	_enter("{%d,u=%d}", conn->debug_id, refcount_read(&conn->ref));

	trace_rxrpc_conn(conn->debug_id, refcount_read(&conn->ref),
			 rxrpc_conn_free);
	kfree(conn);

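	/* If this was the last connection in the namespace, wake up anyone
	 * waiting in rxrpc_destroy_all_connections().
	 */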
	if (atomic_dec_and_test(&rxnet->nr_conns))
		wake_up_var(&rxnet->nr_conns);
}

/*
 * Clean up a dead connection.
 */
static void rxrpc_clean_up_connection(struct work_struct *work)
{
	struct rxrpc_connection *conn =
		container_of(work, struct rxrpc_connection, destructor);
	struct rxrpc_net *rxnet = conn->rxnet;

	ASSERT(!conn->channels[0].call &&
	       !conn->channels[1].call &&
	       !conn->channels[2].call &&
	       !conn->channels[3].call);
	ASSERT(list_empty(&conn->cache_link));

	timer_delete_sync(&conn->timer);
	cancel_work_sync(&conn->processor); /* Processing may restart the timer */
	timer_delete_sync(&conn->timer);

	write_lock(&rxnet->conn_lock);
	list_del_init(&conn->proc_link);
	write_unlock(&rxnet->conn_lock);

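	/* If a PMTUD probe was in flight on this connection, it's now lost;
	 * note that on the peer record so that probing can be redone.
	 */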
	if (conn->pmtud_probe) {
		trace_rxrpc_pmtud_lost(conn, 0);
		conn->peer->pmtud_probing = false;
		conn->peer->pmtud_pending = true;
	}

	rxrpc_purge_queue(&conn->rx_queue);
	rxrpc_free_skb(conn->tx_response, rxrpc_skb_put_response);

	rxrpc_kill_client_conn(conn);

	conn->security->clear(conn);
	key_put(conn->key);
	rxrpc_put_bundle(conn->bundle, rxrpc_bundle_put_conn);
	rxrpc_put_peer(conn->peer, rxrpc_peer_put_conn);
	rxrpc_put_local(conn->local, rxrpc_local_put_kill_conn);

	/* Drain the Rx queue again.  Even though we've unpublished, an
	 * incoming packet could still have been added to the queue whilst we
	 * were cleaning up above.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	page_frag_cache_drain(&conn->tx_data_alloc);
	call_rcu(&conn->rcu, rxrpc_rcu_free_connection);
}

/*
 * Drop a ref on a connection.
 */
void rxrpc_put_connection(struct rxrpc_connection *conn,
			  enum rxrpc_conn_trace why)
{
	unsigned int debug_id;
	bool dead;
	int r;

	if (!conn)
		return;

	debug_id = conn->debug_id;
	dead = __refcount_dec_and_test(&conn->ref, &r);
	trace_rxrpc_conn(debug_id, r - 1, why);
	if (dead) {
		timer_delete(&conn->timer);
		cancel_work(&conn->processor);

		if (in_softirq() || work_busy(&conn->processor) ||
		    timer_pending(&conn->timer))
			/* Can't use the rxrpc workqueue as we need to cancel/flush
			 * something that may be running/waiting there.
			 */
			schedule_work(&conn->destructor);
		else
			rxrpc_clean_up_connection(&conn->destructor);
	}
}

/*
 * reap dead service connections
 */
void rxrpc_service_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, service_conn_reaper);
	unsigned long expire_at, earliest, idle_timestamp, now;
	int active;

	LIST_HEAD(graveyard);

	_enter("");

	now = jiffies;
	earliest = now + MAX_JIFFY_OFFSET;

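	/* Walk the list of service connections, moving expired ones onto a
	 * local graveyard list so that they can be disposed of outside of
	 * the lock.
	 */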
	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		ASSERTCMP(atomic_read(&conn->active), >=, 0);
		if (likely(atomic_read(&conn->active) > 0))
			continue;
		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
			continue;

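		/* Expire immediately if the namespace is being shut down or
		 * the local endpoint is dead; otherwise apply the idle
		 * expiry time (shortened if the service has been closed).
		 */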
		if (rxnet->live && !conn->local->dead) {
			idle_timestamp = READ_ONCE(conn->idle_timestamp);
			expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
			if (conn->local->service_closed)
				expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;

			_debug("reap CONN %d { a=%d,t=%ld }",
			       conn->debug_id, atomic_read(&conn->active),
			       (long)expire_at - (long)now);

			if (time_before(now, expire_at)) {
				if (time_before(expire_at, earliest))
					earliest = expire_at;
				continue;
			}
		}

		/* The activity count sits at 0 whilst the conn is unused on
		 * the list; we reduce that to -1 to make the conn unavailable.
		 */
		active = 0;
		if (!atomic_try_cmpxchg(&conn->active, &active, -1))
			continue;
		rxrpc_see_connection(conn, rxrpc_conn_see_reap_service);

		if (rxrpc_conn_is_client(conn))
			BUG();
		else
			rxrpc_unpublish_service_conn(conn);

		list_move_tail(&conn->link, &graveyard);
	}
	write_unlock(&rxnet->conn_lock);

	if (earliest != now + MAX_JIFFY_OFFSET) {
		_debug("reschedule reaper %ld", (long)earliest - (long)now);
		ASSERT(time_after(earliest, now));
		rxrpc_set_service_reap_timer(rxnet, earliest);
	}

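	/* Dispose of the reaped connections, dropping the ref held for being
	 * on the service connection list.
	 */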
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->active), ==, -1);
		rxrpc_put_connection(conn, rxrpc_conn_put_service_reaped);
	}

	_leave("");
}

/*
 * preemptively destroy all the service connection records rather than
 * waiting for them to time out
 */
void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

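	/* Drop the initial pin on nr_conns (set at namespace init) so that
	 * the count can fall to zero.
	 */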
	atomic_dec(&rxnet->nr_conns);

	timer_delete_sync(&rxnet->service_conn_reap_timer);
	rxrpc_queue_work(&rxnet->service_conn_reaper);
	flush_workqueue(rxrpc_workqueue);

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, refcount_read(&conn->ref));
		leak = true;
	}
	write_unlock(&rxnet->conn_lock);
	BUG_ON(leak);

	ASSERT(list_empty(&rxnet->conn_proc_list));

	/* We need to wait for the connections to be destroyed by RCU as they
	 * pin things that we still need to get rid of.
	 */
	wait_var_event(&rxnet->nr_conns, !atomic_read(&rxnet->nr_conns));
	_leave("");
}