// SPDX-License-Identifier: GPL-2.0-or-later
/* AF_RXRPC implementation
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/key-type.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#define CREATE_TRACE_POINTS
#include "ar-internal.h"

MODULE_DESCRIPTION("RxRPC network protocol");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_RXRPC);

unsigned int rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
module_param_named(debug, rxrpc_debug, uint, 0644);
MODULE_PARM_DESC(debug, "RxRPC debugging mask");

static struct proto rxrpc_proto;
static const struct proto_ops rxrpc_rpc_ops;

/* current debugging ID */
atomic_t rxrpc_debug_id;
EXPORT_SYMBOL(rxrpc_debug_id);

/* count of skbs currently in use */
atomic_t rxrpc_n_rx_skbs;

struct workqueue_struct *rxrpc_workqueue;

static void rxrpc_sock_destructor(struct sock *);

/*
 * see if an RxRPC socket is currently writable
 */
static inline int rxrpc_writable(struct sock *sk)
{
	return refcount_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
}

/*
 * wait for write bufferage to become available
 */
static void rxrpc_write_space(struct sock *sk)
{
	_enter("%p", sk);
	rcu_read_lock();
	if (rxrpc_writable(sk)) {
		struct socket_wq *wq = rcu_dereference(sk->sk_wq);

		if (skwq_has_sleeper(wq))
			wake_up_interruptible(&wq->wait);
		sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/*
 * validate an RxRPC address
 */
static int rxrpc_validate_address(struct rxrpc_sock *rx,
				  struct sockaddr_rxrpc *srx,
				  int len)
{
	unsigned int tail;

	if (len < sizeof(struct sockaddr_rxrpc))
		return -EINVAL;

	if (srx->srx_family != AF_RXRPC)
		return -EAFNOSUPPORT;

	if (srx->transport_type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	len -= offsetof(struct sockaddr_rxrpc, transport);
	if (srx->transport_len < sizeof(sa_family_t) ||
	    srx->transport_len > len)
		return -EINVAL;

	switch (srx->transport.family) {
	case AF_INET:
		if (rx->family != AF_INET &&
		    rx->family != AF_INET6)
			return -EAFNOSUPPORT;
		if (srx->transport_len < sizeof(struct sockaddr_in))
			return -EINVAL;
		tail = offsetof(struct sockaddr_rxrpc, transport.sin.__pad);
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		if (rx->family != AF_INET6)
			return -EAFNOSUPPORT;
		if (srx->transport_len < sizeof(struct sockaddr_in6))
			return -EINVAL;
		tail = offsetof(struct sockaddr_rxrpc, transport) +
			sizeof(struct sockaddr_in6);
		break;
#endif

	default:
		return -EAFNOSUPPORT;
	}

	if (tail < len)
		memset((void *)srx + tail, 0, len - tail);
	_debug("INET: %pISp", &srx->transport);
	return 0;
}
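
/*
 * Example (sketch): a sockaddr_rxrpc that passes the checks above, as a
 * caller might build it for bind(), connect() or sendmsg().  The service ID
 * and port are illustrative assumptions, not values defined by this file:
 *
 *	struct sockaddr_rxrpc srx = {
 *		.srx_family	= AF_RXRPC,
 *		.srx_service	= 1,
 *		.transport_type	= SOCK_DGRAM,
 *		.transport_len	= sizeof(struct sockaddr_in),
 *		.transport.sin.sin_family = AF_INET,
 *		.transport.sin.sin_port = htons(7000),
 *		.transport.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
 *	};
 */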

/*
 * bind a local address to an RxRPC socket
 */
static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr;
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	u16 service_id;
	int ret;

	_enter("%p,%p,%d", rx, saddr, len);

	ret = rxrpc_validate_address(rx, srx, len);
	if (ret < 0)
		goto error;
	service_id = srx->srx_service;

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		rx->srx = *srx;
		local = rxrpc_lookup_local(sock_net(&rx->sk), &rx->srx);
		if (IS_ERR(local)) {
			ret = PTR_ERR(local);
			goto error_unlock;
		}

		if (service_id) {
			write_lock(&local->services_lock);
			if (local->service)
				goto service_in_use;
			rx->local = local;
			local->service = rx;
			write_unlock(&local->services_lock);

			rx->sk.sk_state = RXRPC_SERVER_BOUND;
		} else {
			rx->local = local;
			rx->sk.sk_state = RXRPC_CLIENT_BOUND;
		}
		break;

	case RXRPC_SERVER_BOUND:
		ret = -EINVAL;
		if (service_id == 0)
			goto error_unlock;
		ret = -EADDRINUSE;
		if (service_id == rx->srx.srx_service)
			goto error_unlock;
		ret = -EINVAL;
		srx->srx_service = rx->srx.srx_service;
		if (memcmp(srx, &rx->srx, sizeof(*srx)) != 0)
			goto error_unlock;
		rx->second_service = service_id;
		rx->sk.sk_state = RXRPC_SERVER_BOUND2;
		break;

	default:
		ret = -EINVAL;
		goto error_unlock;
	}

	release_sock(&rx->sk);
	_leave(" = 0");
	return 0;

service_in_use:
	write_unlock(&local->services_lock);
	rxrpc_unuse_local(local, rxrpc_local_unuse_bind);
	rxrpc_put_local(local, rxrpc_local_put_bind);
	ret = -EADDRINUSE;
error_unlock:
	release_sock(&rx->sk);
error:
	_leave(" = %d", ret);
	return ret;
}
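
/*
 * Example (sketch, userspace): binding a second service ID to an already
 * bound server socket moves it to the two-service state relied on by the
 * RXRPC_UPGRADEABLE_SERVICE option further down.  The IDs are illustrative:
 *
 *	srx.srx_service = 1;
 *	bind(fd, (struct sockaddr *)&srx, sizeof(srx));
 *	srx.srx_service = 2;	// all other fields must match the first bind
 *	bind(fd, (struct sockaddr *)&srx, sizeof(srx));
 */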

/*
 * set the number of pending calls permitted on a listening socket
 */
static int rxrpc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	unsigned int max, old;
	int ret;

	_enter("%p,%d", rx, backlog);

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		ret = -EADDRNOTAVAIL;
		break;
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_BOUND2:
		ASSERT(rx->local != NULL);
		max = READ_ONCE(rxrpc_max_backlog);
		ret = -EINVAL;
		if (backlog == INT_MAX)
			backlog = max;
		else if (backlog < 0 || backlog > max)
			break;
		old = sk->sk_max_ack_backlog;
		sk->sk_max_ack_backlog = backlog;
		ret = rxrpc_service_prealloc(rx, GFP_KERNEL);
		if (ret == 0)
			rx->sk.sk_state = RXRPC_SERVER_LISTENING;
		else
			sk->sk_max_ack_backlog = old;
		break;
	case RXRPC_SERVER_LISTENING:
		if (backlog == 0) {
			rx->sk.sk_state = RXRPC_SERVER_LISTEN_DISABLED;
			sk->sk_max_ack_backlog = 0;
			rxrpc_discard_prealloc(rx);
			ret = 0;
			break;
		}
		fallthrough;
	default:
		ret = -EBUSY;
		break;
	}

	release_sock(&rx->sk);
	_leave(" = %d", ret);
	return ret;
}

/**
 * rxrpc_kernel_lookup_peer - Obtain remote transport endpoint for an address
 * @sock: The socket through which it will be accessed
 * @srx: The network address
 * @gfp: Allocation flags
 *
 * Lookup or create a remote transport endpoint record for the specified
 * address.
 *
 * Return: The peer record found (with a reference held), %NULL if no record
 * could be found or created, or an error pointer if the address is invalid
 * or unsupported.
 */
struct rxrpc_peer *rxrpc_kernel_lookup_peer(struct socket *sock,
					    struct sockaddr_rxrpc *srx, gfp_t gfp)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	ret = rxrpc_validate_address(rx, srx, sizeof(*srx));
	if (ret < 0)
		return ERR_PTR(ret);

	return rxrpc_lookup_peer(rx->local, srx, gfp);
}
EXPORT_SYMBOL(rxrpc_kernel_lookup_peer);

/**
 * rxrpc_kernel_get_peer - Get a reference on a peer
 * @peer: The peer to get a reference on (may be NULL).
 *
 * Get a reference for a remote peer record (if not NULL).
 *
 * Return: The @peer argument.
 */
struct rxrpc_peer *rxrpc_kernel_get_peer(struct rxrpc_peer *peer)
{
	return peer ? rxrpc_get_peer(peer, rxrpc_peer_get_application) : NULL;
}
EXPORT_SYMBOL(rxrpc_kernel_get_peer);

/**
 * rxrpc_kernel_put_peer - Allow a kernel app to drop a peer reference
 * @peer: The peer to drop a ref on
 *
 * Drop a reference on a peer record.
 */
void rxrpc_kernel_put_peer(struct rxrpc_peer *peer)
{
	rxrpc_put_peer(peer, rxrpc_peer_put_application);
}
EXPORT_SYMBOL(rxrpc_kernel_put_peer);

/**
 * rxrpc_kernel_begin_call - Allow a kernel service to begin a call
 * @sock: The socket on which to make the call
 * @peer: The peer to contact
 * @key: The security context to use (defaults to socket setting)
 * @user_call_ID: The ID to use
 * @tx_total_len: Total length of data to transmit during the call (or -1)
 * @hard_timeout: The maximum lifespan of the call in seconds
 * @gfp: The allocation constraints
 * @notify_rx: Where to send notifications instead of socket queue
 * @service_id: The ID of the service to contact
 * @upgrade: Request service upgrade for call
 * @interruptibility: How the call may be interrupted or cancelled
 * @debug_id: The debug ID for tracing to be assigned to the call
 *
 * Allow a kernel service to begin a call on the nominated socket.  This just
 * sets up all the internal tracking structures and allocates connection and
 * call IDs as appropriate.
 *
 * The socket's default security context may be overridden by supplying
 * @key; the destination is determined by @peer rather than the socket's
 * default address.
 *
 * Return: The new call or an error code.
 */
struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
					   struct rxrpc_peer *peer,
					   struct key *key,
					   unsigned long user_call_ID,
					   s64 tx_total_len,
					   u32 hard_timeout,
					   gfp_t gfp,
					   rxrpc_notify_rx_t notify_rx,
					   u16 service_id,
					   bool upgrade,
					   enum rxrpc_interruptibility interruptibility,
					   unsigned int debug_id)
{
	struct rxrpc_conn_parameters cp;
	struct rxrpc_call_params p;
	struct rxrpc_call *call;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);

	_enter(",,%x,%lx", key_serial(key), user_call_ID);

	if (WARN_ON_ONCE(peer->local != rx->local))
		return ERR_PTR(-EIO);

	lock_sock(&rx->sk);

	if (!key)
		key = rx->key;
	if (key && !key->payload.data[0])
		key = NULL; /* a no-security key */

	memset(&p, 0, sizeof(p));
	p.user_call_ID		= user_call_ID;
	p.tx_total_len		= tx_total_len;
	p.interruptibility	= interruptibility;
	p.kernel		= true;
	p.timeouts.hard		= hard_timeout;

	memset(&cp, 0, sizeof(cp));
	cp.local		= rx->local;
	cp.peer			= peer;
	cp.key			= key;
	cp.security_level	= rx->min_sec_level;
	cp.exclusive		= false;
	cp.upgrade		= upgrade;
	cp.service_id		= service_id;
	call = rxrpc_new_client_call(rx, &cp, &p, gfp, debug_id);
	/* The socket has been unlocked. */
	if (!IS_ERR(call)) {
		call->notify_rx = notify_rx;
		mutex_unlock(&call->user_mutex);
	}

	_leave(" = %p", call);
	return call;
}
EXPORT_SYMBOL(rxrpc_kernel_begin_call);
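
/*
 * Example (sketch): one way a kernel service might combine the calls above
 * to start a client call.  The user call ID, the 30-second hard timeout, the
 * NULL key and the error mapping for a failed peer lookup are illustrative
 * assumptions, not part of the API contract.
 */
static struct rxrpc_call * __maybe_unused
rxrpc_example_begin_call(struct socket *sock, struct sockaddr_rxrpc *srx,
			 rxrpc_notify_rx_t notify_rx)
{
	struct rxrpc_peer *peer;
	struct rxrpc_call *call;

	peer = rxrpc_kernel_lookup_peer(sock, srx, GFP_KERNEL);
	if (IS_ERR(peer))
		return ERR_CAST(peer);
	if (!peer)
		return ERR_PTR(-ENOMEM);

	call = rxrpc_kernel_begin_call(sock, peer, NULL /* socket's key */,
				       1 /* assumed user call ID */,
				       -1 /* total tx length not yet known */,
				       30 /* assumed hard timeout, seconds */,
				       GFP_KERNEL, notify_rx, srx->srx_service,
				       false /* no service upgrade */,
				       RXRPC_INTERRUPTIBLE,
				       atomic_inc_return(&rxrpc_debug_id));
	/* Assumption: the new call/connection hold their own peer refs. */
	rxrpc_kernel_put_peer(peer);
	return call;
}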

/*
 * Dummy function used to stop the notifier talking to recvmsg().
 */
static void rxrpc_dummy_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
				  unsigned long call_user_ID)
{
}

/**
 * rxrpc_kernel_shutdown_call - Allow a kernel service to shut down a call it was using
 * @sock: The socket the call is on
 * @call: The call to end
 *
 * Allow a kernel service to shut down a call it was using.  The call must be
 * complete before this is called (the call should be aborted if necessary).
 */
void rxrpc_kernel_shutdown_call(struct socket *sock, struct rxrpc_call *call)
{
	_enter("%d{%d}", call->debug_id, refcount_read(&call->ref));

	mutex_lock(&call->user_mutex);
	if (!test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
		rxrpc_release_call(rxrpc_sk(sock->sk), call);

		/* Make sure we're not going to call back into a kernel service */
		if (call->notify_rx) {
			spin_lock_irq(&call->notify_lock);
			call->notify_rx = rxrpc_dummy_notify_rx;
			spin_unlock_irq(&call->notify_lock);
		}
	}
	mutex_unlock(&call->user_mutex);
}
EXPORT_SYMBOL(rxrpc_kernel_shutdown_call);

/**
 * rxrpc_kernel_put_call - Release a reference to a call
 * @sock: The socket the call is on
 * @call: The call to put
 *
 * Drop the application's ref on an rxrpc call.
 */
void rxrpc_kernel_put_call(struct socket *sock, struct rxrpc_call *call)
{
	rxrpc_put_call(call, rxrpc_call_put_kernel);
}
EXPORT_SYMBOL(rxrpc_kernel_put_call);

/**
 * rxrpc_kernel_check_life - Check to see whether a call is still alive
 * @sock: The socket the call is on
 * @call: The call to check
 *
 * Allow a kernel service to find out whether a call is still alive - whether
 * it has completed successfully and all received data has been consumed.
 *
 * Return: %true if the call is still ongoing or has unconsumed received data
 * pending, %false otherwise.
 */
bool rxrpc_kernel_check_life(const struct socket *sock,
			     const struct rxrpc_call *call)
{
	if (!rxrpc_call_is_complete(call))
		return true;
	if (call->completion != RXRPC_CALL_SUCCEEDED)
		return false;
	return !skb_queue_empty(&call->recvmsg_queue);
}
EXPORT_SYMBOL(rxrpc_kernel_check_life);
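
/*
 * Example (sketch): a kernel service might poll the liveness check while
 * draining a call and then discard it.  The wait/processing step is an
 * assumption standing in for the service's real receive logic:
 *
 *	while (rxrpc_kernel_check_life(sock, call)) {
 *		... receive and process data, or sleep briefly ...
 *	}
 *	rxrpc_kernel_shutdown_call(sock, call);
 *	rxrpc_kernel_put_call(sock, call);
 */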

/**
 * rxrpc_kernel_set_notifications - Set table of callback operations
 * @sock: The socket to install table upon
 * @app_ops: Callback operation table to set
 *
 * Allow a kernel service to set a table of event notifications on a socket.
 */
void rxrpc_kernel_set_notifications(struct socket *sock,
				    const struct rxrpc_kernel_ops *app_ops)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);

	rx->app_ops = app_ops;
}
EXPORT_SYMBOL(rxrpc_kernel_set_notifications);

/*
 * connect an RxRPC socket
 * - this just targets it at a specific destination; no actual connection
 *   negotiation takes place
 */
static int rxrpc_connect(struct socket *sock, struct sockaddr *addr,
			 int addr_len, int flags)
{
	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)addr;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter("%p,%p,%d,%d", rx, addr, addr_len, flags);

	ret = rxrpc_validate_address(rx, srx, addr_len);
	if (ret < 0) {
		_leave(" = %d [bad addr]", ret);
		return ret;
	}

	lock_sock(&rx->sk);

	ret = -EISCONN;
	if (test_bit(RXRPC_SOCK_CONNECTED, &rx->flags))
		goto error;

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
		rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
		break;
	case RXRPC_CLIENT_UNBOUND:
	case RXRPC_CLIENT_BOUND:
		break;
	default:
		ret = -EBUSY;
		goto error;
	}

	rx->connect_srx = *srx;
	set_bit(RXRPC_SOCK_CONNECTED, &rx->flags);
	ret = 0;

error:
	release_sock(&rx->sk);
	return ret;
}
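
/*
 * Example (sketch, userspace): connect() merely records a default target, so
 * later sendmsg() calls may omit msg_name.  Address setup is as in the
 * sockaddr_rxrpc example above; no packets are exchanged here:
 *
 *	connect(fd, (struct sockaddr *)&srx, sizeof(srx));
 *	sendmsg(fd, &msg, 0);	// msg.msg_name may now be NULL
 */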

/*
 * send a message through an RxRPC socket
 * - in a client this does a number of things:
 *   - finds/sets up a connection for the security specified (if any)
 *   - initiates a call (ID in control data)
 *   - ends the request phase of a call (if MSG_MORE is not set)
 *   - sends a call data packet
 *   - may send an abort (abort code in control data)
 * - MSG_OOB messages are handed off to rxrpc_sendmsg_oob()
 */
static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
{
	struct rxrpc_local *local;
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	int ret;

	_enter(",{%d},,%zu", rx->sk.sk_state, len);

	if (m->msg_name) {
		ret = rxrpc_validate_address(rx, m->msg_name, m->msg_namelen);
		if (ret < 0) {
			_leave(" = %d [bad addr]", ret);
			return ret;
		}
	}

	lock_sock(&rx->sk);

	switch (rx->sk.sk_state) {
	case RXRPC_UNBOUND:
	case RXRPC_CLIENT_UNBOUND:
		rx->srx.srx_family = AF_RXRPC;
		rx->srx.srx_service = 0;
		rx->srx.transport_type = SOCK_DGRAM;
		rx->srx.transport.family = rx->family;
		switch (rx->family) {
		case AF_INET:
			rx->srx.transport_len = sizeof(struct sockaddr_in);
			break;
#ifdef CONFIG_AF_RXRPC_IPV6
		case AF_INET6:
			rx->srx.transport_len = sizeof(struct sockaddr_in6);
			break;
#endif
		default:
			ret = -EAFNOSUPPORT;
			goto error_unlock;
		}
		local = rxrpc_lookup_local(sock_net(sock->sk), &rx->srx);
		if (IS_ERR(local)) {
			ret = PTR_ERR(local);
			goto error_unlock;
		}

		rx->local = local;
		rx->sk.sk_state = RXRPC_CLIENT_BOUND;
		fallthrough;

	case RXRPC_CLIENT_BOUND:
		if (!m->msg_name &&
		    test_bit(RXRPC_SOCK_CONNECTED, &rx->flags)) {
			m->msg_name = &rx->connect_srx;
			m->msg_namelen = sizeof(rx->connect_srx);
		}
		fallthrough;
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_LISTENING:
		if (m->msg_flags & MSG_OOB)
			ret = rxrpc_sendmsg_oob(rx, m, len);
		else
			ret = rxrpc_do_sendmsg(rx, m, len);
		/* The socket has been unlocked */
		goto out;
	default:
		ret = -EINVAL;
		goto error_unlock;
	}

error_unlock:
	release_sock(&rx->sk);
out:
	_leave(" = %d", ret);
	return ret;
}

int rxrpc_sock_set_min_security_level(struct sock *sk, unsigned int val)
{
	if (sk->sk_state != RXRPC_UNBOUND)
		return -EISCONN;
	if (val > RXRPC_SECURITY_MAX)
		return -EINVAL;
	lock_sock(sk);
	rxrpc_sk(sk)->min_sec_level = val;
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL(rxrpc_sock_set_min_security_level);
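
/*
 * Example (sketch): a kernel service might insist on encrypted calls before
 * it starts using the socket, much as a network filesystem would:
 *
 *	ret = rxrpc_sock_set_min_security_level(socket->sk,
 *						RXRPC_SECURITY_ENCRYPT);
 *	if (ret < 0)
 *		goto error;
 */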

/*
 * set RxRPC socket options
 */
static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
			    sockptr_t optval, unsigned int optlen)
{
	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
	unsigned int min_sec_level, val;
	u16 service_upgrade[2];
	int ret;

	_enter(",%d,%d,,%d", level, optname, optlen);

	lock_sock(&rx->sk);
	ret = -EOPNOTSUPP;

	if (level == SOL_RXRPC) {
		switch (optname) {
		case RXRPC_EXCLUSIVE_CONNECTION:
			ret = -EINVAL;
			if (optlen != 0)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			rx->exclusive = true;
			goto success;

		case RXRPC_SECURITY_KEY:
			ret = -EINVAL;
			if (rx->key)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			ret = rxrpc_request_key(rx, optval, optlen);
			goto error;

		case RXRPC_SECURITY_KEYRING:
			ret = -EINVAL;
			if (rx->key)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			ret = rxrpc_server_keyring(rx, optval, optlen);
			goto error;

		case RXRPC_MIN_SECURITY_LEVEL:
			ret = -EINVAL;
			if (optlen != sizeof(unsigned int))
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			ret = copy_safe_from_sockptr(&min_sec_level,
						     sizeof(min_sec_level),
						     optval, optlen);
			if (ret)
				goto error;
			ret = -EINVAL;
			if (min_sec_level > RXRPC_SECURITY_MAX)
				goto error;
			rx->min_sec_level = min_sec_level;
			goto success;

		case RXRPC_UPGRADEABLE_SERVICE:
			ret = -EINVAL;
			if (optlen != sizeof(service_upgrade) ||
			    rx->service_upgrade.from != 0)
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_SERVER_BOUND2)
				goto error;
			ret = -EFAULT;
			if (copy_from_sockptr(service_upgrade, optval,
					   sizeof(service_upgrade)) != 0)
				goto error;
			ret = -EINVAL;
			if ((service_upgrade[0] != rx->srx.srx_service ||
			     service_upgrade[1] != rx->second_service) &&
			    (service_upgrade[0] != rx->second_service ||
			     service_upgrade[1] != rx->srx.srx_service))
				goto error;
			rx->service_upgrade.from = service_upgrade[0];
			rx->service_upgrade.to = service_upgrade[1];
			goto success;

		case RXRPC_MANAGE_RESPONSE:
			ret = -EINVAL;
			if (optlen != sizeof(unsigned int))
				goto error;
			ret = -EISCONN;
			if (rx->sk.sk_state != RXRPC_UNBOUND)
				goto error;
			ret = copy_safe_from_sockptr(&val, sizeof(val),
						     optval, optlen);
			if (ret)
				goto error;
			ret = -EINVAL;
			if (val > 1)
				goto error;
			if (val)
				set_bit(RXRPC_SOCK_MANAGE_RESPONSE, &rx->flags);
			else
				clear_bit(RXRPC_SOCK_MANAGE_RESPONSE, &rx->flags);
			goto success;

		default:
			break;
		}
	}
	}
	goto error;

success:
	ret = 0;
error:
	release_sock(&rx->sk);
	return ret;
}
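
/*
 * Example (sketch, userspace): after binding two service IDs (see the
 * example above rxrpc_listen()), the pair can be marked upgradeable.  The
 * values are illustrative:
 *
 *	unsigned short svc[2] = { 1, 2 };	// upgrade from 1 to 2
 *	setsockopt(fd, SOL_RXRPC, RXRPC_UPGRADEABLE_SERVICE,
 *		   svc, sizeof(svc));
 */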

/*
 * Get socket options.
 */
static int rxrpc_getsockopt(struct socket *sock, int level, int optname,
			    char __user *optval, int __user *_optlen)
{
	int optlen;

	if (level != SOL_RXRPC)
		return -EOPNOTSUPP;

	if (get_user(optlen, _optlen))
		return -EFAULT;

	switch (optname) {
	case RXRPC_SUPPORTED_CMSG:
		if (optlen < sizeof(int))
			return -ETOOSMALL;
		if (put_user(RXRPC__SUPPORTED - 1, (int __user *)optval) ||
		    put_user(sizeof(int), _optlen))
			return -EFAULT;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}
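
/*
 * Example (sketch, userspace): querying the highest control-message type
 * this kernel supports so an application can degrade gracefully:
 *
 *	int last;
 *	socklen_t len = sizeof(last);
 *
 *	getsockopt(fd, SOL_RXRPC, RXRPC_SUPPORTED_CMSG, &last, &len);
 *	// last is now the highest RXRPC_* cmsg type the kernel recognises
 */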

/*
 * permit an RxRPC socket to be polled
 */
static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
			       poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	__poll_t mask;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* the socket is readable if there are any messages waiting on the Rx
	 * queue */
	if (!list_empty(&rx->recvmsg_q))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* the socket is writable if there is space to add new data to the
	 * socket; there is no guarantee that any particular call in progress
	 * on the socket may have space in the Tx ACK window */
	if (rxrpc_writable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

/*
 * create an RxRPC socket
 */
static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
			int kern)
{
	struct rxrpc_net *rxnet;
	struct rxrpc_sock *rx;
	struct sock *sk;

	_enter("%p,%d", sock, protocol);

	/* we support transport protocol UDP/UDP6 only */
	if (protocol != PF_INET &&
	    !(IS_ENABLED(CONFIG_AF_RXRPC_IPV6) && protocol == PF_INET6))
		return -EPROTONOSUPPORT;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	sock->ops = &rxrpc_rpc_ops;
	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sock_set_flag(sk, SOCK_RCU_FREE);
	sk->sk_state		= RXRPC_UNBOUND;
	sk->sk_write_space	= rxrpc_write_space;
	sk->sk_max_ack_backlog	= 0;
	sk->sk_destruct		= rxrpc_sock_destructor;

	rx = rxrpc_sk(sk);
	rx->family = protocol;
	rx->calls = RB_ROOT;

	spin_lock_init(&rx->incoming_lock);
	skb_queue_head_init(&rx->recvmsg_oobq);
	rx->pending_oobq = RB_ROOT;
	INIT_LIST_HEAD(&rx->sock_calls);
	INIT_LIST_HEAD(&rx->to_be_accepted);
	INIT_LIST_HEAD(&rx->recvmsg_q);
	spin_lock_init(&rx->recvmsg_lock);
	rwlock_init(&rx->call_lock);
	memset(&rx->srx, 0, sizeof(rx->srx));

	rxnet = rxrpc_net(sock_net(&rx->sk));
	timer_reduce(&rxnet->peer_keepalive_timer, jiffies + 1);

	_leave(" = 0 [%p]", rx);
	return 0;
}

/*
 * Kill all the calls on a socket and shut it down.
 */
static int rxrpc_shutdown(struct socket *sock, int flags)
{
	struct sock *sk = sock->sk;
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	int ret = 0;

	_enter("%p,%d", sk, flags);

	if (flags != SHUT_RDWR)
		return -EOPNOTSUPP;
	if (sk->sk_state == RXRPC_CLOSE)
		return -ESHUTDOWN;

	lock_sock(sk);

	if (sk->sk_state < RXRPC_CLOSE) {
		spin_lock_irq(&rx->recvmsg_lock);
		sk->sk_state = RXRPC_CLOSE;
		sk->sk_shutdown = SHUTDOWN_MASK;
		spin_unlock_irq(&rx->recvmsg_lock);
	} else {
		ret = -ESHUTDOWN;
	}

	rxrpc_discard_prealloc(rx);

	release_sock(sk);
	return ret;
}

/*
 * Purge the out-of-band queue.
 */
static void rxrpc_purge_oob_queue(struct sock *sk)
{
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&rx->recvmsg_oobq)))
		rxrpc_kernel_free_oob(skb);
	while (!RB_EMPTY_ROOT(&rx->pending_oobq)) {
		skb = rb_entry(rx->pending_oobq.rb_node, struct sk_buff, rbnode);
		rb_erase(&skb->rbnode, &rx->pending_oobq);
		rxrpc_kernel_free_oob(skb);
	}
}

/*
 * RxRPC socket destructor
 */
static void rxrpc_sock_destructor(struct sock *sk)
{
	_enter("%p", sk);

	rxrpc_purge_oob_queue(sk);
	rxrpc_purge_queue(&sk->sk_receive_queue);

	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
	WARN_ON(!sk_unhashed(sk));
	WARN_ON(sk->sk_socket);

	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Attempt to release alive rxrpc socket: %p\n", sk);
		return;
	}
}

/*
 * release an RxRPC socket
 */
static int rxrpc_release_sock(struct sock *sk)
{
	struct rxrpc_sock *rx = rxrpc_sk(sk);

	_enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));

	/* declare the socket closed for business */
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	/* We want to kill off all connections from a service socket
	 * as fast as possible because we can't share these; client
	 * sockets, on the other hand, can share an endpoint.
	 */
	switch (sk->sk_state) {
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_BOUND2:
	case RXRPC_SERVER_LISTENING:
	case RXRPC_SERVER_LISTEN_DISABLED:
		rx->local->service_closed = true;
		break;
	}

	spin_lock_irq(&rx->recvmsg_lock);
	sk->sk_state = RXRPC_CLOSE;
	spin_unlock_irq(&rx->recvmsg_lock);

	if (rx->local && rx->local->service == rx) {
		write_lock(&rx->local->services_lock);
		rx->local->service = NULL;
		write_unlock(&rx->local->services_lock);
	}

	/* try to flush out this socket */
	rxrpc_discard_prealloc(rx);
	rxrpc_release_calls_on_socket(rx);
	flush_workqueue(rxrpc_workqueue);
	rxrpc_purge_oob_queue(sk);
	rxrpc_purge_queue(&sk->sk_receive_queue);

	rxrpc_unuse_local(rx->local, rxrpc_local_unuse_release_sock);
	rxrpc_put_local(rx->local, rxrpc_local_put_release_sock);
	rx->local = NULL;
	key_put(rx->key);
	rx->key = NULL;
	key_put(rx->securities);
	rx->securities = NULL;
	sock_put(sk);

	_leave(" = 0");
	return 0;
}

/*
 * release an RxRPC BSD socket on close() or equivalent
 */
static int rxrpc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	_enter("%p{%p}", sock, sk);

	if (!sk)
		return 0;

	sock->sk = NULL;

	return rxrpc_release_sock(sk);
}

/*
 * RxRPC network protocol
 */
static const struct proto_ops rxrpc_rpc_ops = {
	.family		= PF_RXRPC,
	.owner		= THIS_MODULE,
	.release	= rxrpc_release,
	.bind		= rxrpc_bind,
	.connect	= rxrpc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= rxrpc_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= rxrpc_listen,
	.shutdown	= rxrpc_shutdown,
	.setsockopt	= rxrpc_setsockopt,
	.getsockopt	= rxrpc_getsockopt,
	.sendmsg	= rxrpc_sendmsg,
	.recvmsg	= rxrpc_recvmsg,
	.mmap		= sock_no_mmap,
};

static struct proto rxrpc_proto = {
	.name		= "RXRPC",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct rxrpc_sock),
	.max_header	= sizeof(struct rxrpc_wire_header),
};

static const struct net_proto_family rxrpc_family_ops = {
	.family	= PF_RXRPC,
	.create = rxrpc_create,
	.owner	= THIS_MODULE,
};

/*
 * initialise and register the RxRPC protocol
 */
static int __init af_rxrpc_init(void)
{
	int ret = -1;

	BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof_field(struct sk_buff, cb));

	ret = -ENOMEM;
	rxrpc_gen_version_string();
	rxrpc_call_jar = kmem_cache_create(
		"rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!rxrpc_call_jar) {
		pr_notice("Failed to allocate call jar\n");
		goto error_call_jar;
	}

	rxrpc_workqueue = alloc_ordered_workqueue("krxrpcd", WQ_HIGHPRI | WQ_MEM_RECLAIM);
	if (!rxrpc_workqueue) {
		pr_notice("Failed to allocate work queue\n");
		goto error_work_queue;
	}

	ret = rxrpc_init_security();
	if (ret < 0) {
		pr_crit("Cannot initialise security\n");
		goto error_security;
	}

	ret = register_pernet_device(&rxrpc_net_ops);
	if (ret)
		goto error_pernet;

	ret = proto_register(&rxrpc_proto, 1);
	if (ret < 0) {
		pr_crit("Cannot register protocol\n");
		goto error_proto;
	}

	ret = sock_register(&rxrpc_family_ops);
	if (ret < 0) {
		pr_crit("Cannot register socket family\n");
		goto error_sock;
	}

	ret = register_key_type(&key_type_rxrpc);
	if (ret < 0) {
		pr_crit("Cannot register client key type\n");
		goto error_key_type;
	}

	ret = register_key_type(&key_type_rxrpc_s);
	if (ret < 0) {
		pr_crit("Cannot register server key type\n");
		goto error_key_type_s;
	}

	ret = rxrpc_sysctl_init();
	if (ret < 0) {
		pr_crit("Cannot register sysctls\n");
		goto error_sysctls;
	}

	return 0;

error_sysctls:
	unregister_key_type(&key_type_rxrpc_s);
error_key_type_s:
	unregister_key_type(&key_type_rxrpc);
error_key_type:
	sock_unregister(PF_RXRPC);
error_sock:
	proto_unregister(&rxrpc_proto);
error_proto:
	unregister_pernet_device(&rxrpc_net_ops);
error_pernet:
	rxrpc_exit_security();
error_security:
	destroy_workqueue(rxrpc_workqueue);
error_work_queue:
	kmem_cache_destroy(rxrpc_call_jar);
error_call_jar:
	return ret;
}

/*
 * unregister the RxRPC protocol
 */
static void __exit af_rxrpc_exit(void)
{
	_enter("");
	rxrpc_sysctl_exit();
	unregister_key_type(&key_type_rxrpc_s);
	unregister_key_type(&key_type_rxrpc);
	sock_unregister(PF_RXRPC);
	proto_unregister(&rxrpc_proto);
	unregister_pernet_device(&rxrpc_net_ops);
	ASSERTCMP(atomic_read(&rxrpc_n_rx_skbs), ==, 0);

	/* Make sure the local and peer records pinned by any dying connections
	 * are released.
	 */
	rcu_barrier();

	destroy_workqueue(rxrpc_workqueue);
	rxrpc_exit_security();
	kmem_cache_destroy(rxrpc_call_jar);
	_leave("");
}

module_init(af_rxrpc_init);
module_exit(af_rxrpc_exit);