1 // SPDX-License-Identifier: GPL-2.0
2 #include <asm/ioctls.h>
3 #include <linux/io_uring/net.h>
4 #include <linux/errqueue.h>
5 #include <net/sock.h>
6
7 #include "uring_cmd.h"
8 #include "io_uring.h"
9
io_uring_cmd_get_sock_ioctl(struct socket * sock,int op)10 static int io_uring_cmd_get_sock_ioctl(struct socket *sock, int op)
11 {
12 struct sock *sk = sock->sk;
13 struct proto *prot = READ_ONCE(sk->sk_prot);
14 int ret, arg = 0;
15
16 if (!prot || !prot->ioctl)
17 return -EOPNOTSUPP;
18
19 ret = prot->ioctl(sk, op, &arg);
20 if (ret)
21 return ret;
22 return arg;
23 }
24
io_uring_cmd_getsockopt(struct socket * sock,struct io_uring_cmd * cmd,unsigned int issue_flags)25 static inline int io_uring_cmd_getsockopt(struct socket *sock,
26 struct io_uring_cmd *cmd,
27 unsigned int issue_flags)
28 {
29 const struct io_uring_sqe *sqe = cmd->sqe;
30 bool compat = !!(issue_flags & IO_URING_F_COMPAT);
31 int optlen, optname, level, err;
32 void __user *optval;
33
34 level = READ_ONCE(sqe->level);
35 if (level != SOL_SOCKET)
36 return -EOPNOTSUPP;
37
38 optval = u64_to_user_ptr(READ_ONCE(sqe->optval));
39 optname = READ_ONCE(sqe->optname);
40 optlen = READ_ONCE(sqe->optlen);
41
42 err = do_sock_getsockopt(sock, compat, level, optname,
43 USER_SOCKPTR(optval),
44 KERNEL_SOCKPTR(&optlen));
45 if (err)
46 return err;
47
48 /* On success, return optlen */
49 return optlen;
50 }
51
io_uring_cmd_setsockopt(struct socket * sock,struct io_uring_cmd * cmd,unsigned int issue_flags)52 static inline int io_uring_cmd_setsockopt(struct socket *sock,
53 struct io_uring_cmd *cmd,
54 unsigned int issue_flags)
55 {
56 const struct io_uring_sqe *sqe = cmd->sqe;
57 bool compat = !!(issue_flags & IO_URING_F_COMPAT);
58 int optname, optlen, level;
59 void __user *optval;
60 sockptr_t optval_s;
61
62 optval = u64_to_user_ptr(READ_ONCE(sqe->optval));
63 optname = READ_ONCE(sqe->optname);
64 optlen = READ_ONCE(sqe->optlen);
65 level = READ_ONCE(sqe->level);
66 optval_s = USER_SOCKPTR(optval);
67
68 return do_sock_setsockopt(sock, compat, level, optname, optval_s,
69 optlen);
70 }
71
/*
 * Post one 32-byte multishot CQE carrying the tx timestamp of @skb.
 *
 * The CQE pair is laid out as: cqe[0] holds the timestamp key in ->res
 * plus type/hardware flags in ->flags, while cqe[1] is reinterpreted as
 * a struct io_timespec holding the timestamp value (the BUILD_BUG_ON
 * guarantees the two layouts are the same size).
 *
 * Returns false when no usable timestamp could be read from the skb or
 * when posting the CQE failed (e.g. CQ ring full); the caller should
 * stop draining and keep the skb for a later retry.
 */
static bool io_process_timestamp_skb(struct io_uring_cmd *cmd, struct sock *sk,
				     struct sk_buff *skb, unsigned issue_flags)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	struct io_uring_cqe cqe[2];
	struct io_timespec *iots;
	struct timespec64 ts;
	u32 tstype, tskey;
	int ret;

	BUILD_BUG_ON(sizeof(struct io_uring_cqe) != sizeof(struct io_timespec));

	ret = skb_get_tx_timestamp(skb, sk, &ts);
	if (ret < 0)
		return false;

	/* key/type come from the error-queue metadata attached to the skb */
	tskey = serr->ee.ee_data;
	tstype = serr->ee.ee_info;

	cqe->user_data = 0;
	cqe->res = tskey;
	cqe->flags = IORING_CQE_F_MORE | ctx_cqe32_flags(cmd_to_io_kiocb(cmd)->ctx);
	cqe->flags |= tstype << IORING_TIMESTAMP_TYPE_SHIFT;
	/* skb_get_tx_timestamp() reports whether this was a HW timestamp */
	if (ret == SOF_TIMESTAMPING_TX_HARDWARE)
		cqe->flags |= IORING_CQE_F_TSTAMP_HW;

	/* second CQE slot carries the actual timestamp value */
	iots = (struct io_timespec *)&cqe[1];
	iots->tv_sec = ts.tv_sec;
	iots->tv_nsec = ts.tv_nsec;
	return io_uring_cmd_post_mshot_cqe32(cmd, issue_flags, cqe);
}
103
io_uring_cmd_timestamp(struct socket * sock,struct io_uring_cmd * cmd,unsigned int issue_flags)104 static int io_uring_cmd_timestamp(struct socket *sock,
105 struct io_uring_cmd *cmd,
106 unsigned int issue_flags)
107 {
108 struct sock *sk = sock->sk;
109 struct sk_buff_head *q = &sk->sk_error_queue;
110 struct sk_buff *skb, *tmp;
111 struct sk_buff_head list;
112 int ret;
113
114 if (!(issue_flags & IO_URING_F_CQE32))
115 return -EINVAL;
116 ret = io_cmd_poll_multishot(cmd, issue_flags, EPOLLERR);
117 if (unlikely(ret))
118 return ret;
119
120 if (skb_queue_empty_lockless(q))
121 return -EAGAIN;
122 __skb_queue_head_init(&list);
123
124 scoped_guard(spinlock_irq, &q->lock) {
125 skb_queue_walk_safe(q, skb, tmp) {
126 /* don't support skbs with payload */
127 if (!skb_has_tx_timestamp(skb, sk) || skb->len)
128 continue;
129 __skb_unlink(skb, q);
130 __skb_queue_tail(&list, skb);
131 }
132 }
133
134 while (1) {
135 skb = skb_peek(&list);
136 if (!skb)
137 break;
138 if (!io_process_timestamp_skb(cmd, sk, skb, issue_flags))
139 break;
140 __skb_dequeue(&list);
141 consume_skb(skb);
142 }
143
144 if (!unlikely(skb_queue_empty(&list))) {
145 scoped_guard(spinlock_irqsave, &q->lock)
146 skb_queue_splice(&list, q);
147 }
148 return -EAGAIN;
149 }
150
io_uring_cmd_getsockname(struct socket * sock,struct io_uring_cmd * cmd,unsigned int issue_flags)151 static int io_uring_cmd_getsockname(struct socket *sock,
152 struct io_uring_cmd *cmd,
153 unsigned int issue_flags)
154 {
155 const struct io_uring_sqe *sqe = cmd->sqe;
156 struct sockaddr __user *uaddr;
157 unsigned int peer;
158 int __user *ulen;
159
160 if (sqe->ioprio || sqe->__pad1 || sqe->len || sqe->rw_flags)
161 return -EINVAL;
162
163 uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
164 ulen = u64_to_user_ptr(READ_ONCE(sqe->addr3));
165 peer = READ_ONCE(sqe->optlen);
166 if (peer > 1)
167 return -EINVAL;
168 return do_getsockname(sock, peer, uaddr, ulen);
169 }
170
/*
 * Entry point for uring_cmd requests issued against a socket fd:
 * dispatch on cmd->cmd_op to the per-operation handler.
 *
 * Returns the handler's result (value or negative errno), or
 * -EOPNOTSUPP for unrecognized operations.
 */
int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	/* for a socket file, private_data is the struct socket */
	struct socket *sock = cmd->file->private_data;

	switch (cmd->cmd_op) {
	case SOCKET_URING_OP_SIOCINQ:
		return io_uring_cmd_get_sock_ioctl(sock, SIOCINQ);
	case SOCKET_URING_OP_SIOCOUTQ:
		return io_uring_cmd_get_sock_ioctl(sock, SIOCOUTQ);
	case SOCKET_URING_OP_GETSOCKOPT:
		return io_uring_cmd_getsockopt(sock, cmd, issue_flags);
	case SOCKET_URING_OP_SETSOCKOPT:
		return io_uring_cmd_setsockopt(sock, cmd, issue_flags);
	case SOCKET_URING_OP_TX_TIMESTAMP:
		return io_uring_cmd_timestamp(sock, cmd, issue_flags);
	case SOCKET_URING_OP_GETSOCKNAME:
		return io_uring_cmd_getsockname(sock, cmd, issue_flags);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
193