/*
 * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/netdevice.h>
#include <net/ipv6.h>
#include "en_accel/tls.h"
#include "accel/tls.h"

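/* Fill an IPv4 tls_flow: clear the ipv6 flag and copy the socket's
 * destination and bound source addresses into the flow descriptor.
 */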
static void mlx5e_tls_set_ipv4_flow(void *flow, struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);

	MLX5_SET(tls_flow, flow, ipv6, 0);
	memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
	       &inet->inet_daddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
	memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv4_layout.ipv4),
	       &inet->inet_rcv_saddr, MLX5_FLD_SZ_BYTES(ipv4_layout, ipv4));
}

#if IS_ENABLED(CONFIG_IPV6)
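/* Fill an IPv6 tls_flow: set the ipv6 flag and copy the socket's
 * destination and source addresses into the flow descriptor.
 */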
static void mlx5e_tls_set_ipv6_flow(void *flow, struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	MLX5_SET(tls_flow, flow, ipv6, 1);
	memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
	       &sk->sk_v6_daddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
	memcpy(MLX5_ADDR_OF(tls_flow, flow, src_ipv4_src_ipv6.ipv6_layout.ipv6),
	       &np->saddr, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
}
#endif

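/* Copy the TCP source and destination ports (already in network byte
 * order in the socket) into the flow descriptor.
 */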
static void mlx5e_tls_set_flow_tcp_ports(void *flow, struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);

	memcpy(MLX5_ADDR_OF(tls_flow, flow, src_port), &inet->inet_sport,
	       MLX5_FLD_SZ_BYTES(tls_flow, src_port));
	memcpy(MLX5_ADDR_OF(tls_flow, flow, dst_port), &inet->inet_dport,
	       MLX5_FLD_SZ_BYTES(tls_flow, dst_port));
}

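/* Build the tls_flow descriptor from the socket's 4-tuple. V4-mapped
 * IPv6 destinations are programmed as IPv4 flows; native IPv6 flows
 * require the MLX5_ACCEL_TLS_IPV6 device capability.
 */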
static int mlx5e_tls_set_flow(void *flow, struct sock *sk, u32 caps)
{
	switch (sk->sk_family) {
	case AF_INET:
		mlx5e_tls_set_ipv4_flow(flow, sk);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		if (!sk->sk_ipv6only &&
		    ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
			mlx5e_tls_set_ipv4_flow(flow, sk);
			break;
		}
		if (!(caps & MLX5_ACCEL_TLS_IPV6))
			goto error_out;

		mlx5e_tls_set_ipv6_flow(flow, sk);
		break;
#endif
	default:
		goto error_out;
	}

	mlx5e_tls_set_flow_tcp_ports(flow, sk);
	return 0;
error_out:
	return -EINVAL;
}

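/* .tls_dev_add callback: program a new TLS offload context into the
 * device. On success, the software ID (swid) returned by the device is
 * stored in the TX or RX offload context for later teardown and resync
 * requests.
 */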
static int mlx5e_tls_add(struct net_device *netdev, struct sock *sk,
			 enum tls_offload_ctx_dir direction,
			 struct tls_crypto_info *crypto_info,
			 u32 start_offload_tcp_sn)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 caps = mlx5_accel_tls_device_caps(mdev);
	int ret = -ENOMEM;
	void *flow;
	u32 swid;

	flow = kzalloc(MLX5_ST_SZ_BYTES(tls_flow), GFP_KERNEL);
	if (!flow)
		return ret;

	ret = mlx5e_tls_set_flow(flow, sk, caps);
	if (ret)
		goto free_flow;

	ret = mlx5_accel_tls_add_flow(mdev, flow, crypto_info,
				      start_offload_tcp_sn, &swid,
				      direction == TLS_OFFLOAD_CTX_DIR_TX);
	if (ret < 0)
		goto free_flow;

	if (direction == TLS_OFFLOAD_CTX_DIR_TX) {
		struct mlx5e_tls_offload_context_tx *tx_ctx =
		    mlx5e_get_tls_tx_context(tls_ctx);

		tx_ctx->swid = htonl(swid);
		tx_ctx->expected_seq = start_offload_tcp_sn;
	} else {
		struct mlx5e_tls_offload_context_rx *rx_ctx =
		    mlx5e_get_tls_rx_context(tls_ctx);

		rx_ctx->handle = htonl(swid);
	}

	return 0;
free_flow:
	kfree(flow);
	return ret;
}

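/* .tls_dev_del callback: remove the device flow identified by the
 * handle stored at setup time.
 */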
static void mlx5e_tls_del(struct net_device *netdev,
			  struct tls_context *tls_ctx,
			  enum tls_offload_ctx_dir direction)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	unsigned int handle;

	handle = ntohl((direction == TLS_OFFLOAD_CTX_DIR_TX) ?
		       mlx5e_get_tls_tx_context(tls_ctx)->swid :
		       mlx5e_get_tls_rx_context(tls_ctx)->handle);

	mlx5_accel_tls_del_flow(priv->mdev, handle,
				direction == TLS_OFFLOAD_CTX_DIR_TX);
}

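/* .tls_dev_resync callback: resynchronize the offload state by passing
 * the TCP sequence number and TLS record serial number to the device.
 * Only the RX direction is supported here.
 */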
static int mlx5e_tls_resync(struct net_device *netdev, struct sock *sk,
			    u32 seq, u8 *rcd_sn_data,
			    enum tls_offload_ctx_dir direction)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_tls_offload_context_rx *rx_ctx;
	__be64 rcd_sn = *(__be64 *)rcd_sn_data;

	if (WARN_ON_ONCE(direction != TLS_OFFLOAD_CTX_DIR_RX))
		return -EINVAL;
	rx_ctx = mlx5e_get_tls_rx_context(tls_ctx);

	netdev_info(netdev, "resyncing seq %u rcd %llu\n", seq,
		    be64_to_cpu(rcd_sn));
	mlx5_accel_tls_resync_rx(priv->mdev, rx_ctx->handle, seq, rcd_sn);
	atomic64_inc(&priv->tls->sw_stats.rx_tls_resync_reply);

	return 0;
}

static const struct tlsdev_ops mlx5e_tls_ops = {
	.tls_dev_add = mlx5e_tls_add,
	.tls_dev_del = mlx5e_tls_del,
	.tls_dev_resync = mlx5e_tls_resync,
};

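/* Advertise TLS offload features on the netdev. kTLS-capable devices
 * are handled by the kTLS code; otherwise (FPGA) features are set
 * according to the device TLS capabilities, and LRO is cleared when
 * the device cannot combine it with TLS offload.
 */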
void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	u32 caps;

	if (mlx5_accel_is_ktls_device(priv->mdev)) {
		mlx5e_ktls_build_netdev(priv);
		return;
	}

	/* FPGA */
	if (!mlx5_accel_is_tls_device(priv->mdev))
		return;

	caps = mlx5_accel_tls_device_caps(priv->mdev);
	if (caps & MLX5_ACCEL_TLS_TX) {
		netdev->features          |= NETIF_F_HW_TLS_TX;
		netdev->hw_features       |= NETIF_F_HW_TLS_TX;
	}

	if (caps & MLX5_ACCEL_TLS_RX) {
		netdev->features          |= NETIF_F_HW_TLS_RX;
		netdev->hw_features       |= NETIF_F_HW_TLS_RX;
	}

	if (!(caps & MLX5_ACCEL_TLS_LRO)) {
		netdev->features          &= ~NETIF_F_LRO;
		netdev->hw_features       &= ~NETIF_F_LRO;
	}

	netdev->tlsdev_ops = &mlx5e_tls_ops;
}

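/* Allocate the per-device TLS state, including the workqueue used for
 * RX resync work.
 */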
int mlx5e_tls_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *tls;

	if (!mlx5_accel_is_tls_device(priv->mdev))
		return 0;

	tls = kzalloc(sizeof(*tls), GFP_KERNEL);
	if (!tls)
		return -ENOMEM;

	tls->rx_wq = create_singlethread_workqueue("mlx5e_tls_rx");
	if (!tls->rx_wq) {
		kfree(tls);
		return -ENOMEM;
	}

	priv->tls = tls;
	return 0;
}

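/* Tear down the per-device TLS state allocated by mlx5e_tls_init(). */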
void mlx5e_tls_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tls *tls = priv->tls;

	if (!tls)
		return;

	destroy_workqueue(tls->rx_wq);
	kfree(tls);
	priv->tls = NULL;
}