xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h (revision 37a93dd5c49b5fda807fd204edf2547c3493319c)
1 /*
2  * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  *
32  */
33 
34 #ifndef __MLX5E_EN_ACCEL_H__
35 #define __MLX5E_EN_ACCEL_H__
36 
37 #include <linux/skbuff.h>
38 #include <linux/netdevice.h>
39 #include "en_accel/ipsec_rxtx.h"
40 #include "en_accel/ktls.h"
41 #include "en_accel/ktls_txrx.h"
42 #include <en_accel/macsec.h>
43 #include "en.h"
44 #include "en/txrx.h"
45 #include "en_accel/psp.h"
46 #include "en_accel/psp_rxtx.h"
47 
48 #if IS_ENABLED(CONFIG_GENEVE)
49 #include <net/geneve.h>
50 
/* GENEVE TX acceleration requires the device to support the TX software
 * parser (SWP), which mlx5e_tx_tunnel_accel() uses to describe the tunnel
 * headers to the HW.
 */
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
	return mlx5_tx_swp_supported(mdev);
}
55 
/* Fill the software-parser (SWP) fields of @eseg for a GENEVE packet so the
 * HW can locate the inner headers.
 *
 * @skb:  packet being transmitted; the caller (mlx5e_accel_tx_eseg()) only
 *        invokes this for encapsulated skbs with CHECKSUM_PARTIAL.
 * @eseg: Ethernet segment of the TX WQE being built.
 * @ihs:  inline header size; nonzero means headers are inlined in the WQE,
 *        in which case a present VLAN tag requires adjusting the SWP offsets.
 *
 * Returns silently for anything that is not GENEVE over UDP.
 */
static inline void
mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
	struct mlx5e_swp_spec swp_spec = {};
	unsigned int offset = 0;
	__be16 l3_proto;
	u8 l4_proto;

	/* Outer L3 protocol, looking through a VLAN tag if present. */
	l3_proto = vlan_get_protocol(skb);
	switch (l3_proto) {
	case htons(ETH_P_IP):
		l4_proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		/* Walk the IPv6 extension-header chain to the final protocol. */
		l4_proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
		break;
	default:
		return;
	}

	/* Only GENEVE is handled here, identified by its fixed UDP dport. */
	if (l4_proto != IPPROTO_UDP ||
	    udp_hdr(skb)->dest != cpu_to_be16(GENEVE_UDP_PORT))
		return;
	swp_spec.l3_proto = l3_proto;
	swp_spec.l4_proto = l4_proto;
	swp_spec.is_tun = true;
	/* Inner (tunneled) L3/L4 protocols, read from the inner headers. */
	if (inner_ip_hdr(skb)->version == 6) {
		swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
		swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
	} else {
		swp_spec.tun_l3_proto = htons(ETH_P_IP);
		swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
	}

	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
	/* Inlined headers carrying a VLAN tag shift the SWP offsets. */
	if (skb_vlan_tag_present(skb) && ihs)
		mlx5e_eseg_swp_offsets_add_vlan(eseg);
}
94 
95 #else
/* Stub when CONFIG_GENEVE is disabled: GENEVE TX acceleration never allowed. */
static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
{
	return false;
}
100 
101 #endif /* CONFIG_GENEVE */
102 
/* Per-packet TX acceleration state, filled by mlx5e_accel_tx_begin() and
 * consumed by mlx5e_accel_tx_ids_len() / mlx5e_accel_tx_eseg() /
 * mlx5e_accel_tx_finish().  Each member exists only when the corresponding
 * offload is compiled in.
 */
struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_accel_tx_tls_state tls;		/* kTLS TX state */
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
	struct mlx5e_accel_tx_ipsec_state ipsec;	/* IPsec TX state */
#endif
#ifdef CONFIG_MLX5_EN_PSP
	struct mlx5e_accel_tx_psp_state psp_st;		/* PSP TX state */
#endif
};
114 
/* Run the per-skb TX offload entry points (TLS, PSP, IPsec, MACsec) before
 * the WQE for @skb is built.  Handlers record per-packet state in @state for
 * the later TX stages; the TLS handler may post WQEs of its own on @sq.
 *
 * Return: true to continue the TX path for this skb, false on offload
 * failure (the caller must not transmit the skb as-is).
 */
static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
					struct mlx5e_txqsq *sq,
					struct sk_buff *skb,
					struct mlx5e_accel_tx_state *state)
{
#ifdef CONFIG_MLX5_EN_TLS
	/* May send WQEs. */
	if (tls_is_skb_tx_device_offloaded(skb))
		if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb,
						       &state->tls)))
			return false;
#endif

#ifdef CONFIG_MLX5_EN_PSP
	if (mlx5e_psp_is_offload(skb, dev)) {
		if (unlikely(!mlx5e_psp_handle_tx_skb(dev, skb, &state->psp_st)))
			return false;
	}
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	/* Only on SQs with IPsec enabled, and only for xfrm-offloaded skbs. */
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
		if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, skb, &state->ipsec)))
			return false;
	}
#endif

#ifdef CONFIG_MLX5_MACSEC
	if (unlikely(mlx5e_macsec_skb_is_offload(skb))) {
		struct mlx5e_priv *priv = netdev_priv(dev);

		if (unlikely(!mlx5e_macsec_handle_tx_skb(priv->macsec, skb)))
			return false;
	}
#endif

	return true;
}
153 
/* Length of the per-packet offload metadata ("ids") for this skb's WQE.
 * PSP state takes precedence over IPsec; returns 0 when no offload applies.
 * @skb is unused here but kept for a uniform accel-stage signature.
 */
static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
						  struct sk_buff *skb,
						  struct mlx5e_accel_tx_state *state)
{
#ifdef CONFIG_MLX5_EN_PSP
	if (mlx5e_psp_is_offload_state(&state->psp_st))
		return mlx5e_psp_tx_ids_len(&state->psp_st);
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state))
		return mlx5e_ipsec_tx_ids_len(&state->ipsec);
#endif

	return 0;
}
170 
171 /* Part of the eseg touched by TX offloads */
172 #define MLX5E_ACCEL_ESEG_LEN offsetof(struct mlx5_wqe_eth_seg, mss)
173 
/* Populate the offload-owned leading part of the Ethernet segment (up to
 * MLX5E_ACCEL_ESEG_LEN) for @skb.  Each compiled-in offload contributes its
 * piece; @ihs is forwarded to the GENEVE SWP helper for VLAN adjustment.
 */
static inline void mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
				       struct sk_buff *skb,
				       struct mlx5e_accel_tx_state *accel,
				       struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
#ifdef CONFIG_MLX5_EN_PSP
	if (mlx5e_psp_is_offload_state(&accel->psp_st))
		mlx5e_psp_tx_build_eseg(priv, skb, &accel->psp_st, eseg);
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	if (xfrm_offload(skb))
		mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
#endif

#ifdef CONFIG_MLX5_MACSEC
	if (unlikely(mlx5e_macsec_skb_is_offload(skb)))
		mlx5e_macsec_tx_build_eseg(priv->macsec, skb, eseg);
#endif

#if IS_ENABLED(CONFIG_GENEVE)
	/* SWP hints for encapsulated packets needing inner checksum offload. */
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif
}
199 
/* Final per-offload adjustments after the TX WQE has been built: TLS patches
 * the control segment; PSP and IPsec fill the inline segment @inlseg from
 * the state captured by mlx5e_accel_tx_begin().
 */
static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
					 struct mlx5e_tx_wqe *wqe,
					 struct mlx5e_accel_tx_state *state,
					 struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
	mlx5e_ktls_handle_tx_wqe(&wqe->ctrl, &state->tls);
#endif

#ifdef CONFIG_MLX5_EN_PSP
	if (mlx5e_psp_is_offload_state(&state->psp_st))
		mlx5e_psp_handle_tx_wqe(wqe, &state->psp_st, inlseg);
#endif

#ifdef CONFIG_MLX5_EN_IPSEC
	/* Only when IPsec offload state exists and carries tail data
	 * (tailen — presumably the ESP trailer length; confirm in ipsec_rxtx).
	 */
	if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
	    state->ipsec.xo && state->ipsec.tailen)
		mlx5e_ipsec_handle_tx_wqe(wqe, &state->ipsec, inlseg);
#endif
}
220 
/* Set up RX acceleration: PSP flow-steering tables first, then kTLS RX.
 * A kTLS failure unwinds the PSP tables so no partial state is left behind.
 *
 * Return: 0 on success, negative errno on failure.
 */
static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
{
	int err = mlx5_accel_psp_fs_init_rx_tables(priv);

	if (err)
		return err;

	err = mlx5e_ktls_init_rx(priv);
	if (err)
		mlx5_accel_psp_fs_cleanup_rx_tables(priv);

	return err;
}
236 
/* Tear down RX acceleration in reverse order of mlx5e_accel_init_rx():
 * kTLS RX first, then the PSP flow-steering tables.
 */
static inline void mlx5e_accel_cleanup_rx(struct mlx5e_priv *priv)
{
	mlx5e_ktls_cleanup_rx(priv);
	mlx5_accel_psp_fs_cleanup_rx_tables(priv);
}
242 
/* Set up TX acceleration: PSP flow-steering tables first, then kTLS TX.
 * On kTLS failure the PSP TX tables are unwound (mirroring
 * mlx5e_accel_init_rx()) so a partially-failed init does not leak
 * flow-steering state.
 *
 * Return: 0 on success, negative errno on failure.
 */
static inline int mlx5e_accel_init_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5_accel_psp_fs_init_tx_tables(priv);
	if (err)
		return err;

	err = mlx5e_ktls_init_tx(priv);
	if (err)
		mlx5_accel_psp_fs_cleanup_tx_tables(priv);

	return err;
}
253 
/* Tear down TX acceleration in reverse order of mlx5e_accel_init_tx():
 * kTLS TX first, then the PSP flow-steering tables.
 */
static inline void mlx5e_accel_cleanup_tx(struct mlx5e_priv *priv)
{
	mlx5e_ktls_cleanup_tx(priv);
	mlx5_accel_psp_fs_cleanup_tx_tables(priv);
}
259 #endif /* __MLX5E_EN_ACCEL_H__ */
260