/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/rhashtable.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>

#include "mlx5_core.h"
#include "fs_cmd.h"
#include "fpga/ipsec.h"
#include "fpga/sdk.h"
#include "fpga/core.h"

enum mlx5_fpga_ipsec_cmd_status {
	MLX5_FPGA_IPSEC_CMD_PENDING,
	MLX5_FPGA_IPSEC_CMD_SEND_FAIL,
	MLX5_FPGA_IPSEC_CMD_COMPLETE,
};

struct mlx5_fpga_ipsec_cmd_context {
	struct mlx5_fpga_dma_buf buf;
	enum mlx5_fpga_ipsec_cmd_status status;
	struct mlx5_ifc_fpga_ipsec_cmd_resp resp;
	int status_code;
	struct completion complete;
	struct mlx5_fpga_device *dev;
	struct list_head list; /* Item in pending_cmds */
	u8 command[];
};

struct mlx5_fpga_esp_xfrm;

struct mlx5_fpga_ipsec_sa_ctx {
	struct rhash_head		hash;
	struct mlx5_ifc_fpga_ipsec_sa	hw_sa;
	u32				sa_handle;
	struct mlx5_core_dev		*dev;
	struct mlx5_fpga_esp_xfrm	*fpga_xfrm;
};

struct mlx5_fpga_esp_xfrm {
	unsigned int			num_rules;
	struct mlx5_fpga_ipsec_sa_ctx	*sa_ctx;
	struct mutex			lock; /* xfrm lock */
	struct mlx5_accel_esp_xfrm	accel_xfrm;
};

struct mlx5_fpga_ipsec_rule {
	struct rb_node			node;
	struct fs_fte			*fte;
	struct mlx5_fpga_ipsec_sa_ctx	*ctx;
};

static const struct rhashtable_params rhash_sa = {
	/* Exclude the "cmd" field from the key, as its value is not
	 * constant during the lifetime of the key object.
	 */
	.key_len = sizeof_field(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) -
		   sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
	.key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) +
		      sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd),
	.head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
	.automatic_shrinking = true,
	.min_size = 1,
};
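
/*
 * Note: the resulting key is the hw_sa payload starting just past the
 * leading "cmd" word (assuming "cmd" is the first field of the v1
 * layout).  Full-struct comparisons of hw_sa elsewhere in this file do
 * see "cmd", which is why it is zeroed after every command submission.
 */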

struct mlx5_fpga_ipsec {
	struct mlx5_fpga_device *fdev;
	struct list_head pending_cmds;
	spinlock_t pending_cmds_lock; /* Protects pending_cmds */
	u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)];
	struct mlx5_fpga_conn *conn;

	struct notifier_block	fs_notifier_ingress_bypass;
	struct notifier_block	fs_notifier_egress;

	/* Map a hardware SA           -->  its SA context
	 *     (mlx5_fpga_ipsec_sa)         (mlx5_fpga_ipsec_sa_ctx)
	 * This hash is used to prevent duplicate SAs in the FPGA,
	 * which are not allowed.
	 */
	struct rhashtable sa_hash;	/* hw_sa -> mlx5_fpga_ipsec_sa_ctx */
	struct mutex sa_hash_lock;

	/* Tree holding all rules for this fpga device,
	 * keyed by the fs_fte pointer of each rule.
	 */
	struct rb_root rules_rb;
	struct mutex rules_rb_lock; /* rules lock */

	struct ida halloc;
};
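
/*
 * Lock ordering used below: the per-xfrm fpga_xfrm->lock is taken
 * before sa_hash_lock when both are needed (see
 * mlx5_fpga_ipsec_create_sa_ctx() and mlx5_fpga_esp_modify_xfrm()).
 */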

static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
{
	if (!mdev->fpga || !MLX5_CAP_GEN(mdev, fpga))
		return false;

	if (MLX5_CAP_FPGA(mdev, ieee_vendor_id) !=
	    MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX)
		return false;

	if (MLX5_CAP_FPGA(mdev, sandbox_product_id) !=
	    MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC)
		return false;

	return true;
}

static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
					  struct mlx5_fpga_device *fdev,
					  struct mlx5_fpga_dma_buf *buf,
					  u8 status)
{
	struct mlx5_fpga_ipsec_cmd_context *context;

	if (status) {
		context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context,
				       buf);
		mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
			       status);
		context->status = MLX5_FPGA_IPSEC_CMD_SEND_FAIL;
		complete(&context->complete);
	}
}

static inline
int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome)
{
	switch (syndrome) {
	case MLX5_FPGA_IPSEC_RESPONSE_SUCCESS:
		return 0;
	case MLX5_FPGA_IPSEC_RESPONSE_SADB_ISSUE:
		return -EEXIST;
	case MLX5_FPGA_IPSEC_RESPONSE_ILLEGAL_REQUEST:
		return -EINVAL;
	case MLX5_FPGA_IPSEC_RESPONSE_WRITE_RESPONSE_ISSUE:
		return -EIO;
	}
	return -EIO;
}

static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
{
	struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data;
	struct mlx5_fpga_ipsec_cmd_context *context;
	enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome;
	struct mlx5_fpga_device *fdev = cb_arg;
	unsigned long flags;

	if (buf->sg[0].size < sizeof(*resp)) {
		mlx5_fpga_warn(fdev, "Short receive from FPGA IPSec: %u < %zu bytes\n",
			       buf->sg[0].size, sizeof(*resp));
		return;
	}

	mlx5_fpga_dbg(fdev, "mlx5_ipsec recv_cb syndrome %08x\n",
		      ntohl(resp->syndrome));

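	/* Responses carry no command identifier: each one is matched,
	 * FIFO-style, to the oldest pending command.  This assumes the
	 * FPGA completes commands in submission order.
	 */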
	spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
	context = list_first_entry_or_null(&fdev->ipsec->pending_cmds,
					   struct mlx5_fpga_ipsec_cmd_context,
					   list);
	if (context)
		list_del(&context->list);
	spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);

	if (!context) {
		mlx5_fpga_warn(fdev, "Received IPSec offload response without pending command request\n");
		return;
	}
	mlx5_fpga_dbg(fdev, "Handling response for %p\n", context);

	syndrome = ntohl(resp->syndrome);
	context->status_code = syndrome_to_errno(syndrome);
	context->status = MLX5_FPGA_IPSEC_CMD_COMPLETE;
	memcpy(&context->resp, resp, sizeof(*resp));

	if (context->status_code)
		mlx5_fpga_warn(fdev, "IPSec command failed with syndrome %08x\n",
			       syndrome);

	complete(&context->complete);
}

static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
				      const void *cmd, int cmd_size)
{
	struct mlx5_fpga_ipsec_cmd_context *context;
	struct mlx5_fpga_device *fdev = mdev->fpga;
	unsigned long flags;
	int res;

	if (!fdev || !fdev->ipsec)
		return ERR_PTR(-EOPNOTSUPP);

	if (cmd_size & 3)
		return ERR_PTR(-EINVAL);

	context = kzalloc(sizeof(*context) + cmd_size, GFP_ATOMIC);
	if (!context)
		return ERR_PTR(-ENOMEM);

	context->status = MLX5_FPGA_IPSEC_CMD_PENDING;
	context->dev = fdev;
	context->buf.complete = mlx5_fpga_ipsec_send_complete;
	init_completion(&context->complete);
	memcpy(&context->command, cmd, cmd_size);
	context->buf.sg[0].size = cmd_size;
	context->buf.sg[0].data = &context->command;

	spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
	res = mlx5_fpga_sbu_conn_sendmsg(fdev->ipsec->conn, &context->buf);
	if (!res)
		list_add_tail(&context->list, &fdev->ipsec->pending_cmds);
	spin_unlock_irqrestore(&fdev->ipsec->pending_cmds_lock, flags);

	if (res) {
		mlx5_fpga_warn(fdev, "Failed to send IPSec command: %d\n", res);
		kfree(context);
		return ERR_PTR(res);
	}

	/* Context should be freed by the caller after completion. */
	return context;
}

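/*
 * A minimal usage sketch for the command pair above (mirroring
 * mlx5_fpga_ipsec_set_caps() below):
 *
 *	context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
 *	if (IS_ERR(context))
 *		return PTR_ERR(context);
 *	err = mlx5_fpga_ipsec_cmd_wait(context);
 *	kfree(context);
 */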
static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
{
	struct mlx5_fpga_ipsec_cmd_context *context = ctx;
	unsigned long timeout =
		msecs_to_jiffies(MLX5_FPGA_CMD_TIMEOUT_MSEC);
	int res;

	res = wait_for_completion_timeout(&context->complete, timeout);
	if (!res) {
		mlx5_fpga_warn(context->dev, "Failure waiting for IPSec command response\n");
		return -ETIMEDOUT;
	}

	if (context->status == MLX5_FPGA_IPSEC_CMD_COMPLETE)
		res = context->status_code;
	else
		res = -EIO;

	return res;
}

static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec)
{
	if (MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command))
		return true;
	return false;
}

static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev,
					struct mlx5_ifc_fpga_ipsec_sa *hw_sa,
					int opcode)
{
	struct mlx5_core_dev *dev = fdev->mdev;
	struct mlx5_ifc_fpga_ipsec_sa *sa;
	struct mlx5_fpga_ipsec_cmd_context *cmd_context;
	size_t sa_cmd_size;
	int err;

	hw_sa->ipsec_sa_v1.cmd = htonl(opcode);
	if (is_v2_sadb_supported(fdev->ipsec))
		sa_cmd_size = sizeof(*hw_sa);
	else
		sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1);

	cmd_context = (struct mlx5_fpga_ipsec_cmd_context *)
			mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size);
	if (IS_ERR(cmd_context))
		return PTR_ERR(cmd_context);

	err = mlx5_fpga_ipsec_cmd_wait(cmd_context);
	if (err)
		goto out;

	sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command;
	if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) {
		mlx5_fpga_err(fdev, "SA handle mismatch: cmd 0x%08x vs resp 0x%08x\n",
			      ntohl(sa->ipsec_sa_v1.sw_sa_handle),
			      ntohl(cmd_context->resp.sw_sa_handle));
		err = -EIO;
	}

out:
	kfree(cmd_context);
	return err;
}

u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;
	u32 ret = 0;

	if (!mlx5_fpga_is_ipsec_device(mdev))
		return ret;

	ret |= MLX5_ACCEL_IPSEC_CAP_DEVICE;
	ret |= MLX5_ACCEL_IPSEC_CAP_REQUIRED_METADATA;

	if (!fdev->ipsec)
		return ret;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esp))
		ret |= MLX5_ACCEL_IPSEC_CAP_ESP;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, ipv6))
		ret |= MLX5_ACCEL_IPSEC_CAP_IPV6;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, lso))
		ret |= MLX5_ACCEL_IPSEC_CAP_LSO;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer))
		ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER;

	if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, esn)) {
		ret |= MLX5_ACCEL_IPSEC_CAP_ESN;
		ret |= MLX5_ACCEL_IPSEC_CAP_TX_IV_IS_ESN;
	}

	return ret;
}

static unsigned int mlx5_fpga_ipsec_counters_count(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;

	if (!fdev || !fdev->ipsec)
		return 0;

	return MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
			number_of_ipsec_counters);
}

static int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
					 unsigned int counters_count)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;
	unsigned int i;
	__be32 *data;
	u32 count;
	u64 addr;
	int ret;

	if (!fdev || !fdev->ipsec)
		return 0;

	addr = (u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
			     ipsec_counters_addr_low) +
	       ((u64)MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps,
			     ipsec_counters_addr_high) << 32);

	count = mlx5_fpga_ipsec_counters_count(mdev);

	data = kzalloc(array3_size(sizeof(*data), count, 2), GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto out;
	}

	ret = mlx5_fpga_mem_read(fdev, count * sizeof(u64), addr, data,
				 MLX5_FPGA_ACCESS_TYPE_DONTCARE);
	if (ret < 0) {
		mlx5_fpga_err(fdev, "Failed to read IPSec counters from HW: %d\n",
			      ret);
		goto out;
	}
	ret = 0;

	if (count > counters_count)
		count = counters_count;

	/* Each counter is low word, then high. But each word is big-endian */
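	/* For example, a counter whose value is 0x0000000200000001 is
	 * read back as the two words htonl(1), htonl(2), in that order.
	 */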
	for (i = 0; i < count; i++)
		counters[i] = (u64)ntohl(data[i * 2]) |
			      ((u64)ntohl(data[i * 2 + 1]) << 32);

out:
	kfree(data);
	return ret;
}

static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags)
{
	struct mlx5_fpga_ipsec_cmd_context *context;
	struct mlx5_ifc_fpga_ipsec_cmd_cap cmd = {0};
	int err;

	cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP);
	cmd.flags = htonl(flags);
	context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd));
	if (IS_ERR(context))
		return PTR_ERR(context);

	err = mlx5_fpga_ipsec_cmd_wait(context);
	if (err)
		goto out;

	if ((context->resp.flags & cmd.flags) != cmd.flags) {
		mlx5_fpga_err(context->dev, "Failed to set capabilities. cmd 0x%08x vs resp 0x%08x\n",
			      cmd.flags,
			      context->resp.flags);
		err = -EIO;
	}

out:
	kfree(context);
	return err;
}

static int mlx5_fpga_ipsec_enable_supported_caps(struct mlx5_core_dev *mdev)
{
	u32 dev_caps = mlx5_fpga_ipsec_device_caps(mdev);
	u32 flags = 0;

	if (dev_caps & MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER)
		flags |= MLX5_FPGA_IPSEC_CAP_NO_TRAILER;

	return mlx5_fpga_ipsec_set_caps(mdev, flags);
}

static void
mlx5_fpga_ipsec_build_hw_xfrm(struct mlx5_core_dev *mdev,
			      const struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
			      struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
{
	const struct aes_gcm_keymat *aes_gcm = &xfrm_attrs->keymat.aes_gcm;

	/* key */
	memcpy(&hw_sa->ipsec_sa_v1.key_enc, aes_gcm->aes_key,
	       aes_gcm->key_len / 8);
	/* Per the HW layout, a 128-bit key is duplicated into both key slots */
	if (aes_gcm->key_len == 128)
		memcpy(&hw_sa->ipsec_sa_v1.key_enc[16],
		       aes_gcm->aes_key, aes_gcm->key_len / 8);

	/* salt and seq_iv */
	memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, &aes_gcm->seq_iv,
	       sizeof(aes_gcm->seq_iv));
	memcpy(&hw_sa->ipsec_sa_v1.gcm.salt, &aes_gcm->salt,
	       sizeof(aes_gcm->salt));

	/* esn */
	if (xfrm_attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
		hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_ESN_EN;
		hw_sa->ipsec_sa_v1.flags |=
				(xfrm_attrs->flags &
				 MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP) ?
					MLX5_FPGA_IPSEC_SA_ESN_OVERLAP : 0;
		hw_sa->esn = htonl(xfrm_attrs->esn);
	} else {
		hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_EN;
		hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_ESN_OVERLAP;
		hw_sa->esn = 0;
	}

	/* rx handle */
	hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(xfrm_attrs->sa_handle);

	/* enc mode */
	switch (aes_gcm->key_len) {
	case 128:
		hw_sa->ipsec_sa_v1.enc_mode =
			MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_128_AUTH_128;
		break;
	case 256:
		hw_sa->ipsec_sa_v1.enc_mode =
			MLX5_FPGA_IPSEC_SA_ENC_MODE_AES_GCM_256_AUTH_128;
		break;
	}

	/* flags */
	hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_SA_VALID |
			MLX5_FPGA_IPSEC_SA_SPI_EN |
			MLX5_FPGA_IPSEC_SA_IP_ESP;

	if (xfrm_attrs->action & MLX5_ACCEL_ESP_ACTION_ENCRYPT)
		hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_DIR_SX;
	else
		hw_sa->ipsec_sa_v1.flags &= ~MLX5_FPGA_IPSEC_SA_DIR_SX;
}

static void
mlx5_fpga_ipsec_build_hw_sa(struct mlx5_core_dev *mdev,
			    struct mlx5_accel_esp_xfrm_attrs *xfrm_attrs,
			    const __be32 saddr[4],
			    const __be32 daddr[4],
			    const __be32 spi, bool is_ipv6,
			    struct mlx5_ifc_fpga_ipsec_sa *hw_sa)
{
	mlx5_fpga_ipsec_build_hw_xfrm(mdev, xfrm_attrs, hw_sa);

	/* IPs */
	memcpy(hw_sa->ipsec_sa_v1.sip, saddr, sizeof(hw_sa->ipsec_sa_v1.sip));
	memcpy(hw_sa->ipsec_sa_v1.dip, daddr, sizeof(hw_sa->ipsec_sa_v1.dip));

	/* SPI */
	hw_sa->ipsec_sa_v1.spi = spi;

	/* flags */
	if (is_ipv6)
		hw_sa->ipsec_sa_v1.flags |= MLX5_FPGA_IPSEC_SA_IPV6;
}

static bool is_full_mask(const void *p, size_t len)
{
	WARN_ON(len % 4);

	return !memchr_inv(p, 0xff, len);
}

static bool validate_fpga_full_mask(struct mlx5_core_dev *dev,
				    const u32 *match_c,
				    const u32 *match_v)
{
	const void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
						 match_c,
						 misc_parameters);
	const void *headers_c = MLX5_ADDR_OF(fte_match_param,
					     match_c,
					     outer_headers);
	const void *headers_v = MLX5_ADDR_OF(fte_match_param,
					     match_v,
					     outer_headers);

	if (mlx5_fs_is_outer_ipv4_flow(dev, headers_c, headers_v)) {
		const void *s_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    src_ipv4_src_ipv6.ipv4_layout.ipv4);
		const void *d_ipv4_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    dst_ipv4_dst_ipv6.ipv4_layout.ipv4);

		if (!is_full_mask(s_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
							      ipv4)) ||
		    !is_full_mask(d_ipv4_c, MLX5_FLD_SZ_BYTES(ipv4_layout,
							      ipv4)))
			return false;
	} else {
		const void *s_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    src_ipv4_src_ipv6.ipv6_layout.ipv6);
		const void *d_ipv6_c = MLX5_ADDR_OF(fte_match_set_lyr_2_4,
						    headers_c,
						    dst_ipv4_dst_ipv6.ipv6_layout.ipv6);

		if (!is_full_mask(s_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
							      ipv6)) ||
		    !is_full_mask(d_ipv6_c, MLX5_FLD_SZ_BYTES(ipv6_layout,
							      ipv6)))
			return false;
	}

	if (!is_full_mask(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
				       outer_esp_spi),
			  MLX5_FLD_SZ_BYTES(fte_match_set_misc, outer_esp_spi)))
		return false;

	return true;
}

static bool mlx5_is_fpga_ipsec_rule(struct mlx5_core_dev *dev,
				    u8 match_criteria_enable,
				    const u32 *match_c,
				    const u32 *match_v)
{
	u32 ipsec_dev_caps = mlx5_fpga_ipsec_device_caps(dev);
	bool ipv6_flow;

	ipv6_flow = mlx5_fs_is_outer_ipv6_flow(dev, match_c, match_v);

	if (!(match_criteria_enable & MLX5_MATCH_OUTER_HEADERS) ||
	    mlx5_fs_is_outer_udp_flow(match_c, match_v) ||
	    mlx5_fs_is_outer_tcp_flow(match_c, match_v) ||
	    mlx5_fs_is_vxlan_flow(match_c) ||
	    !(mlx5_fs_is_outer_ipv4_flow(dev, match_c, match_v) ||
	      ipv6_flow))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_DEVICE))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_ESP) &&
	    mlx5_fs_is_outer_ipsec_flow(match_c))
		return false;

	if (!(ipsec_dev_caps & MLX5_ACCEL_IPSEC_CAP_IPV6) &&
	    ipv6_flow)
		return false;

	if (!validate_fpga_full_mask(dev, match_c, match_v))
		return false;

	return true;
}

static bool mlx5_is_fpga_egress_ipsec_rule(struct mlx5_core_dev *dev,
					   u8 match_criteria_enable,
					   const u32 *match_c,
					   const u32 *match_v,
					   struct mlx5_flow_act *flow_act,
					   struct mlx5_flow_context *flow_context)
{
	const void *outer_c = MLX5_ADDR_OF(fte_match_param, match_c,
					   outer_headers);
	bool is_dmac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_47_16) ||
			MLX5_GET(fte_match_set_lyr_2_4, outer_c, dmac_15_0);
	bool is_smac = MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_47_16) ||
			MLX5_GET(fte_match_set_lyr_2_4, outer_c, smac_15_0);
	int ret;

	ret = mlx5_is_fpga_ipsec_rule(dev, match_criteria_enable, match_c,
				      match_v);
	if (!ret)
		return ret;

	if (is_dmac || is_smac ||
	    (match_criteria_enable &
	     ~(MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS)) ||
	    (flow_act->action & ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT | MLX5_FLOW_CONTEXT_ACTION_ALLOW)) ||
	     (flow_context->flags & FLOW_CONTEXT_HAS_TAG))
		return false;

	return true;
}

static void *mlx5_fpga_ipsec_create_sa_ctx(struct mlx5_core_dev *mdev,
					   struct mlx5_accel_esp_xfrm *accel_xfrm,
					   const __be32 saddr[4], const __be32 daddr[4],
					   const __be32 spi, bool is_ipv6, u32 *sa_handle)
{
	struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			container_of(accel_xfrm, typeof(*fpga_xfrm),
				     accel_xfrm);
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	int opcode, err;
	void *context;

	/* alloc SA */
	sa_ctx = kzalloc(sizeof(*sa_ctx), GFP_KERNEL);
	if (!sa_ctx)
		return ERR_PTR(-ENOMEM);

	sa_ctx->dev = mdev;

	/* build candidate SA */
	mlx5_fpga_ipsec_build_hw_sa(mdev, &accel_xfrm->attrs,
				    saddr, daddr, spi, is_ipv6,
				    &sa_ctx->hw_sa);

	mutex_lock(&fpga_xfrm->lock);

	if (fpga_xfrm->sa_ctx) {        /* multiple rules for the same accel_xfrm */
		/* all rules must share the same IPs and SPI */
		if (memcmp(&sa_ctx->hw_sa, &fpga_xfrm->sa_ctx->hw_sa,
			   sizeof(sa_ctx->hw_sa))) {
			context = ERR_PTR(-EINVAL);
			goto exists;
		}

		++fpga_xfrm->num_rules;
		context = fpga_xfrm->sa_ctx;
		goto exists;
	}

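	/* Only DECRYPT (ingress) SAs take a handle from the ida; it is
	 * reported back to the caller through *sa_handle.
	 */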
	if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT) {
		err = ida_simple_get(&fipsec->halloc, 1, 0, GFP_KERNEL);
		if (err < 0) {
			context = ERR_PTR(err);
			goto exists;
		}

		sa_ctx->sa_handle = err;
		if (sa_handle)
			*sa_handle = sa_ctx->sa_handle;
	}
	/* This is an unbound fpga_xfrm; try to add it to the hash */
	mutex_lock(&fipsec->sa_hash_lock);

	err = rhashtable_lookup_insert_fast(&fipsec->sa_hash, &sa_ctx->hash,
					    rhash_sa);
	if (err) {
		/* Can't bind a different accel_xfrm to an already
		 * existing sa_ctx, since multiple keymats for the
		 * same IPs and SPI are not supported.
		 */
		context = ERR_PTR(-EEXIST);
		goto unlock_hash;
	}

	/* Bind accel_xfrm to sa_ctx */
	opcode = is_v2_sadb_supported(fdev->ipsec) ?
			MLX5_FPGA_IPSEC_CMD_OP_ADD_SA_V2 :
			MLX5_FPGA_IPSEC_CMD_OP_ADD_SA;
	err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
	sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
	if (err) {
		context = ERR_PTR(err);
		goto delete_hash;
	}

	mutex_unlock(&fipsec->sa_hash_lock);

	++fpga_xfrm->num_rules;
	fpga_xfrm->sa_ctx = sa_ctx;
	sa_ctx->fpga_xfrm = fpga_xfrm;

	mutex_unlock(&fpga_xfrm->lock);

	return sa_ctx;

delete_hash:
	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
				       rhash_sa));
unlock_hash:
	mutex_unlock(&fipsec->sa_hash_lock);
	if (accel_xfrm->attrs.action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
		ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
exists:
	mutex_unlock(&fpga_xfrm->lock);
	kfree(sa_ctx);
	return context;
}

static void *
mlx5_fpga_ipsec_fs_create_sa_ctx(struct mlx5_core_dev *mdev,
				 struct fs_fte *fte,
				 bool is_egress)
{
	struct mlx5_accel_esp_xfrm *accel_xfrm;
	/* zero-init: for IPv4 only the last word of each address is set */
	__be32 saddr[4] = {}, daddr[4] = {};
	__be32 spi;
	struct mlx5_flow_group *fg;
	bool is_ipv6 = false;

	fs_get_obj(fg, fte->node.parent);
	/* validate */
	if (is_egress &&
	    !mlx5_is_fpga_egress_ipsec_rule(mdev,
					    fg->mask.match_criteria_enable,
					    fg->mask.match_criteria,
					    fte->val,
					    &fte->action,
					    &fte->flow_context))
		return ERR_PTR(-EINVAL);
	else if (!mlx5_is_fpga_ipsec_rule(mdev,
					  fg->mask.match_criteria_enable,
					  fg->mask.match_criteria,
					  fte->val))
		return ERR_PTR(-EINVAL);

	/* get xfrm context */
	accel_xfrm =
		(struct mlx5_accel_esp_xfrm *)fte->action.esp_id;

	/* IPs */
	if (mlx5_fs_is_outer_ipv4_flow(mdev, fg->mask.match_criteria,
				       fte->val)) {
		memcpy(&saddr[3],
		       MLX5_ADDR_OF(fte_match_set_lyr_2_4,
				    fte->val,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
				    sizeof(saddr[3]));
		memcpy(&daddr[3],
		       MLX5_ADDR_OF(fte_match_set_lyr_2_4,
				    fte->val,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
				    sizeof(daddr[3]));
	} else {
		memcpy(saddr,
		       MLX5_ADDR_OF(fte_match_param,
				    fte->val,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
				    sizeof(saddr));
		memcpy(daddr,
		       MLX5_ADDR_OF(fte_match_param,
				    fte->val,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
				    sizeof(daddr));
		is_ipv6 = true;
	}

	/* SPI */
	spi = MLX5_GET_BE(typeof(spi),
			  fte_match_param, fte->val,
			  misc_parameters.outer_esp_spi);

	/* create */
	return mlx5_fpga_ipsec_create_sa_ctx(mdev, accel_xfrm,
					     saddr, daddr,
					     spi, is_ipv6, NULL);
}

static void
mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
{
	struct mlx5_fpga_device *fdev = sa_ctx->dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	int opcode = is_v2_sadb_supported(fdev->ipsec) ?
			MLX5_FPGA_IPSEC_CMD_OP_DEL_SA_V2 :
			MLX5_FPGA_IPSEC_CMD_OP_DEL_SA;
	int err;

	err = mlx5_fpga_ipsec_update_hw_sa(fdev, &sa_ctx->hw_sa, opcode);
	sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
	if (WARN_ON(err))
		return;

	/* mirror the allocation condition in mlx5_fpga_ipsec_create_sa_ctx() */
	if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action ==
	    MLX5_ACCEL_ESP_ACTION_DECRYPT)
		ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);

	mutex_lock(&fipsec->sa_hash_lock);
	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash, &sa_ctx->hash,
				       rhash_sa));
	mutex_unlock(&fipsec->sa_hash_lock);
}

static void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			((struct mlx5_fpga_ipsec_sa_ctx *)context)->fpga_xfrm;

	mutex_lock(&fpga_xfrm->lock);
	if (!--fpga_xfrm->num_rules) {
		mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
		kfree(fpga_xfrm->sa_ctx);
		fpga_xfrm->sa_ctx = NULL;
	}
	mutex_unlock(&fpga_xfrm->lock);
}

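/*
 * The rules rb-tree is ordered by the fs_fte pointer value itself, so
 * _rule_search() and _rule_insert() below must use the same ordering
 * on those pointers.
 */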
static inline struct mlx5_fpga_ipsec_rule *
_rule_search(struct rb_root *root, struct fs_fte *fte)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct mlx5_fpga_ipsec_rule *rule =
				container_of(node, struct mlx5_fpga_ipsec_rule,
					     node);

		if (fte < rule->fte)
			node = node->rb_left;
		else if (fte > rule->fte)
			node = node->rb_right;
		else
			return rule;
	}
	return NULL;
}

static struct mlx5_fpga_ipsec_rule *
rule_search(struct mlx5_fpga_ipsec *ipsec_dev, struct fs_fte *fte)
{
	struct mlx5_fpga_ipsec_rule *rule;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	rule = _rule_search(&ipsec_dev->rules_rb, fte);
	mutex_unlock(&ipsec_dev->rules_rb_lock);

	return rule;
}

static inline int _rule_insert(struct rb_root *root,
			       struct mlx5_fpga_ipsec_rule *rule)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct mlx5_fpga_ipsec_rule *this =
				container_of(*new, struct mlx5_fpga_ipsec_rule,
					     node);

		parent = *new;
		if (rule->fte < this->fte)
			new = &((*new)->rb_left);
		else if (rule->fte > this->fte)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&rule->node, parent, new);
	rb_insert_color(&rule->node, root);

	return 0;
}

static int rule_insert(struct mlx5_fpga_ipsec *ipsec_dev,
		       struct mlx5_fpga_ipsec_rule *rule)
{
	int ret;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	ret = _rule_insert(&ipsec_dev->rules_rb, rule);
	mutex_unlock(&ipsec_dev->rules_rb_lock);

	return ret;
}

static inline void _rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
				struct mlx5_fpga_ipsec_rule *rule)
{
	struct rb_root *root = &ipsec_dev->rules_rb;

	mutex_lock(&ipsec_dev->rules_rb_lock);
	rb_erase(&rule->node, root);
	mutex_unlock(&ipsec_dev->rules_rb_lock);
}

static void rule_delete(struct mlx5_fpga_ipsec *ipsec_dev,
			struct mlx5_fpga_ipsec_rule *rule)
{
	_rule_delete(ipsec_dev, rule);
	kfree(rule);
}

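/*
 * Before an ESP FTE is handed to the default firmware commands, its
 * IPsec-specific parts (esp_id, the ENCRYPT/DECRYPT actions and, when
 * the device cannot match on it, the outer_esp_spi value) are hidden
 * and later restored.  mailbox_mod holds the saved values across that
 * round trip:
 *
 *	modify_spec_mailbox(dev, fte, &mbox_mod);
 *	ret = create_fte(ns, ft, fg, fte);
 *	restore_spec_mailbox(fte, &mbox_mod);
 */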
struct mailbox_mod {
	uintptr_t			saved_esp_id;
	u32				saved_action;
	u32				saved_outer_esp_spi_value;
};

static void restore_spec_mailbox(struct fs_fte *fte,
				 struct mailbox_mod *mbox_mod)
{
	char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
					   fte->val,
					   misc_parameters);

	MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi,
		 mbox_mod->saved_outer_esp_spi_value);
	fte->action.action |= mbox_mod->saved_action;
	fte->action.esp_id = (uintptr_t)mbox_mod->saved_esp_id;
}

static void modify_spec_mailbox(struct mlx5_core_dev *mdev,
				struct fs_fte *fte,
				struct mailbox_mod *mbox_mod)
{
	char *misc_params_v = MLX5_ADDR_OF(fte_match_param,
					   fte->val,
					   misc_parameters);

	mbox_mod->saved_esp_id = fte->action.esp_id;
	mbox_mod->saved_action = fte->action.action &
			(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
			 MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
	mbox_mod->saved_outer_esp_spi_value =
			MLX5_GET(fte_match_set_misc, misc_params_v,
				 outer_esp_spi);

	fte->action.esp_id = 0;
	fte->action.action &= ~(MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
				MLX5_FLOW_CONTEXT_ACTION_DECRYPT);
	if (!MLX5_CAP_FLOWTABLE(mdev,
				flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
		MLX5_SET(fte_match_set_misc, misc_params_v, outer_esp_spi, 0);
}

static enum fs_flow_table_type egress_to_fs_ft(bool egress)
{
	return egress ? FS_FT_NIC_TX : FS_FT_NIC_RX;
}

static int fpga_ipsec_fs_create_flow_group(struct mlx5_flow_root_namespace *ns,
					   struct mlx5_flow_table *ft,
					   u32 *in,
					   struct mlx5_flow_group *fg,
					   bool is_egress)
{
	int (*create_flow_group)(struct mlx5_flow_root_namespace *ns,
				 struct mlx5_flow_table *ft, u32 *in,
				 struct mlx5_flow_group *fg) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_flow_group;
	char *misc_params_c = MLX5_ADDR_OF(create_flow_group_in, in,
					   match_criteria.misc_parameters);
	struct mlx5_core_dev *dev = ns->dev;
	u32 saved_outer_esp_spi_mask;
	u8 match_criteria_enable;
	int ret;

	if (MLX5_CAP_FLOWTABLE(dev,
			       flow_table_properties_nic_receive.ft_field_support.outer_esp_spi))
		return create_flow_group(ns, ft, in, fg);

	match_criteria_enable =
		MLX5_GET(create_flow_group_in, in, match_criteria_enable);
	saved_outer_esp_spi_mask =
		MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi);
	if (!match_criteria_enable || !saved_outer_esp_spi_mask)
		return create_flow_group(ns, ft, in, fg);

	MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, 0);

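	/* If clearing outer_esp_spi left the misc match criteria
	 * all-zero (the memcmp against the next byte verifies every
	 * byte of the block is zero), drop MLX5_MATCH_MISC_PARAMETERS
	 * from match_criteria_enable.
	 */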
	if (!(*misc_params_c) &&
	    !memcmp(misc_params_c, misc_params_c + 1, MLX5_ST_SZ_BYTES(fte_match_set_misc) - 1))
		MLX5_SET(create_flow_group_in, in, match_criteria_enable,
			 match_criteria_enable & ~MLX5_MATCH_MISC_PARAMETERS);

	ret = create_flow_group(ns, ft, in, fg);

	MLX5_SET(fte_match_set_misc, misc_params_c, outer_esp_spi, saved_outer_esp_spi_mask);
	MLX5_SET(create_flow_group_in, in, match_criteria_enable, match_criteria_enable);

	return ret;
}

static int fpga_ipsec_fs_create_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *fg,
				    struct fs_fte *fte,
				    bool is_egress)
{
	int (*create_fte)(struct mlx5_flow_root_namespace *ns,
			  struct mlx5_flow_table *ft,
			  struct mlx5_flow_group *fg,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->create_fte;
	struct mlx5_core_dev *dev = ns->dev;
	struct mlx5_fpga_device *fdev = dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	struct mlx5_fpga_ipsec_rule *rule;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return create_fte(ns, ft, fg, fte);

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	rule->ctx = mlx5_fpga_ipsec_fs_create_sa_ctx(dev, fte, is_egress);
	if (IS_ERR(rule->ctx)) {
		int err = PTR_ERR(rule->ctx);

		kfree(rule);
		return err;
	}

	rule->fte = fte;
	WARN_ON(rule_insert(fipsec, rule));

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = create_fte(ns, ft, fg, fte);
	restore_spec_mailbox(fte, &mbox_mod);
	if (ret) {
		_rule_delete(fipsec, rule);
		mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
		kfree(rule);
	}

	return ret;
}

static int fpga_ipsec_fs_update_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct mlx5_flow_group *fg,
				    int modify_mask,
				    struct fs_fte *fte,
				    bool is_egress)
{
	int (*update_fte)(struct mlx5_flow_root_namespace *ns,
			  struct mlx5_flow_table *ft,
			  struct mlx5_flow_group *fg,
			  int modify_mask,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->update_fte;
	struct mlx5_core_dev *dev = ns->dev;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return update_fte(ns, ft, fg, modify_mask, fte);

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = update_fte(ns, ft, fg, modify_mask, fte);
	restore_spec_mailbox(fte, &mbox_mod);

	return ret;
}

static int fpga_ipsec_fs_delete_fte(struct mlx5_flow_root_namespace *ns,
				    struct mlx5_flow_table *ft,
				    struct fs_fte *fte,
				    bool is_egress)
{
	int (*delete_fte)(struct mlx5_flow_root_namespace *ns,
			  struct mlx5_flow_table *ft,
			  struct fs_fte *fte) =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(is_egress))->delete_fte;
	struct mlx5_core_dev *dev = ns->dev;
	struct mlx5_fpga_device *fdev = dev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	struct mlx5_fpga_ipsec_rule *rule;
	bool is_esp = fte->action.esp_id;
	struct mailbox_mod mbox_mod;
	int ret;

	if (!is_esp ||
	    !(fte->action.action &
	      (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
	       MLX5_FLOW_CONTEXT_ACTION_DECRYPT)))
		return delete_fte(ns, ft, fte);

	rule = rule_search(fipsec, fte);
	if (!rule)
		return -ENOENT;

	mlx5_fpga_ipsec_delete_sa_ctx(rule->ctx);
	rule_delete(fipsec, rule);

	modify_spec_mailbox(dev, fte, &mbox_mod);
	ret = delete_fte(ns, ft, fte);
	restore_spec_mailbox(fte, &mbox_mod);

	return ret;
}

static int
mlx5_fpga_ipsec_fs_create_flow_group_egress(struct mlx5_flow_root_namespace *ns,
					    struct mlx5_flow_table *ft,
					    u32 *in,
					    struct mlx5_flow_group *fg)
{
	return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, true);
}

static int
mlx5_fpga_ipsec_fs_create_fte_egress(struct mlx5_flow_root_namespace *ns,
				     struct mlx5_flow_table *ft,
				     struct mlx5_flow_group *fg,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, true);
}

static int
mlx5_fpga_ipsec_fs_update_fte_egress(struct mlx5_flow_root_namespace *ns,
				     struct mlx5_flow_table *ft,
				     struct mlx5_flow_group *fg,
				     int modify_mask,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
					true);
}

static int
mlx5_fpga_ipsec_fs_delete_fte_egress(struct mlx5_flow_root_namespace *ns,
				     struct mlx5_flow_table *ft,
				     struct fs_fte *fte)
{
	return fpga_ipsec_fs_delete_fte(ns, ft, fte, true);
}

static int
mlx5_fpga_ipsec_fs_create_flow_group_ingress(struct mlx5_flow_root_namespace *ns,
					     struct mlx5_flow_table *ft,
					     u32 *in,
					     struct mlx5_flow_group *fg)
{
	return fpga_ipsec_fs_create_flow_group(ns, ft, in, fg, false);
}

static int
mlx5_fpga_ipsec_fs_create_fte_ingress(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_group *fg,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_create_fte(ns, ft, fg, fte, false);
}

static int
mlx5_fpga_ipsec_fs_update_fte_ingress(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct mlx5_flow_group *fg,
				      int modify_mask,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_update_fte(ns, ft, fg, modify_mask, fte,
					false);
}

static int
mlx5_fpga_ipsec_fs_delete_fte_ingress(struct mlx5_flow_root_namespace *ns,
				      struct mlx5_flow_table *ft,
				      struct fs_fte *fte)
{
	return fpga_ipsec_fs_delete_fte(ns, ft, fte, false);
}

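/* Populated by mlx5_fpga_ipsec_build_fs_cmds(). */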
static struct mlx5_flow_cmds fpga_ipsec_ingress;
static struct mlx5_flow_cmds fpga_ipsec_egress;

const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default_ipsec_fpga_cmds(enum fs_flow_table_type type)
{
	switch (type) {
	case FS_FT_NIC_RX:
		return &fpga_ipsec_ingress;
	case FS_FT_NIC_TX:
		return &fpga_ipsec_egress;
	default:
		WARN_ON(true);
		return NULL;
	}
}

static int mlx5_fpga_ipsec_init(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_conn_attr init_attr = {0};
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_conn *conn;
	int err;

	if (!mlx5_fpga_is_ipsec_device(mdev))
		return 0;

	fdev->ipsec = kzalloc(sizeof(*fdev->ipsec), GFP_KERNEL);
	if (!fdev->ipsec)
		return -ENOMEM;

	fdev->ipsec->fdev = fdev;

	err = mlx5_fpga_get_sbu_caps(fdev, sizeof(fdev->ipsec->caps),
				     fdev->ipsec->caps);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to retrieve IPSec extended capabilities: %d\n",
			      err);
		goto error;
	}

	INIT_LIST_HEAD(&fdev->ipsec->pending_cmds);
	spin_lock_init(&fdev->ipsec->pending_cmds_lock);

	init_attr.rx_size = SBU_QP_QUEUE_SIZE;
	init_attr.tx_size = SBU_QP_QUEUE_SIZE;
	init_attr.recv_cb = mlx5_fpga_ipsec_recv;
	init_attr.cb_arg = fdev;
	conn = mlx5_fpga_sbu_conn_create(fdev, &init_attr);
	if (IS_ERR(conn)) {
		err = PTR_ERR(conn);
		mlx5_fpga_err(fdev, "Error creating IPSec command connection %d\n",
			      err);
		goto error;
	}
	fdev->ipsec->conn = conn;

	err = rhashtable_init(&fdev->ipsec->sa_hash, &rhash_sa);
	if (err)
		goto err_destroy_conn;
	mutex_init(&fdev->ipsec->sa_hash_lock);

	fdev->ipsec->rules_rb = RB_ROOT;
	mutex_init(&fdev->ipsec->rules_rb_lock);

	err = mlx5_fpga_ipsec_enable_supported_caps(mdev);
	if (err) {
		mlx5_fpga_err(fdev, "Failed to enable IPSec extended capabilities: %d\n",
			      err);
		goto err_destroy_hash;
	}

	ida_init(&fdev->ipsec->halloc);

	return 0;

err_destroy_hash:
	rhashtable_destroy(&fdev->ipsec->sa_hash);

err_destroy_conn:
	mlx5_fpga_sbu_conn_destroy(conn);

error:
	kfree(fdev->ipsec);
	fdev->ipsec = NULL;
	return err;
}

static void destroy_rules_rb(struct rb_root *root)
{
	struct mlx5_fpga_ipsec_rule *r, *tmp;

	rbtree_postorder_for_each_entry_safe(r, tmp, root, node) {
		rb_erase(&r->node, root);
		mlx5_fpga_ipsec_delete_sa_ctx(r->ctx);
		kfree(r);
	}
}

static void mlx5_fpga_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
	struct mlx5_fpga_device *fdev = mdev->fpga;

	if (!mlx5_fpga_is_ipsec_device(mdev))
		return;

	ida_destroy(&fdev->ipsec->halloc);
	destroy_rules_rb(&fdev->ipsec->rules_rb);
	rhashtable_destroy(&fdev->ipsec->sa_hash);

	mlx5_fpga_sbu_conn_destroy(fdev->ipsec->conn);
	kfree(fdev->ipsec);
	fdev->ipsec = NULL;
}

void mlx5_fpga_ipsec_build_fs_cmds(void)
{
	/* ingress */
	fpga_ipsec_ingress.create_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->create_flow_table;
	fpga_ipsec_ingress.destroy_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_table;
	fpga_ipsec_ingress.modify_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->modify_flow_table;
	fpga_ipsec_ingress.create_flow_group =
		mlx5_fpga_ipsec_fs_create_flow_group_ingress;
	fpga_ipsec_ingress.destroy_flow_group =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->destroy_flow_group;
	fpga_ipsec_ingress.create_fte =
		mlx5_fpga_ipsec_fs_create_fte_ingress;
	fpga_ipsec_ingress.update_fte =
		mlx5_fpga_ipsec_fs_update_fte_ingress;
	fpga_ipsec_ingress.delete_fte =
		mlx5_fpga_ipsec_fs_delete_fte_ingress;
	fpga_ipsec_ingress.update_root_ft =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(false))->update_root_ft;

	/* egress */
	fpga_ipsec_egress.create_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->create_flow_table;
	fpga_ipsec_egress.destroy_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_table;
	fpga_ipsec_egress.modify_flow_table =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->modify_flow_table;
	fpga_ipsec_egress.create_flow_group =
		mlx5_fpga_ipsec_fs_create_flow_group_egress;
	fpga_ipsec_egress.destroy_flow_group =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->destroy_flow_group;
	fpga_ipsec_egress.create_fte =
		mlx5_fpga_ipsec_fs_create_fte_egress;
	fpga_ipsec_egress.update_fte =
		mlx5_fpga_ipsec_fs_update_fte_egress;
	fpga_ipsec_egress.delete_fte =
		mlx5_fpga_ipsec_fs_delete_fte_egress;
	fpga_ipsec_egress.update_root_ft =
		mlx5_fs_cmd_get_default(egress_to_fs_ft(true))->update_root_ft;
}

static int
mlx5_fpga_esp_validate_xfrm_attrs(struct mlx5_core_dev *mdev,
				  const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	if (attrs->tfc_pad) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with tfc padding\n");
		return -EOPNOTSUPP;
	}

	if (attrs->replay_type != MLX5_ACCEL_ESP_REPLAY_NONE) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with anti replay\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat_type != MLX5_ACCEL_ESP_KEYMAT_AES_GCM) {
		mlx5_core_err(mdev, "Only aes gcm keymat is supported\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat.aes_gcm.iv_algo !=
	    MLX5_ACCEL_ESP_AES_GCM_IV_ALGO_SEQ) {
		mlx5_core_err(mdev, "Only iv sequence algo is supported\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat.aes_gcm.icv_len != 128) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
		return -EOPNOTSUPP;
	}

	if (attrs->keymat.aes_gcm.key_len != 128 &&
	    attrs->keymat.aes_gcm.key_len != 256) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
		return -EOPNOTSUPP;
	}

	if ((attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) &&
	    (!MLX5_GET(ipsec_extended_cap, mdev->fpga->ipsec->caps,
		       v2_command))) {
		mlx5_core_err(mdev, "Cannot offload xfrm states with ESN without v2 command support\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static struct mlx5_accel_esp_xfrm *
mlx5_fpga_esp_create_xfrm(struct mlx5_core_dev *mdev,
			  const struct mlx5_accel_esp_xfrm_attrs *attrs,
			  u32 flags)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm;

	if (!(flags & MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA)) {
		mlx5_core_warn(mdev, "Tried to create an esp action without metadata\n");
		return ERR_PTR(-EINVAL);
	}

	if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
		mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
		return ERR_PTR(-EOPNOTSUPP);
	}

	fpga_xfrm = kzalloc(sizeof(*fpga_xfrm), GFP_KERNEL);
	if (!fpga_xfrm)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fpga_xfrm->lock);
	memcpy(&fpga_xfrm->accel_xfrm.attrs, attrs,
	       sizeof(fpga_xfrm->accel_xfrm.attrs));

	return &fpga_xfrm->accel_xfrm;
}

static void mlx5_fpga_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
	struct mlx5_fpga_esp_xfrm *fpga_xfrm =
			container_of(xfrm, struct mlx5_fpga_esp_xfrm,
				     accel_xfrm);
	/* assuming no sa_ctx is connected to this xfrm_ctx */
	kfree(fpga_xfrm);
}

static int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
				     const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_core_dev *mdev = xfrm->mdev;
	struct mlx5_fpga_device *fdev = mdev->fpga;
	struct mlx5_fpga_ipsec *fipsec = fdev->ipsec;
	struct mlx5_fpga_esp_xfrm *fpga_xfrm;
	struct mlx5_ifc_fpga_ipsec_sa org_hw_sa;
	int err = 0;

	if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
		return 0;

	if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
		mlx5_core_warn(mdev, "Tried to modify an esp with unsupported attrs\n");
		return -EOPNOTSUPP;
	}

	if (is_v2_sadb_supported(fipsec)) {
		mlx5_core_warn(mdev, "Modify esp is not supported\n");
		return -EOPNOTSUPP;
	}

	fpga_xfrm = container_of(xfrm, struct mlx5_fpga_esp_xfrm, accel_xfrm);

	mutex_lock(&fpga_xfrm->lock);

	if (!fpga_xfrm->sa_ctx)
		/* Unbound xfrm; change only the SW attrs */
		goto change_sw_xfrm_attrs;

	/* copy original hw sa */
	memcpy(&org_hw_sa, &fpga_xfrm->sa_ctx->hw_sa, sizeof(org_hw_sa));
	mutex_lock(&fipsec->sa_hash_lock);
	/* remove original hw sa from hash */
	WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
				       &fpga_xfrm->sa_ctx->hash, rhash_sa));
	/* update hw_sa with new xfrm attrs */
	mlx5_fpga_ipsec_build_hw_xfrm(xfrm->mdev, attrs,
				      &fpga_xfrm->sa_ctx->hw_sa);
	/* try to insert new hw_sa to hash */
	err = rhashtable_insert_fast(&fipsec->sa_hash,
				     &fpga_xfrm->sa_ctx->hash, rhash_sa);
	if (err)
		goto rollback_sa;

	/* modify device with new hw_sa */
	err = mlx5_fpga_ipsec_update_hw_sa(fdev, &fpga_xfrm->sa_ctx->hw_sa,
					   MLX5_FPGA_IPSEC_CMD_OP_MOD_SA_V2);
	fpga_xfrm->sa_ctx->hw_sa.ipsec_sa_v1.cmd = 0;
	if (err)
		WARN_ON(rhashtable_remove_fast(&fipsec->sa_hash,
					       &fpga_xfrm->sa_ctx->hash,
					       rhash_sa));
rollback_sa:
	if (err) {
		/* return the original hw_sa to the hash */
		memcpy(&fpga_xfrm->sa_ctx->hw_sa, &org_hw_sa,
		       sizeof(org_hw_sa));
		WARN_ON(rhashtable_insert_fast(&fipsec->sa_hash,
					       &fpga_xfrm->sa_ctx->hash,
					       rhash_sa));
	}
	mutex_unlock(&fipsec->sa_hash_lock);

change_sw_xfrm_attrs:
	if (!err)
		memcpy(&xfrm->attrs, attrs, sizeof(xfrm->attrs));
	mutex_unlock(&fpga_xfrm->lock);
	return err;
}

static const struct mlx5_accel_ipsec_ops fpga_ipsec_ops = {
	.device_caps = mlx5_fpga_ipsec_device_caps,
	.counters_count = mlx5_fpga_ipsec_counters_count,
	.counters_read = mlx5_fpga_ipsec_counters_read,
	.create_hw_context = mlx5_fpga_ipsec_create_sa_ctx,
	.free_hw_context = mlx5_fpga_ipsec_delete_sa_ctx,
	.init = mlx5_fpga_ipsec_init,
	.cleanup = mlx5_fpga_ipsec_cleanup,
	.esp_create_xfrm = mlx5_fpga_esp_create_xfrm,
	.esp_modify_xfrm = mlx5_fpga_esp_modify_xfrm,
	.esp_destroy_xfrm = mlx5_fpga_esp_destroy_xfrm,
};

const struct mlx5_accel_ipsec_ops *mlx5_fpga_ipsec_ops(struct mlx5_core_dev *mdev)
{
	if (!mlx5_fpga_is_ipsec_device(mdev))
		return NULL;

	return &fpga_ipsec_ops;
}