// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */

#include <linux/netdevice.h>
#include "en.h"
#include "en/fs.h"
#include "eswitch.h"
#include "ipsec.h"
#include "fs_core.h"
#include "lib/ipsec_fs_roce.h"
#include "lib/fs_chains.h"
#include "esw/ipsec_fs.h"
#include "en_rep.h"

#define NUM_IPSEC_FTE BIT(15)
#define MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE 16
#define IPSEC_TUNNEL_DEFAULT_TTL 0x40
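
/* Flow counters shared by a pipeline: "cnt" accounts traffic that traversed
 * the IPsec tables; "drop" presumably accounts traffic dropped on the way.
 */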
struct mlx5e_ipsec_fc {
	struct mlx5_fc *cnt;
	struct mlx5_fc *drop;
};

struct mlx5e_ipsec_tx {
	struct mlx5e_ipsec_ft ft;
	struct mlx5e_ipsec_miss pol;
	struct mlx5e_ipsec_miss sa;
	struct mlx5e_ipsec_rule status;
	struct mlx5_flow_namespace *ns;
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fs_chains *chains;
	u8 allow_tunnel_mode : 1;
};

struct mlx5e_ipsec_status_checks {
	struct mlx5_flow_group *drop_all_group;
	struct mlx5e_ipsec_drop all;
};

struct mlx5e_ipsec_rx {
	struct mlx5e_ipsec_ft ft;
	struct mlx5e_ipsec_miss pol;
	struct mlx5e_ipsec_miss sa;
	struct mlx5e_ipsec_rule status;
	struct mlx5e_ipsec_status_checks status_drops;
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fs_chains *chains;
	u8 allow_tunnel_mode : 1;
};

/* IPsec RX flow steering */
static enum mlx5_traffic_types family2tt(u32 family)
{
	if (family == AF_INET)
		return MLX5_TT_IPV4_IPSEC_ESP;
	return MLX5_TT_IPV6_IPSEC_ESP;
}
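
/* Select the RX context for an SA/policy: packet offload on the uplink
 * representor (switchdev mode) uses the shared eswitch RX context,
 * otherwise the per-address-family NIC RX context is used.
 */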
static struct mlx5e_ipsec_rx *ipsec_rx(struct mlx5e_ipsec *ipsec, u32 family, int type)
{
	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
		return ipsec->rx_esw;

	if (family == AF_INET)
		return ipsec->rx_ipv4;

	return ipsec->rx_ipv6;
}

static struct mlx5e_ipsec_tx *ipsec_tx(struct mlx5e_ipsec *ipsec, int type)
{
	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
		return ipsec->tx_esw;

	return ipsec->tx;
}
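
/* When the device supports policy priorities (MLX5_IPSEC_CAP_PRIO), the
 * policy table is implemented with fs_chains: each policy priority maps to
 * a chain prio (see ipsec_chains_get_table() below), and the chains'
 * default miss destination is @miss_ft.
 */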
static struct mlx5_fs_chains *
ipsec_chains_create(struct mlx5_core_dev *mdev, struct mlx5_flow_table *miss_ft,
		    enum mlx5_flow_namespace_type ns, int base_prio,
		    int base_level, struct mlx5_flow_table **root_ft)
{
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	struct mlx5_flow_table *ft;
	int err;

	attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
		     MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
	attr.max_grp_num = 2;
	attr.default_ft = miss_ft;
	attr.ns = ns;
	attr.fs_base_prio = base_prio;
	attr.fs_base_level = base_level;
	chains = mlx5_chains_create(mdev, &attr);
	if (IS_ERR(chains))
		return chains;

	/* Create chain 0, prio 1, level 0 to connect chains to prev in fs_core */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_chains_get;
	}

	*root_ft = ft;
	return chains;

err_chains_get:
	mlx5_chains_destroy(chains);
	return ERR_PTR(err);
}

static void ipsec_chains_destroy(struct mlx5_fs_chains *chains)
{
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_destroy(chains);
}

static struct mlx5_flow_table *
ipsec_chains_get_table(struct mlx5_fs_chains *chains, u32 prio)
{
	return mlx5_chains_get_table(chains, 0, prio + 1, 0);
}

static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
{
	mlx5_chains_put_table(chains, 0, prio + 1, 0);
}
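
/* All IPsec flow tables are auto-grouped and reserve their last FTE for the
 * table-miss rule created by ipsec_miss_create().
 */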
static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
					       int level, int prio,
					       int max_num_groups, u32 flags)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.autogroup.num_reserved_entries = 1;
	ft_attr.autogroup.max_num_groups = max_num_groups;
	ft_attr.max_fte = NUM_IPSEC_FTE;
	ft_attr.level = level;
	ft_attr.prio = prio;
	ft_attr.flags = flags;

	return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
}

static void ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx)
{
	mlx5_del_flow_rules(rx->status_drops.all.rule);
	mlx5_fc_destroy(ipsec->mdev, rx->status_drops.all.fc);
	mlx5_destroy_flow_group(rx->status_drops.drop_all_group);
}

static void ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx)
{
	mlx5_del_flow_rules(rx->status.rule);

	if (rx != ipsec->rx_esw)
		return;

#ifdef CONFIG_MLX5_ESWITCH
	mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
#endif
}
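
/* Per-SA drop rules in the RX status table: match the SA id in
 * metadata_reg_c_2 together with an IPsec syndrome, then drop and count.
 * Syndrome values 1 and 2 appear to correspond to bad authentication and
 * bad trailer respectively, judging by the rule names.
 */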
static int rx_add_rule_drop_auth_trailer(struct mlx5e_ipsec_sa_entry *sa_entry,
					 struct mlx5e_ipsec_rx *rx)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_table *ft = rx->ft.status;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_fc *flow_counter;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	flow_counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt;
	}
	sa_entry->ipsec_rule.auth.fc = flow_counter;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
	flow_act.flags = FLOW_ACT_NO_APPEND;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter_id = mlx5_fc_id(flow_counter);
	if (rx == ipsec->rx_esw)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.ipsec_syndrome);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 1);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.metadata_reg_c_2,
		 sa_entry->ipsec_obj_id | BIT(31));
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule;
	}
	sa_entry->ipsec_rule.auth.rule = rule;

	flow_counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt_2;
	}
	sa_entry->ipsec_rule.trailer.fc = flow_counter;

	dest.counter_id = mlx5_fc_id(flow_counter);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.ipsec_syndrome, 2);
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule_2;
	}
	sa_entry->ipsec_rule.trailer.rule = rule;

	kvfree(spec);
	return 0;

err_rule_2:
	mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.trailer.fc);
err_cnt_2:
	mlx5_del_flow_rules(sa_entry->ipsec_rule.auth.rule);
err_rule:
	mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.auth.fc);
err_cnt:
	kvfree(spec);
	return err;
}

static int rx_add_rule_drop_replay(struct mlx5e_ipsec_sa_entry *sa_entry, struct mlx5e_ipsec_rx *rx)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_table *ft = rx->ft.status;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_fc *flow_counter;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	flow_counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
	flow_act.flags = FLOW_ACT_NO_APPEND;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter_id = mlx5_fc_id(flow_counter);
	if (rx == ipsec->rx_esw)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_4);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_4, 1);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_c_2);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_2,
		 sa_entry->ipsec_obj_id | BIT(31));
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule;
	}

	sa_entry->ipsec_rule.replay.rule = rule;
	sa_entry->ipsec_rule.replay.fc = flow_counter;

	kvfree(spec);
	return 0;

err_rule:
	mlx5_fc_destroy(mdev, flow_counter);
err_cnt:
	kvfree(spec);
	return err;
}
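
/* Catch-all drop: a single-entry group on the last FTE of the status table
 * drops (and counts) anything that did not match the "pass" rule.
 */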
static int ipsec_rx_status_drop_all_create(struct mlx5e_ipsec *ipsec,
					   struct mlx5e_ipsec_rx *rx)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table *ft = rx->ft.status;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_fc *flow_counter;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto err_out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop flow group, err=%d\n", err);
		goto err_out;
	}

	flow_counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter_id = mlx5_fc_id(flow_counter);
	if (rx == ipsec->rx_esw)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule;
	}

	rx->status_drops.drop_all_group = g;
	rx->status_drops.all.rule = rule;
	rx->status_drops.all.fc = flow_counter;

	kvfree(flow_group_in);
	kvfree(spec);
	return 0;

err_rule:
	mlx5_fc_destroy(mdev, flow_counter);
err_cnt:
	mlx5_destroy_flow_group(g);
err_out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

static int ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
				       struct mlx5e_ipsec_rx *rx,
				       struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 misc_parameters_2.ipsec_syndrome);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 misc_parameters_2.metadata_reg_c_4);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.ipsec_syndrome, 0);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.metadata_reg_c_4, 0);
	if (rx == ipsec->rx_esw)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.flags = FLOW_ACT_NO_APPEND;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_warn(ipsec->mdev,
			       "Failed to add ipsec rx status pass rule, err=%d\n", err);
		goto err_rule;
	}

	rx->status.rule = rule;
	kvfree(spec);
	return 0;

err_rule:
	kvfree(spec);
	return err;
}

static void mlx5_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx)
{
	ipsec_rx_status_pass_destroy(ipsec, rx);
	ipsec_rx_status_drop_destroy(ipsec, rx);
}

static int mlx5_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
				       struct mlx5e_ipsec_rx *rx,
				       struct mlx5_flow_destination *dest)
{
	int err;

	err = ipsec_rx_status_drop_all_create(ipsec, rx);
	if (err)
		return err;

	err = ipsec_rx_status_pass_create(ipsec, rx, dest);
	if (err)
		goto err_pass_create;

	return 0;

err_pass_create:
	ipsec_rx_status_drop_destroy(ipsec, rx);
	return err;
}
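
/* Create a default (match-all) rule in the reserved last FTE of @ft,
 * forwarding table misses to @dest.
 */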
static int ipsec_miss_create(struct mlx5_core_dev *mdev,
			     struct mlx5_flow_table *ft,
			     struct mlx5e_ipsec_miss *miss,
			     struct mlx5_flow_destination *dest)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto out;
	}

	/* Create miss_group */
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	miss->group = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(miss->group)) {
		err = PTR_ERR(miss->group);
		mlx5_core_err(mdev, "fail to create IPsec miss_group err=%d\n",
			      err);
		goto out;
	}

	/* Create miss rule */
	miss->rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(miss->rule)) {
		mlx5_destroy_flow_group(miss->group);
		err = PTR_ERR(miss->rule);
		mlx5_core_err(mdev, "fail to create IPsec miss_rule err=%d\n",
			      err);
		goto out;
	}
out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

static void handle_ipsec_rx_bringup(struct mlx5e_ipsec *ipsec, u32 family)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
	struct mlx5_flow_destination old_dest, new_dest;

	old_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
					     family2tt(family));

	mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, ns, &old_dest, family,
				     MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL, MLX5E_NIC_PRIO);

	new_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
	new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);
	mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
}

static void handle_ipsec_rx_cleanup(struct mlx5e_ipsec *ipsec, u32 family)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, XFRM_DEV_OFFLOAD_PACKET);
	struct mlx5_flow_destination old_dest, new_dest;

	old_dest.ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
	old_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	new_dest = mlx5_ttc_get_default_dest(mlx5e_fs_get_ttc(ipsec->fs, false),
					     family2tt(family));
	mlx5_modify_rule_destination(rx->sa.rule, &new_dest, &old_dest);
	mlx5_modify_rule_destination(rx->status.rule, &new_dest, &old_dest);

	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, ipsec->mdev);
}

static void ipsec_mpv_work_handler(struct work_struct *_work)
{
	struct mlx5e_ipsec_mpv_work *work = container_of(_work, struct mlx5e_ipsec_mpv_work, work);
	struct mlx5e_ipsec *ipsec = work->slave_priv->ipsec;

	switch (work->event) {
	case MPV_DEVCOM_IPSEC_MASTER_UP:
		mutex_lock(&ipsec->tx->ft.mutex);
		if (ipsec->tx->ft.refcnt)
			mlx5_ipsec_fs_roce_tx_create(ipsec->mdev, ipsec->roce, ipsec->tx->ft.pol,
						     true);
		mutex_unlock(&ipsec->tx->ft.mutex);

		mutex_lock(&ipsec->rx_ipv4->ft.mutex);
		if (ipsec->rx_ipv4->ft.refcnt)
			handle_ipsec_rx_bringup(ipsec, AF_INET);
		mutex_unlock(&ipsec->rx_ipv4->ft.mutex);

		mutex_lock(&ipsec->rx_ipv6->ft.mutex);
		if (ipsec->rx_ipv6->ft.refcnt)
			handle_ipsec_rx_bringup(ipsec, AF_INET6);
		mutex_unlock(&ipsec->rx_ipv6->ft.mutex);
		break;
	case MPV_DEVCOM_IPSEC_MASTER_DOWN:
		mutex_lock(&ipsec->tx->ft.mutex);
		if (ipsec->tx->ft.refcnt)
			mlx5_ipsec_fs_roce_tx_destroy(ipsec->roce, ipsec->mdev);
		mutex_unlock(&ipsec->tx->ft.mutex);

		mutex_lock(&ipsec->rx_ipv4->ft.mutex);
		if (ipsec->rx_ipv4->ft.refcnt)
			handle_ipsec_rx_cleanup(ipsec, AF_INET);
		mutex_unlock(&ipsec->rx_ipv4->ft.mutex);

		mutex_lock(&ipsec->rx_ipv6->ft.mutex);
		if (ipsec->rx_ipv6->ft.refcnt)
			handle_ipsec_rx_cleanup(ipsec, AF_INET6);
		mutex_unlock(&ipsec->rx_ipv6->ft.mutex);
		break;
	}

	complete(&work->master_priv->ipsec->comp);
}

static void ipsec_rx_ft_disconnect(struct mlx5e_ipsec *ipsec, u32 family)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);

	mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
}

static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		       struct mlx5e_ipsec_rx *rx, u32 family)
{
	/* disconnect */
	if (rx != ipsec->rx_esw)
		ipsec_rx_ft_disconnect(ipsec, family);

	if (rx->chains) {
		ipsec_chains_destroy(rx->chains);
	} else {
		mlx5_del_flow_rules(rx->pol.rule);
		mlx5_destroy_flow_group(rx->pol.group);
		mlx5_destroy_flow_table(rx->ft.pol);
	}

	mlx5_del_flow_rules(rx->sa.rule);
	mlx5_destroy_flow_group(rx->sa.group);
	mlx5_destroy_flow_table(rx->ft.sa);
	if (rx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(mdev);
	mlx5_ipsec_rx_status_destroy(ipsec, rx);
	mlx5_destroy_flow_table(rx->ft.status);

	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
}

static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
				     struct mlx5e_ipsec_rx *rx,
				     u32 family,
				     struct mlx5e_ipsec_rx_create_attr *attr)
{
	if (rx == ipsec->rx_esw) {
		/* For packet offload in switchdev mode, RX & TX use FDB namespace */
		attr->ns = ipsec->tx_esw->ns;
		mlx5_esw_ipsec_rx_create_attr_set(ipsec, attr);
		return;
	}

	attr->ns = mlx5e_fs_get_ns(ipsec->fs, false);
	attr->ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
	attr->family = family;
	attr->prio = MLX5E_NIC_PRIO;
	attr->pol_level = MLX5E_ACCEL_FS_POL_FT_LEVEL;
	attr->sa_level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
	attr->status_level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
	attr->chains_ns = MLX5_FLOW_NAMESPACE_KERNEL;
}

static int ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
					 struct mlx5e_ipsec_rx *rx,
					 struct mlx5e_ipsec_rx_create_attr *attr,
					 struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_table *ft;
	int err;

	if (rx == ipsec->rx_esw)
		return mlx5_esw_ipsec_rx_status_pass_dest_get(ipsec, dest);

	*dest = mlx5_ttc_get_default_dest(attr->ttc, family2tt(attr->family));
	err = mlx5_ipsec_fs_roce_rx_create(ipsec->mdev, ipsec->roce, attr->ns, dest,
					   attr->family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
					   attr->prio);
	if (err)
		return err;

	ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, attr->family);
	if (ft) {
		dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest->ft = ft;
	}

	return 0;
}

static void ipsec_rx_ft_connect(struct mlx5e_ipsec *ipsec,
				struct mlx5e_ipsec_rx *rx,
				struct mlx5e_ipsec_rx_create_attr *attr)
{
	struct mlx5_flow_destination dest = {};

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = rx->ft.pol;
	mlx5_ttc_fwd_dest(attr->ttc, family2tt(attr->family), &dest);
}
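
/* Build the RX pipeline for one address family:
 * TTC steering -> policy FT (or chains) -> SA FT -> status FT, where the
 * status FT either forwards to the original TTC/RoCE destination or drops.
 */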
static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		     struct mlx5e_ipsec_rx *rx, u32 family)
{
	struct mlx5e_ipsec_rx_create_attr attr;
	struct mlx5_flow_destination dest[2];
	struct mlx5_flow_table *ft;
	u32 flags = 0;
	int err;

	ipsec_rx_create_attr_set(ipsec, rx, family, &attr);

	err = ipsec_rx_status_pass_dest_get(ipsec, rx, &attr, &dest[0]);
	if (err)
		return err;

	ft = ipsec_ft_create(attr.ns, attr.status_level, attr.prio, 3, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_fs_ft_status;
	}
	rx->ft.status = ft;

	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
	err = mlx5_ipsec_rx_status_create(ipsec, rx, dest);
	if (err)
		goto err_add;

	/* Create FT */
	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
		rx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
	if (rx->allow_tunnel_mode)
		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
	ft = ipsec_ft_create(attr.ns, attr.sa_level, attr.prio, 2, flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_fs_ft;
	}
	rx->ft.sa = ft;

	err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, dest);
	if (err)
		goto err_fs;

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
		rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
						 attr.chains_ns,
						 attr.prio,
						 attr.pol_level,
						 &rx->ft.pol);
		if (IS_ERR(rx->chains)) {
			err = PTR_ERR(rx->chains);
			goto err_pol_ft;
		}

		goto connect;
	}

	ft = ipsec_ft_create(attr.ns, attr.pol_level, attr.prio, 2, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_pol_ft;
	}
	rx->ft.pol = ft;
	memset(dest, 0x00, 2 * sizeof(*dest));
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[0].ft = rx->ft.sa;
	err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, dest);
	if (err)
		goto err_pol_miss;

connect:
	/* connect */
	if (rx != ipsec->rx_esw)
		ipsec_rx_ft_connect(ipsec, rx, &attr);
	return 0;

err_pol_miss:
	mlx5_destroy_flow_table(rx->ft.pol);
err_pol_ft:
	mlx5_del_flow_rules(rx->sa.rule);
	mlx5_destroy_flow_group(rx->sa.group);
err_fs:
	mlx5_destroy_flow_table(rx->ft.sa);
err_fs_ft:
	if (rx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(mdev);
	mlx5_del_flow_rules(rx->status.rule);
	mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
err_add:
	mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
	mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family, mdev);
	return err;
}
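
/* rx_get()/rx_put() implement refcounted lazy creation of the RX pipeline;
 * callers are expected to hold rx->ft.mutex (see rx_ft_get() and friends).
 */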
static int rx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		  struct mlx5e_ipsec_rx *rx, u32 family)
{
	int err;

	if (rx->ft.refcnt)
		goto skip;

	err = mlx5_eswitch_block_mode(mdev);
	if (err)
		return err;

	err = rx_create(mdev, ipsec, rx, family);
	if (err) {
		mlx5_eswitch_unblock_mode(mdev);
		return err;
	}

skip:
	rx->ft.refcnt++;
	return 0;
}

static void rx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_rx *rx,
		   u32 family)
{
	if (--rx->ft.refcnt)
		return;

	rx_destroy(ipsec->mdev, ipsec, rx, family);
	mlx5_eswitch_unblock_mode(ipsec->mdev);
}

static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
					struct mlx5e_ipsec *ipsec, u32 family,
					int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
	int err;

	mutex_lock(&rx->ft.mutex);
	err = rx_get(mdev, ipsec, rx, family);
	mutex_unlock(&rx->ft.mutex);
	if (err)
		return ERR_PTR(err);

	return rx;
}

static struct mlx5_flow_table *rx_ft_get_policy(struct mlx5_core_dev *mdev,
						struct mlx5e_ipsec *ipsec,
						u32 family, u32 prio, int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);
	struct mlx5_flow_table *ft;
	int err;

	mutex_lock(&rx->ft.mutex);
	err = rx_get(mdev, ipsec, rx, family);
	if (err)
		goto err_get;

	ft = rx->chains ? ipsec_chains_get_table(rx->chains, prio) : rx->ft.pol;
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_get_ft;
	}

	mutex_unlock(&rx->ft.mutex);
	return ft;

err_get_ft:
	rx_put(ipsec, rx, family);
err_get:
	mutex_unlock(&rx->ft.mutex);
	return ERR_PTR(err);
}

static void rx_ft_put(struct mlx5e_ipsec *ipsec, u32 family, int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);

	mutex_lock(&rx->ft.mutex);
	rx_put(ipsec, rx, family);
	mutex_unlock(&rx->ft.mutex);
}

static void rx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 family, u32 prio, int type)
{
	struct mlx5e_ipsec_rx *rx = ipsec_rx(ipsec, family, type);

	mutex_lock(&rx->ft.mutex);
	if (rx->chains)
		ipsec_chains_put_table(rx->chains, prio);

	rx_put(ipsec, rx, family);
	mutex_unlock(&rx->ft.mutex);
}

static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *fte;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* create fte */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter_id = mlx5_fc_id(tx->fc->cnt);
	fte = mlx5_add_flow_rules(tx->ft.status, spec, &flow_act, &dest, 1);
	if (IS_ERR(fte)) {
		err = PTR_ERR(fte);
		mlx5_core_err(mdev, "Fail to add ipsec tx counter rule err=%d\n", err);
		goto err_rule;
	}

	kvfree(spec);
	tx->status.rule = fte;
	return 0;

err_rule:
	kvfree(spec);
	return err;
}

/* IPsec TX flow steering */
static void tx_destroy(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
		       struct mlx5_ipsec_fs *roce)
{
	mlx5_ipsec_fs_roce_tx_destroy(roce, ipsec->mdev);
	if (tx->chains) {
		ipsec_chains_destroy(tx->chains);
	} else {
		mlx5_del_flow_rules(tx->pol.rule);
		mlx5_destroy_flow_group(tx->pol.group);
		mlx5_destroy_flow_table(tx->ft.pol);
	}

	if (tx == ipsec->tx_esw) {
		mlx5_del_flow_rules(tx->sa.rule);
		mlx5_destroy_flow_group(tx->sa.group);
	}
	mlx5_destroy_flow_table(tx->ft.sa);
	if (tx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(ipsec->mdev);
	mlx5_del_flow_rules(tx->status.rule);
	mlx5_destroy_flow_table(tx->ft.status);
}

static void ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
				     struct mlx5e_ipsec_tx *tx,
				     struct mlx5e_ipsec_tx_create_attr *attr)
{
	if (tx == ipsec->tx_esw) {
		mlx5_esw_ipsec_tx_create_attr_set(ipsec, attr);
		return;
	}

	attr->prio = 0;
	attr->pol_level = 0;
	attr->sa_level = 1;
	attr->cnt_level = 2;
	attr->chains_ns = MLX5_FLOW_NAMESPACE_EGRESS_IPSEC;
}
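
/* Build the TX pipeline: policy FT (or chains) -> SA FT -> status FT with an
 * allow+count rule. In switchdev mode the SA table also gets a miss rule
 * forwarding to the uplink vport.
 */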
static int tx_create(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx,
		     struct mlx5_ipsec_fs *roce)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_tx_create_attr attr;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_table *ft;
	u32 flags = 0;
	int err;

	ipsec_tx_create_attr_set(ipsec, tx, &attr);
	ft = ipsec_ft_create(tx->ns, attr.cnt_level, attr.prio, 1, 0);
	if (IS_ERR(ft))
		return PTR_ERR(ft);
	tx->ft.status = ft;

	err = ipsec_counter_rule_tx(mdev, tx);
	if (err)
		goto err_status_rule;

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_TUNNEL)
		tx->allow_tunnel_mode = mlx5_eswitch_block_encap(mdev);
	if (tx->allow_tunnel_mode)
		flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
	ft = ipsec_ft_create(tx->ns, attr.sa_level, attr.prio, 4, flags);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_sa_ft;
	}
	tx->ft.sa = ft;

	if (tx == ipsec->tx_esw) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest.vport.num = MLX5_VPORT_UPLINK;
		err = ipsec_miss_create(mdev, tx->ft.sa, &tx->sa, &dest);
		if (err)
			goto err_sa_miss;
		memset(&dest, 0, sizeof(dest));
	}

	if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
		tx->chains = ipsec_chains_create(
			mdev, tx->ft.sa, attr.chains_ns, attr.prio, attr.pol_level,
			&tx->ft.pol);
		if (IS_ERR(tx->chains)) {
			err = PTR_ERR(tx->chains);
			goto err_pol_ft;
		}

		goto connect_roce;
	}

	ft = ipsec_ft_create(tx->ns, attr.pol_level, attr.prio, 2, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_pol_ft;
	}
	tx->ft.pol = ft;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = tx->ft.sa;
	err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
	if (err) {
		mlx5_destroy_flow_table(tx->ft.pol);
		goto err_pol_ft;
	}

connect_roce:
	err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol, false);
	if (err)
		goto err_roce;
	return 0;

err_roce:
	if (tx->chains) {
		ipsec_chains_destroy(tx->chains);
	} else {
		mlx5_del_flow_rules(tx->pol.rule);
		mlx5_destroy_flow_group(tx->pol.group);
		mlx5_destroy_flow_table(tx->ft.pol);
	}
err_pol_ft:
	if (tx == ipsec->tx_esw) {
		mlx5_del_flow_rules(tx->sa.rule);
		mlx5_destroy_flow_group(tx->sa.group);
	}
err_sa_miss:
	mlx5_destroy_flow_table(tx->ft.sa);
err_sa_ft:
	if (tx->allow_tunnel_mode)
		mlx5_eswitch_unblock_encap(mdev);
	mlx5_del_flow_rules(tx->status.rule);
err_status_rule:
	mlx5_destroy_flow_table(tx->ft.status);
	return err;
}

static void ipsec_esw_tx_ft_policy_set(struct mlx5_core_dev *mdev,
				       struct mlx5_flow_table *ft)
{
#ifdef CONFIG_MLX5_ESWITCH
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5e_priv *priv;

	esw->offloads.ft_ipsec_tx_pol = ft;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	priv = netdev_priv(uplink_rpriv->netdev);
	if (!priv->channels.num)
		return;

	mlx5e_rep_deactivate_channels(priv);
	mlx5e_rep_activate_channels(priv);
#endif
}

static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
		  struct mlx5e_ipsec_tx *tx)
{
	int err;

	if (tx->ft.refcnt)
		goto skip;

	err = mlx5_eswitch_block_mode(mdev);
	if (err)
		return err;

	err = tx_create(ipsec, tx, ipsec->roce);
	if (err) {
		mlx5_eswitch_unblock_mode(mdev);
		return err;
	}

	if (tx == ipsec->tx_esw)
		ipsec_esw_tx_ft_policy_set(mdev, tx->ft.pol);

skip:
	tx->ft.refcnt++;
	return 0;
}

static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
{
	if (--tx->ft.refcnt)
		return;

	if (tx == ipsec->tx_esw) {
		mlx5_esw_ipsec_restore_dest_uplink(ipsec->mdev);
		ipsec_esw_tx_ft_policy_set(ipsec->mdev, NULL);
	}

	tx_destroy(ipsec, tx, ipsec->roce);
	mlx5_eswitch_unblock_mode(ipsec->mdev);
}

static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
						struct mlx5e_ipsec *ipsec,
						u32 prio, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
	struct mlx5_flow_table *ft;
	int err;

	mutex_lock(&tx->ft.mutex);
	err = tx_get(mdev, ipsec, tx);
	if (err)
		goto err_get;

	ft = tx->chains ? ipsec_chains_get_table(tx->chains, prio) : tx->ft.pol;
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto err_get_ft;
	}

	mutex_unlock(&tx->ft.mutex);
	return ft;

err_get_ft:
	tx_put(ipsec, tx);
err_get:
	mutex_unlock(&tx->ft.mutex);
	return ERR_PTR(err);
}

static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
					struct mlx5e_ipsec *ipsec, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);
	int err;

	mutex_lock(&tx->ft.mutex);
	err = tx_get(mdev, ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
	if (err)
		return ERR_PTR(err);

	return tx;
}

static void tx_ft_put(struct mlx5e_ipsec *ipsec, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);

	mutex_lock(&tx->ft.mutex);
	tx_put(ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
}

static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio, int type)
{
	struct mlx5e_ipsec_tx *tx = ipsec_tx(ipsec, type);

	mutex_lock(&tx->ft.mutex);
	if (tx->chains)
		ipsec_chains_put_table(tx->chains, prio);

	tx_put(ipsec, tx);
	mutex_unlock(&tx->ft.mutex);
}
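
/* Helpers that populate flow-spec match criteria/values for the SA and
 * policy rules below: IP addresses, SPI, ESP protocol, non-fragmented
 * traffic, L4 ports and metadata registers.
 */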
static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
			    __be32 *daddr)
{
	if (!*saddr && !*daddr)
		return;

	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);

	if (*saddr) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
	}

	if (*daddr) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	}
}

static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
			    __be32 *daddr)
{
	if (addr6_all_zero(saddr) && addr6_all_zero(daddr))
		return;

	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);

	if (!addr6_all_zero(saddr)) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
	}

	if (!addr6_all_zero(daddr)) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
	}
}

static void setup_fte_esp(struct mlx5_flow_spec *spec)
{
	/* ESP header; ip_protocol is an outer-header field, so enable the
	 * outer-headers criteria (the original text had MISC_PARAMETERS here,
	 * which looks like a transcription error).
	 */
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
}

static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
{
	/* SPI number */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;

	if (encap) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 misc_parameters.inner_esp_spi);
		MLX5_SET(fte_match_param, spec->match_value,
			 misc_parameters.inner_esp_spi, spi);
	} else {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 misc_parameters.outer_esp_spi);
		MLX5_SET(fte_match_param, spec->match_value,
			 misc_parameters.outer_esp_spi, spi);
	}
}

static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
{
	/* Non fragmented */
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
}

static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
{
	/* Add IPsec indicator in metadata_reg_a */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
}

static void setup_fte_reg_c4(struct mlx5_flow_spec *spec, u32 reqid)
{
	/* Pass policy check before choosing this SA */
	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 misc_parameters_2.metadata_reg_c_4);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.metadata_reg_c_4, reqid);
}

static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
{
	switch (upspec->proto) {
	case IPPROTO_UDP:
		if (upspec->dport) {
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
				 udp_dport, upspec->dport_mask);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
				 udp_dport, upspec->dport);
		}
		if (upspec->sport) {
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
				 udp_sport, upspec->sport_mask);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
				 udp_sport, upspec->sport);
		}
		break;
	case IPPROTO_TCP:
		if (upspec->dport) {
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
				 tcp_dport, upspec->dport_mask);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
				 tcp_dport, upspec->dport);
		}
		if (upspec->sport) {
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria,
				 tcp_sport, upspec->sport_mask);
			MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
				 tcp_sport, upspec->sport);
		}
		break;
	default:
		return;
	}

	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
}

static enum mlx5_flow_namespace_type ipsec_fs_get_ns(struct mlx5e_ipsec *ipsec,
						     int type, u8 dir)
{
	if (ipsec->is_uplink_rep && type == XFRM_DEV_OFFLOAD_PACKET)
		return MLX5_FLOW_NAMESPACE_FDB;

	if (dir == XFRM_DEV_OFFLOAD_IN)
		return MLX5_FLOW_NAMESPACE_KERNEL;

	return MLX5_FLOW_NAMESPACE_EGRESS;
}
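
/* Program metadata registers via a modify-header action. On RX, reg_b and
 * reg_c_2 carry the SA identity (@val) towards the status checks, and
 * reg_c_4 is cleared for crypto offload; on TX, reg_c_4 carries the policy
 * reqid that the SA rule later matches on (see setup_fte_reg_c4()).
 */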
static int setup_modify_header(struct mlx5e_ipsec *ipsec, int type, u32 val, u8 dir,
			       struct mlx5_flow_act *flow_act)
{
	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, type, dir);
	u8 action[3][MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_modify_hdr *modify_hdr;
	u8 num_of_actions = 1;

	MLX5_SET(set_action_in, action[0], action_type, MLX5_ACTION_TYPE_SET);
	switch (dir) {
	case XFRM_DEV_OFFLOAD_IN:
		MLX5_SET(set_action_in, action[0], field,
			 MLX5_ACTION_IN_FIELD_METADATA_REG_B);

		num_of_actions++;
		MLX5_SET(set_action_in, action[1], action_type, MLX5_ACTION_TYPE_SET);
		MLX5_SET(set_action_in, action[1], field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_2);
		MLX5_SET(set_action_in, action[1], data, val);
		MLX5_SET(set_action_in, action[1], offset, 0);
		MLX5_SET(set_action_in, action[1], length, 32);

		if (type == XFRM_DEV_OFFLOAD_CRYPTO) {
			num_of_actions++;
			MLX5_SET(set_action_in, action[2], action_type,
				 MLX5_ACTION_TYPE_SET);
			MLX5_SET(set_action_in, action[2], field,
				 MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
			MLX5_SET(set_action_in, action[2], data, 0);
			MLX5_SET(set_action_in, action[2], offset, 0);
			MLX5_SET(set_action_in, action[2], length, 32);
		}
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		MLX5_SET(set_action_in, action[0], field,
			 MLX5_ACTION_IN_FIELD_METADATA_REG_C_4);
		break;
	default:
		return -EINVAL;
	}

	MLX5_SET(set_action_in, action[0], data, val);
	MLX5_SET(set_action_in, action[0], offset, 0);
	MLX5_SET(set_action_in, action[0], length, 32);

	modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, num_of_actions, action);
	if (IS_ERR(modify_hdr)) {
		mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
			      PTR_ERR(modify_hdr));
		return PTR_ERR(modify_hdr);
	}

	flow_act->modify_hdr = modify_hdr;
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	return 0;
}
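
/* For tunnel mode, build the reformat header the device prepends on TX:
 * Ethernet + outer IPv4/IPv6 + ESP header carrying the SPI. The extra
 * 8 bytes reserved after the ESP header presumably leave room for the IV.
 */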
static int
setup_pkt_tunnel_reformat(struct mlx5_core_dev *mdev,
			  struct mlx5_accel_esp_xfrm_attrs *attrs,
			  struct mlx5_pkt_reformat_params *reformat_params)
{
	struct ip_esp_hdr *esp_hdr;
	struct ipv6hdr *ipv6hdr;
	struct ethhdr *eth_hdr;
	struct iphdr *iphdr;
	char *reformatbf;
	size_t bfflen;
	void *hdr;

	bfflen = sizeof(*eth_hdr);

	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
		bfflen += sizeof(*esp_hdr) + 8;

		switch (attrs->family) {
		case AF_INET:
			bfflen += sizeof(*iphdr);
			break;
		case AF_INET6:
			bfflen += sizeof(*ipv6hdr);
			break;
		default:
			return -EINVAL;
		}
	}

	reformatbf = kzalloc(bfflen, GFP_KERNEL);
	if (!reformatbf)
		return -ENOMEM;

	eth_hdr = (struct ethhdr *)reformatbf;
	switch (attrs->family) {
	case AF_INET:
		eth_hdr->h_proto = htons(ETH_P_IP);
		break;
	case AF_INET6:
		eth_hdr->h_proto = htons(ETH_P_IPV6);
		break;
	default:
		goto free_reformatbf;
	}

	ether_addr_copy(eth_hdr->h_dest, attrs->dmac);
	ether_addr_copy(eth_hdr->h_source, attrs->smac);

	switch (attrs->dir) {
	case XFRM_DEV_OFFLOAD_IN:
		reformat_params->type = MLX5_REFORMAT_TYPE_L3_ESP_TUNNEL_TO_L2;
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		reformat_params->type = MLX5_REFORMAT_TYPE_L2_TO_L3_ESP_TUNNEL;
		reformat_params->param_0 = attrs->authsize;

		hdr = reformatbf + sizeof(*eth_hdr);
		switch (attrs->family) {
		case AF_INET:
			iphdr = (struct iphdr *)hdr;
			memcpy(&iphdr->saddr, &attrs->saddr.a4, 4);
			memcpy(&iphdr->daddr, &attrs->daddr.a4, 4);
			iphdr->version = 4;
			iphdr->ihl = 5;
			iphdr->ttl = IPSEC_TUNNEL_DEFAULT_TTL;
			iphdr->protocol = IPPROTO_ESP;
			hdr += sizeof(*iphdr);
			break;
		case AF_INET6:
			ipv6hdr = (struct ipv6hdr *)hdr;
			memcpy(&ipv6hdr->saddr, &attrs->saddr.a6, 16);
			memcpy(&ipv6hdr->daddr, &attrs->daddr.a6, 16);
			ipv6hdr->nexthdr = IPPROTO_ESP;
			ipv6hdr->version = 6;
			ipv6hdr->hop_limit = IPSEC_TUNNEL_DEFAULT_TTL;
			hdr += sizeof(*ipv6hdr);
			break;
		default:
			goto free_reformatbf;
		}

		esp_hdr = (struct ip_esp_hdr *)hdr;
		esp_hdr->spi = htonl(attrs->spi);
		break;
	default:
		goto free_reformatbf;
	}

	reformat_params->size = bfflen;
	reformat_params->data = reformatbf;
	return 0;

free_reformatbf:
	kfree(reformatbf);
	return -EINVAL;
}

static int get_reformat_type(struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	switch (attrs->dir) {
	case XFRM_DEV_OFFLOAD_IN:
		if (attrs->encap)
			return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
		return MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
	case XFRM_DEV_OFFLOAD_OUT:
		if (attrs->family == AF_INET) {
			if (attrs->encap)
				return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
			return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
		}

		if (attrs->encap)
			return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6;
		return MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
	default:
		WARN_ON(true);
	}

	return -EINVAL;
}

static int
setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
			     struct mlx5_pkt_reformat_params *reformat_params)
{
	struct udphdr *udphdr;
	char *reformatbf;
	size_t bfflen;
	__be32 spi;
	void *hdr;

	reformat_params->type = get_reformat_type(attrs);
	if (reformat_params->type < 0)
		return reformat_params->type;

	switch (attrs->dir) {
	case XFRM_DEV_OFFLOAD_IN:
		break;
	case XFRM_DEV_OFFLOAD_OUT:
		bfflen = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_SIZE;
		if (attrs->encap)
			bfflen += sizeof(*udphdr);

		reformatbf = kzalloc(bfflen, GFP_KERNEL);
		if (!reformatbf)
			return -ENOMEM;

		hdr = reformatbf;
		if (attrs->encap) {
			udphdr = (struct udphdr *)reformatbf;
			udphdr->source = attrs->sport;
			udphdr->dest = attrs->dport;
			hdr += sizeof(*udphdr);
		}

		/* convert to network format */
		spi = htonl(attrs->spi);
		memcpy(hdr, &spi, sizeof(spi));

		reformat_params->param_0 = attrs->authsize;
		reformat_params->size = bfflen;
		reformat_params->data = reformatbf;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int setup_pkt_reformat(struct mlx5e_ipsec *ipsec,
			      struct mlx5_accel_esp_xfrm_attrs *attrs,
			      struct mlx5_flow_act *flow_act)
{
	enum mlx5_flow_namespace_type ns_type = ipsec_fs_get_ns(ipsec, attrs->type,
								attrs->dir);
	struct mlx5_pkt_reformat_params reformat_params = {};
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_pkt_reformat *pkt_reformat;
	int ret;

	switch (attrs->mode) {
	case XFRM_MODE_TRANSPORT:
		ret = setup_pkt_transport_reformat(attrs, &reformat_params);
		break;
	case XFRM_MODE_TUNNEL:
		ret = setup_pkt_tunnel_reformat(mdev, attrs, &reformat_params);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	pkt_reformat =
		mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
	kfree(reformat_params.data);
	if (IS_ERR(pkt_reformat))
		return PTR_ERR(pkt_reformat);

	flow_act->pkt_reformat = pkt_reformat;
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
	return 0;
}
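
/* Install the per-SA RX rule: match outer addresses and SPI (the inner SPI
 * for UDP encapsulation), decrypt, count, and forward to the status table
 * where the syndrome/replay drop checks run.
 */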
static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_destination dest[2];
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_ipsec_rx *rx;
	struct mlx5_fc *counter;
	int err = 0;

	rx = rx_ft_get(mdev, ipsec, attrs->family, attrs->type);
	if (IS_ERR(rx))
		return PTR_ERR(rx);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_spi(spec, attrs->spi, attrs->encap);
	if (!attrs->encap)
		setup_fte_esp(spec);
	setup_fte_no_frags(spec);
	setup_fte_upper_proto_match(spec, &attrs->upspec);

	if (!attrs->drop) {
		if (rx != ipsec->rx_esw)
			err = setup_modify_header(ipsec, attrs->type,
						  sa_entry->ipsec_obj_id | BIT(31),
						  XFRM_DEV_OFFLOAD_IN, &flow_act);
		else
			err = mlx5_esw_ipsec_rx_setup_modify_header(sa_entry, &flow_act);

		if (err)
			goto err_mod_header;
	}

	switch (attrs->type) {
	case XFRM_DEV_OFFLOAD_PACKET:
		err = setup_pkt_reformat(ipsec, attrs, &flow_act);
		if (err)
			goto err_pkt_reformat;
		break;
	default:
		break;
	}

	counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_add_cnt;
	}
	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
			   MLX5_FLOW_CONTEXT_ACTION_COUNT;
	if (attrs->drop)
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
	else
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[0].ft = rx->ft.status;
	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter_id = mlx5_fc_id(counter);
	rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
		goto err_add_flow;
	}
	if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
		err = rx_add_rule_drop_replay(sa_entry, rx);
	if (err)
		goto err_add_replay;

	err = rx_add_rule_drop_auth_trailer(sa_entry, rx);
	if (err)
		goto err_drop_reason;

	kvfree(spec);

	sa_entry->ipsec_rule.rule = rule;
	sa_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
	sa_entry->ipsec_rule.fc = counter;
	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
	return 0;

err_drop_reason:
	if (sa_entry->ipsec_rule.replay.rule) {
		mlx5_del_flow_rules(sa_entry->ipsec_rule.replay.rule);
		mlx5_fc_destroy(mdev, sa_entry->ipsec_rule.replay.fc);
	}
err_add_replay:
	mlx5_del_flow_rules(rule);
err_add_flow:
	mlx5_fc_destroy(mdev, counter);
err_add_cnt:
	if (flow_act.pkt_reformat)
		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
	if (flow_act.modify_hdr)
		mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
	kvfree(spec);
err_alloc:
	rx_ft_put(ipsec, attrs->family, attrs->type);
	return err;
}
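
/* Install the per-SA TX rule: match the flow (plus the reg_a marker for
 * crypto offload or the reg_c_4 reqid for packet offload), encrypt, count,
 * and forward to the TX status table.
 */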
static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_flow_destination dest[2];
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_ipsec_tx *tx;
	struct mlx5_fc *counter;
	int err;

	tx = tx_ft_get(mdev, ipsec, attrs->type);
	if (IS_ERR(tx))
		return PTR_ERR(tx);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_no_frags(spec);
	setup_fte_upper_proto_match(spec, &attrs->upspec);

	switch (attrs->type) {
	case XFRM_DEV_OFFLOAD_CRYPTO:
		setup_fte_spi(spec, attrs->spi, false);
		setup_fte_esp(spec);
		setup_fte_reg_a(spec);
		break;
	case XFRM_DEV_OFFLOAD_PACKET:
		if (attrs->reqid)
			setup_fte_reg_c4(spec, attrs->reqid);
		err = setup_pkt_reformat(ipsec, attrs, &flow_act);
		if (err)
			goto err_pkt_reformat;
		break;
	default:
		break;
	}

	counter = mlx5_fc_create(mdev, true);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_add_cnt;
	}

	flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
	flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
	flow_act.flags |= FLOW_ACT_NO_APPEND;
	flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
			   MLX5_FLOW_CONTEXT_ACTION_COUNT;
	if (attrs->drop)
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
	else
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	dest[0].ft = tx->ft.status;
	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest[1].counter_id = mlx5_fc_id(counter);
	rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, dest, 2);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "failed to add TX IPsec rule, err=%d\n", err);
		goto err_add_flow;
	}

	kvfree(spec);
	sa_entry->ipsec_rule.rule = rule;
	sa_entry->ipsec_rule.fc = counter;
	sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
	return 0;

err_add_flow:
	mlx5_fc_destroy(mdev, counter);
err_add_cnt:
	if (flow_act.pkt_reformat)
		mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
err_pkt_reformat:
	kvfree(spec);
err_alloc:
	tx_ft_put(ipsec, attrs->type);
	return err;
}

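/* Add a TX policy rule: ALLOW forwards matching traffic to the TX SA table
 * (tagging the reqid via modify header when one is set), while BLOCK drops
 * and counts it.
 */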
static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	struct mlx5e_ipsec_tx *tx;
	int err, dstn = 0;

	ft = tx_ft_get_policy(mdev, ipsec, attrs->prio, attrs->type);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	tx = ipsec_tx(ipsec, attrs->type);
	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_no_frags(spec);
	setup_fte_upper_proto_match(spec, &attrs->upspec);

	switch (attrs->action) {
	case XFRM_POLICY_ALLOW:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		if (!attrs->reqid)
			break;

		err = setup_modify_header(ipsec, attrs->type, attrs->reqid,
					  XFRM_DEV_OFFLOAD_OUT, &flow_act);
		if (err)
			goto err_mod_header;
		break;
	case XFRM_POLICY_BLOCK:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				   MLX5_FLOW_CONTEXT_ACTION_COUNT;
		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
		dstn++;
		break;
	default:
		WARN_ON(true);
		err = -EINVAL;
		goto err_mod_header;
	}

	flow_act.flags |= FLOW_ACT_NO_APPEND;
	if (tx == ipsec->tx_esw && tx->chains)
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[dstn].ft = tx->ft.sa;
	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dstn++;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "failed to add TX IPsec policy rule, err=%d\n", err);
		goto err_action;
	}

	kvfree(spec);
	pol_entry->ipsec_rule.rule = rule;
	pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
	return 0;

err_action:
	if (flow_act.modify_hdr)
		mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
err_mod_header:
	kvfree(spec);
err_alloc:
	tx_ft_put_policy(ipsec, attrs->prio, attrs->type);
	return err;
}

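/* Add an RX policy rule: ALLOW forwards matching traffic to the RX SA
 * table, while BLOCK drops and counts it.
 */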
static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
	struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
	struct mlx5_flow_destination dest[2];
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	struct mlx5e_ipsec_rx *rx;
	int err, dstn = 0;

	ft = rx_ft_get_policy(mdev, pol_entry->ipsec, attrs->family, attrs->prio,
			      attrs->type);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	rx = ipsec_rx(pol_entry->ipsec, attrs->family, attrs->type);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto err_alloc;
	}

	if (attrs->family == AF_INET)
		setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
	else
		setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);

	setup_fte_no_frags(spec);
	setup_fte_upper_proto_match(spec, &attrs->upspec);

	switch (attrs->action) {
	case XFRM_POLICY_ALLOW:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		break;
	case XFRM_POLICY_BLOCK:
		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				   MLX5_FLOW_CONTEXT_ACTION_COUNT;
		dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
		dstn++;
		break;
	default:
		WARN_ON(true);
		err = -EINVAL;
		goto err_action;
	}

	flow_act.flags |= FLOW_ACT_NO_APPEND;
	if (rx == ipsec->rx_esw && rx->chains)
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[dstn].ft = rx->ft.sa;
	dstn++;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "failed to add RX IPsec policy rule, err=%d\n", err);
		goto err_action;
	}

	kvfree(spec);
	pol_entry->ipsec_rule.rule = rule;
	return 0;

err_action:
	kvfree(spec);
err_alloc:
	rx_ft_put_policy(pol_entry->ipsec, attrs->family, attrs->prio, attrs->type);
	return err;
}

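/* Each direction keeps a pair of flow counters: "cnt" for offloaded
 * traffic and "drop" for packets dropped by policy.
 */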
static void ipsec_fs_destroy_single_counter(struct mlx5_core_dev *mdev,
					    struct mlx5e_ipsec_fc *fc)
{
	mlx5_fc_destroy(mdev, fc->drop);
	mlx5_fc_destroy(mdev, fc->cnt);
	kfree(fc);
}

static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;

	ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
	if (ipsec->is_uplink_rep) {
		ipsec_fs_destroy_single_counter(mdev, ipsec->tx_esw->fc);
		ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
	}
}

static struct mlx5e_ipsec_fc *ipsec_fs_init_single_counter(struct mlx5_core_dev *mdev)
{
	struct mlx5e_ipsec_fc *fc;
	struct mlx5_fc *counter;
	int err;

	fc = kzalloc(sizeof(*fc), GFP_KERNEL);
	if (!fc)
		return ERR_PTR(-ENOMEM);

	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_cnt;
	}
	fc->cnt = counter;

	counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(counter)) {
		err = PTR_ERR(counter);
		goto err_drop;
	}
	fc->drop = counter;

	return fc;

err_drop:
	mlx5_fc_destroy(mdev, fc->cnt);
err_cnt:
	kfree(fc);
	return ERR_PTR(err);
}

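/* Allocate the counter pairs for the regular RX/TX paths and, on the
 * uplink representor, for their eswitch counterparts; unwound in reverse
 * order on failure.
 */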
static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_fc *fc;
	int err;

	fc = ipsec_fs_init_single_counter(mdev);
	if (IS_ERR(fc)) {
		err = PTR_ERR(fc);
		goto err_rx_cnt;
	}
	ipsec->rx_ipv4->fc = fc;

	fc = ipsec_fs_init_single_counter(mdev);
	if (IS_ERR(fc)) {
		err = PTR_ERR(fc);
		goto err_tx_cnt;
	}
	ipsec->tx->fc = fc;

	if (ipsec->is_uplink_rep) {
		fc = ipsec_fs_init_single_counter(mdev);
		if (IS_ERR(fc)) {
			err = PTR_ERR(fc);
			goto err_rx_esw_cnt;
		}
		ipsec->rx_esw->fc = fc;

		fc = ipsec_fs_init_single_counter(mdev);
		if (IS_ERR(fc)) {
			err = PTR_ERR(fc);
			goto err_tx_esw_cnt;
		}
		ipsec->tx_esw->fc = fc;
	}

	/* Both IPv4 and IPv6 point to the same flow counters struct. */
	ipsec->rx_ipv6->fc = ipsec->rx_ipv4->fc;
	return 0;

err_tx_esw_cnt:
	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_esw->fc);
err_rx_esw_cnt:
	ipsec_fs_destroy_single_counter(mdev, ipsec->tx->fc);
err_tx_cnt:
	ipsec_fs_destroy_single_counter(mdev, ipsec->rx_ipv4->fc);
err_rx_cnt:
	return err;
}

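/* Fold the hardware flow counters into the driver's IPsec statistics;
 * eswitch counters are added to the same totals on the uplink representor.
 */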
void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	struct mlx5e_ipsec_hw_stats *stats;
	struct mlx5e_ipsec_fc *fc;
	u64 packets, bytes;

	stats = (struct mlx5e_ipsec_hw_stats *)ipsec_stats;

	stats->ipsec_rx_pkts = 0;
	stats->ipsec_rx_bytes = 0;
	stats->ipsec_rx_drop_pkts = 0;
	stats->ipsec_rx_drop_bytes = 0;
	stats->ipsec_tx_pkts = 0;
	stats->ipsec_tx_bytes = 0;
	stats->ipsec_tx_drop_pkts = 0;
	stats->ipsec_tx_drop_bytes = 0;

	fc = ipsec->rx_ipv4->fc;
	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
		      &stats->ipsec_rx_drop_bytes);

	fc = ipsec->tx->fc;
	mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
	mlx5_fc_query(mdev, fc->drop, &stats->ipsec_tx_drop_pkts,
		      &stats->ipsec_tx_drop_bytes);

	if (ipsec->is_uplink_rep) {
		fc = ipsec->rx_esw->fc;
		if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
			stats->ipsec_rx_pkts += packets;
			stats->ipsec_rx_bytes += bytes;
		}

		if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
			stats->ipsec_rx_drop_pkts += packets;
			stats->ipsec_rx_drop_bytes += bytes;
		}

		fc = ipsec->tx_esw->fc;
		if (!mlx5_fc_query(mdev, fc->cnt, &packets, &bytes)) {
			stats->ipsec_tx_pkts += packets;
			stats->ipsec_tx_bytes += bytes;
		}

		if (!mlx5_fc_query(mdev, fc->drop, &packets, &bytes)) {
			stats->ipsec_tx_drop_pkts += packets;
			stats->ipsec_tx_drop_bytes += bytes;
		}
	}
}

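/* TC and IPsec packet offload are mutually exclusive: taking an IPsec
 * block reference (num_block_tc) fails with -EBUSY while TC rules hold
 * num_block_ipsec, and vice versa. With an eswitch present, the check is
 * serialized against eswitch mode changes.
 */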
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int err = 0;

	if (esw) {
		err = mlx5_esw_lock(esw);
		if (err)
			return err;
	}

	if (mdev->num_block_ipsec) {
		err = -EBUSY;
		goto unlock;
	}

	mdev->num_block_tc++;

unlock:
	if (esw)
		mlx5_esw_unlock(esw);

	return err;
}
#else
static int mlx5e_ipsec_block_tc_offload(struct mlx5_core_dev *mdev)
{
	if (mdev->num_block_ipsec)
		return -EBUSY;

	mdev->num_block_tc++;
	return 0;
}
#endif

static void mlx5e_ipsec_unblock_tc_offload(struct mlx5_core_dev *mdev)
{
	mdev->num_block_tc--;
}

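/* Entry point for SA rule installation. Packet offload first blocks TC
 * offload on the device and releases the block again if rule setup fails.
 */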
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	int err;

	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET) {
		err = mlx5e_ipsec_block_tc_offload(sa_entry->ipsec->mdev);
		if (err)
			return err;
	}

	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
		err = tx_add_rule(sa_entry);
	else
		err = rx_add_rule(sa_entry);

	if (err)
		goto err_out;

	return 0;

err_out:
	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
		mlx5e_ipsec_unblock_tc_offload(sa_entry->ipsec->mdev);
	return err;
}

void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_del_flow_rules(ipsec_rule->rule);
	mlx5_fc_destroy(mdev, ipsec_rule->fc);
	if (ipsec_rule->pkt_reformat)
		mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);

	if (sa_entry->attrs.type == XFRM_DEV_OFFLOAD_PACKET)
		mlx5e_ipsec_unblock_tc_offload(mdev);

	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
		tx_ft_put(sa_entry->ipsec, sa_entry->attrs.type);
		return;
	}

	if (ipsec_rule->modify_hdr)
		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);

	mlx5_del_flow_rules(ipsec_rule->trailer.rule);
	mlx5_fc_destroy(mdev, ipsec_rule->trailer.fc);

	mlx5_del_flow_rules(ipsec_rule->auth.rule);
	mlx5_fc_destroy(mdev, ipsec_rule->auth.fc);

	if (ipsec_rule->replay.rule) {
		mlx5_del_flow_rules(ipsec_rule->replay.rule);
		mlx5_fc_destroy(mdev, ipsec_rule->replay.fc);
	}
	mlx5_esw_ipsec_rx_id_mapping_remove(sa_entry);
	rx_ft_put(sa_entry->ipsec, sa_entry->attrs.family, sa_entry->attrs.type);
}

int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	int err;

	err = mlx5e_ipsec_block_tc_offload(pol_entry->ipsec->mdev);
	if (err)
		return err;

	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
		err = tx_add_policy(pol_entry);
	else
		err = rx_add_policy(pol_entry);

	if (err)
		goto err_out;

	return 0;

err_out:
	mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);
	return err;
}

void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
{
	struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);

	mlx5_del_flow_rules(ipsec_rule->rule);

	mlx5e_ipsec_unblock_tc_offload(pol_entry->ipsec->mdev);

	if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
		rx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.family,
				 pol_entry->attrs.prio, pol_entry->attrs.type);
		return;
	}

	if (ipsec_rule->modify_hdr)
		mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);

	tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio, pol_entry->attrs.type);
}

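/* Tear everything down in reverse init order; a NULL tx means init never
 * completed, so there is nothing to clean up.
 */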
void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
{
	if (!ipsec->tx)
		return;

	if (ipsec->roce)
		mlx5_ipsec_fs_roce_cleanup(ipsec->roce);

	ipsec_fs_destroy_counters(ipsec);
	mutex_destroy(&ipsec->tx->ft.mutex);
	WARN_ON(ipsec->tx->ft.refcnt);
	kfree(ipsec->tx);

	mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
	WARN_ON(ipsec->rx_ipv4->ft.refcnt);
	kfree(ipsec->rx_ipv4);

	mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
	WARN_ON(ipsec->rx_ipv6->ft.refcnt);
	kfree(ipsec->rx_ipv6);

	if (ipsec->is_uplink_rep) {
		xa_destroy(&ipsec->ipsec_obj_id_map);

		mutex_destroy(&ipsec->tx_esw->ft.mutex);
		WARN_ON(ipsec->tx_esw->ft.refcnt);
		kfree(ipsec->tx_esw);

		mutex_destroy(&ipsec->rx_esw->ft.mutex);
		WARN_ON(ipsec->rx_esw->ft.refcnt);
		kfree(ipsec->rx_esw);
	}
}

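/* Allocate the RX/TX flow-steering contexts and their counters. The uplink
 * representor additionally gets FDB-namespace contexts and the SA object ID
 * map; otherwise RoCE-over-IPsec support is initialized when available.
 */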
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec,
			      struct mlx5_devcom_comp_dev **devcom)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_namespace *ns, *ns_esw;
	int err = -ENOMEM;

	ns = mlx5_get_flow_namespace(ipsec->mdev,
				     MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
	if (!ns)
		return -EOPNOTSUPP;

	if (ipsec->is_uplink_rep) {
		ns_esw = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_FDB);
		if (!ns_esw)
			return -EOPNOTSUPP;

		ipsec->tx_esw = kzalloc(sizeof(*ipsec->tx_esw), GFP_KERNEL);
		if (!ipsec->tx_esw)
			return -ENOMEM;

		ipsec->rx_esw = kzalloc(sizeof(*ipsec->rx_esw), GFP_KERNEL);
		if (!ipsec->rx_esw)
			goto err_rx_esw;
	}

	ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
	if (!ipsec->tx)
		goto err_tx;

	ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
	if (!ipsec->rx_ipv4)
		goto err_rx_ipv4;

	ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
	if (!ipsec->rx_ipv6)
		goto err_rx_ipv6;

	err = ipsec_fs_init_counters(ipsec);
	if (err)
		goto err_counters;

	mutex_init(&ipsec->tx->ft.mutex);
	mutex_init(&ipsec->rx_ipv4->ft.mutex);
	mutex_init(&ipsec->rx_ipv6->ft.mutex);
	ipsec->tx->ns = ns;

	if (ipsec->is_uplink_rep) {
		mutex_init(&ipsec->tx_esw->ft.mutex);
		mutex_init(&ipsec->rx_esw->ft.mutex);
		ipsec->tx_esw->ns = ns_esw;
		xa_init_flags(&ipsec->ipsec_obj_id_map, XA_FLAGS_ALLOC1);
	} else if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE) {
		ipsec->roce = mlx5_ipsec_fs_roce_init(mdev, devcom);
	} else {
		mlx5_core_warn(mdev, "IPsec was initialized without RoCE support\n");
	}

	return 0;

err_counters:
	kfree(ipsec->rx_ipv6);
err_rx_ipv6:
	kfree(ipsec->rx_ipv4);
err_rx_ipv4:
	kfree(ipsec->tx);
err_tx:
	kfree(ipsec->rx_esw);
err_rx_esw:
	kfree(ipsec->tx_esw);
	return err;
}

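/* Update an offloaded SA make-before-break style: install rules for a
 * shadow copy first and remove the old rules only afterwards, so traffic
 * keeps flowing across the update.
 */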
void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec_sa_entry sa_entry_shadow = {};
	int err;

	memcpy(&sa_entry_shadow, sa_entry, sizeof(*sa_entry));
	memset(&sa_entry_shadow.ipsec_rule, 0x00, sizeof(sa_entry->ipsec_rule));

	err = mlx5e_accel_ipsec_fs_add_rule(&sa_entry_shadow);
	if (err)
		return;

	mlx5e_accel_ipsec_fs_del_rule(sa_entry);
	memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
}

bool mlx5e_ipsec_fs_tunnel_enabled(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5e_ipsec_rx *rx;
	struct mlx5e_ipsec_tx *tx;

	rx = ipsec_rx(sa_entry->ipsec, attrs->family, attrs->type);
	tx = ipsec_tx(sa_entry->ipsec, attrs->type);
	if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
		return tx->allow_tunnel_mode;

	return rx->allow_tunnel_mode;
}

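/* Multi-port vhca (MPV) events are handled asynchronously on the slave's
 * IPsec workqueue; the master's completion is re-armed here and is
 * expected to be signalled by ipsec_mpv_work_handler (or immediately,
 * when the slave has no IPsec support).
 */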
void mlx5e_ipsec_handle_mpv_event(int event, struct mlx5e_priv *slave_priv,
				  struct mlx5e_priv *master_priv)
{
	struct mlx5e_ipsec_mpv_work *work;

	reinit_completion(&master_priv->ipsec->comp);

	if (!slave_priv->ipsec) {
		complete(&master_priv->ipsec->comp);
		return;
	}

	work = &slave_priv->ipsec->mpv_work;

	INIT_WORK(&work->work, ipsec_mpv_work_handler);
	work->event = event;
	work->slave_priv = slave_priv;
	work->master_priv = master_priv;
	queue_work(slave_priv->ipsec->wq, &work->work);
}

void mlx5e_ipsec_send_event(struct mlx5e_priv *priv, int event)
{
	if (!priv->ipsec)
		return; /* IPsec not supported */

	mlx5_devcom_send_event(priv->devcom, event, event, priv);
	wait_for_completion(&priv->ipsec->comp);
}