// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include "fs_core.h"
#include "eswitch.h"
#include "en_accel/ipsec.h"
#include "esw/ipsec_fs.h"
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
#include "en/tc_priv.h"
#endif

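/* RX flow table levels inside the FDB_CRYPTO_INGRESS priority. A table
 * may only forward to a higher-level table, so RX lookups go policy ->
 * SA (ESP decrypt) -> status check.
 */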
enum {
	MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,
	MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL,
	MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL,
};

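/* TX flow table levels inside the FDB_CRYPTO_EGRESS priority: policy ->
 * SA (ESP encrypt) -> counter.
 */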
enum {
	MLX5_ESW_IPSEC_TX_POL_FT_LEVEL,
	MLX5_ESW_IPSEC_TX_ESP_FT_LEVEL,
	MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL,
};

static void esw_ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
					     struct mlx5e_ipsec_rx *rx)
{
	mlx5_del_flow_rules(rx->status_drop.rule);
	mlx5_destroy_flow_group(rx->status_drop.group);
	mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt);
}

static void esw_ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
					     struct mlx5e_ipsec_rx *rx)
{
	mlx5_del_flow_rules(rx->status.rule);
	mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
}

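/* Install a catch-all drop rule in the last flow entry of the RX status
 * table (start and end of the group are both max_fte - 1). Packets that
 * miss the "status pass" rule, i.e. carry a non-zero IPsec syndrome,
 * land here and are counted and dropped.
 */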
static int esw_ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
					   struct mlx5e_ipsec_rx *rx)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table *ft = rx->ft.status;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_fc *flow_counter;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!flow_group_in || !spec) {
		err = -ENOMEM;
		goto err_out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop flow group, err=%d\n", err);
		goto err_out;
	}

	flow_counter = mlx5_fc_create(mdev, false);
	if (IS_ERR(flow_counter)) {
		err = PTR_ERR(flow_counter);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
		goto err_cnt;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter_id = mlx5_fc_id(flow_counter);
	spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev,
			      "Failed to add ipsec rx status drop rule, err=%d\n", err);
		goto err_rule;
	}

	rx->status_drop.group = g;
	rx->status_drop.rule = rule;
	rx->status_drop_cnt = flow_counter;

	kvfree(flow_group_in);
	kvfree(spec);
	return 0;

err_rule:
	mlx5_fc_destroy(mdev, flow_counter);
err_cnt:
	mlx5_destroy_flow_group(g);
err_out:
	kvfree(flow_group_in);
	kvfree(spec);
	return err;
}

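/* Match packets whose IPsec syndrome is zero (decrypt and integrity
 * check succeeded) and forward them to the two caller-supplied
 * destinations: the next flow table and a flow counter.
 */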
static int esw_ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
					   struct mlx5e_ipsec_rx *rx,
					   struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 misc_parameters_2.ipsec_syndrome);
	MLX5_SET(fte_match_param, spec->match_value,
		 misc_parameters_2.ipsec_syndrome, 0);
	spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.flags = FLOW_ACT_NO_APPEND;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_COUNT;
	rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_warn(ipsec->mdev,
			       "Failed to add ipsec rx status pass rule, err=%d\n", err);
		goto err_rule;
	}

	rx->status.rule = rule;
	kvfree(spec);
	return 0;

err_rule:
	kvfree(spec);
	return err;
}

void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
				      struct mlx5e_ipsec_rx *rx)
{
	esw_ipsec_rx_status_pass_destroy(ipsec, rx);
	esw_ipsec_rx_status_drop_destroy(ipsec, rx);
}

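/* Populate the RX status table: the catch-all drop rule in the last
 * entry first, then the syndrome-zero "pass" rule in front of it.
 */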
int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
				    struct mlx5e_ipsec_rx *rx,
				    struct mlx5_flow_destination *dest)
{
	int err;

	err = esw_ipsec_rx_status_drop_create(ipsec, rx);
	if (err)
		return err;

	err = esw_ipsec_rx_status_pass_create(ipsec, rx, dest);
	if (err)
		goto err_pass_create;

	return 0;

err_pass_create:
	esw_ipsec_rx_status_drop_destroy(ipsec, rx);
	return err;
}

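/* Fill the RX create attributes for the eswitch case: all tables live
 * in the FDB namespace under the crypto ingress priority.
 */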
void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
				       struct mlx5e_ipsec_rx_create_attr *attr)
{
	attr->prio = FDB_CRYPTO_INGRESS;
	attr->pol_level = MLX5_ESW_IPSEC_RX_POL_FT_LEVEL;
	attr->sa_level = MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL;
	attr->status_level = MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL;
	attr->chains_ns = MLX5_FLOW_NAMESPACE_FDB;
}

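/* Successfully decrypted packets continue to chain 0, prio 1 of the
 * eswitch chains, i.e. back into the regular FDB TC pipeline. The
 * reference taken here is dropped in esw_ipsec_rx_status_pass_destroy().
 */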
int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
					   struct mlx5_flow_destination *dest)
{
	dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest->ft = mlx5_chains_get_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);

	return 0;
}

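/* Allocate a compact mapped ID for the 32-bit IPsec object ID and write
 * it into metadata register REG_C_1 with a modify-header action, so the
 * SA identity can be recovered later on the TC restore path.
 */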
int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
					  struct mlx5_flow_act *flow_act)
{
	u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_modify_hdr *modify_hdr;
	u32 mapped_id;
	int err;

	err = xa_alloc_bh(&ipsec->rx_esw->ipsec_obj_id_map, &mapped_id,
			  xa_mk_value(sa_entry->ipsec_obj_id),
			  XA_LIMIT(1, ESW_IPSEC_RX_MAPPED_ID_MASK), 0);
	if (err)
		return err;

	/* reuse tunnel bits for ipsec,
	 * tun_id is always 0 and tun_opts is mapped to ipsec_obj_id.
	 */
	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(set_action_in, action, offset, ESW_ZONE_ID_BITS);
	MLX5_SET(set_action_in, action, length,
		 ESW_TUN_ID_BITS + ESW_TUN_OPTS_BITS);
	MLX5_SET(set_action_in, action, data, mapped_id);

	modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB,
					      1, action);
	if (IS_ERR(modify_hdr)) {
		err = PTR_ERR(modify_hdr);
		goto err_header_alloc;
	}

	sa_entry->rx_mapped_id = mapped_id;
	flow_act->modify_hdr = modify_hdr;
	flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;

	return 0;

err_header_alloc:
	xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, mapped_id);
	return err;
}

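/* Release the mapped ID, if one was allocated for this SA entry. */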
void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;

	if (sa_entry->rx_mapped_id)
		xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map,
			    sa_entry->rx_mapped_id);
}

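/* Reverse lookup: translate a mapped ID recovered from packet metadata
 * back to the IPsec object ID it represents.
 */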
int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
					  u32 *ipsec_obj_id)
{
	struct mlx5e_ipsec *ipsec = priv->ipsec;
	void *val;

	val = xa_load(&ipsec->rx_esw->ipsec_obj_id_map, id);
	if (!val)
		return -ENOENT;

	*ipsec_obj_id = xa_to_value(val);

	return 0;
}

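/* Fill the TX create attributes for the eswitch case: all tables live
 * in the FDB namespace under the crypto egress priority.
 */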
void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
				       struct mlx5e_ipsec_tx_create_attr *attr)
{
	attr->prio = FDB_CRYPTO_EGRESS;
	attr->pol_level = MLX5_ESW_IPSEC_TX_POL_FT_LEVEL;
	attr->sa_level = MLX5_ESW_IPSEC_TX_ESP_FT_LEVEL;
	attr->cnt_level = MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL;
	attr->chains_ns = MLX5_FLOW_NAMESPACE_FDB;
}

#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
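/* Restore a single TC flow's last destination back to the plain uplink
 * vport. Flows with more than one non-split destination are skipped.
 */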
static int mlx5_esw_ipsec_modify_flow_dests(struct mlx5_eswitch *esw,
					    struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5_flow_attr *attr;
	int err;

	attr = flow->attr;
	esw_attr = attr->esw_attr;
	if (esw_attr->out_count - esw_attr->split_count > 1)
		return 0;

	err = mlx5_eswitch_restore_ipsec_rule(esw, flow->rule[0], esw_attr,
					      esw_attr->out_count - 1);

	return err;
}
#endif

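/* Walk the TC flow tables of all vport representors and restore the
 * uplink destination of every offloaded flow.
 */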
void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
{
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5_eswitch_rep *rep;
	struct mlx5e_rep_priv *rpriv;
	struct rhashtable_iter iter;
	struct mlx5e_tc_flow *flow;
	unsigned long i;
	int err;

	xa_for_each(&esw->offloads.vport_reps, i, rep) {
		rpriv = rep->rep_data[REP_ETH].priv;
		if (!rpriv || !rpriv->netdev)
			continue;

		rhashtable_walk_enter(&rpriv->tc_ht, &iter);
		rhashtable_walk_start(&iter);
		while ((flow = rhashtable_walk_next(&iter)) != NULL) {
			if (IS_ERR(flow))
				continue;

			err = mlx5_esw_ipsec_modify_flow_dests(esw, flow);
			if (err)
				mlx5_core_warn_once(mdev,
						    "Failed to modify flow dests for IPsec");
		}
		rhashtable_walk_stop(&iter);
		rhashtable_walk_exit(&iter);
	}
#endif
}