// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */

#include "mlx5_core.h"
#include "en.h"
#include "ipsec.h"
#include "lib/crypto.h"
#include "lib/ipsec_fs_roce.h"
#include "fs_core.h"
#include "eswitch.h"

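/* ASO data offsets of the remove_flow packet counter and the soft
 * lifetime value; used below when (re)arming lifetime events through
 * bitwise ASO writes.
 */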
enum {
	MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
	MLX5_IPSEC_ASO_REMOVE_FLOW_SOFT_LFT_OFFSET,
};

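/* Build the MLX5_IPSEC_CAP_* mask advertised for this device (crypto
 * offload, packet offload, priority, tunnel mode, ESP-in-UDP, RoCE, ESN)
 * from the relevant HCA, flow table and Ethernet capabilities. Returns 0
 * when IPsec offload cannot be used at all.
 */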
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
	u32 caps = 0;

	if (!MLX5_CAP_GEN(mdev, ipsec_offload))
		return 0;

	if (!MLX5_CAP_GEN(mdev, log_max_dek))
		return 0;

	if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
	    MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
		return 0;

	if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
	    !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
		return 0;

	if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) ||
	    !MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
		return 0;

	if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
	    MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
		caps |= MLX5_IPSEC_CAP_CRYPTO;

	if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
	    (mdev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_DMFS ||
	     is_mdev_legacy_mode(mdev))) {
		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
					      reformat_add_esp_trasport) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					      reformat_del_esp_trasport) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
			caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;

		if (IS_ENABLED(CONFIG_MLX5_CLS_ACT) &&
		    ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
		      MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
		     MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level)))
			caps |= MLX5_IPSEC_CAP_PRIO;

		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
					      reformat_l2_to_l3_esp_tunnel) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					      reformat_l3_esp_tunnel_to_l2))
			caps |= MLX5_IPSEC_CAP_TUNNEL;

		if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
					      reformat_add_esp_transport_over_udp) &&
		    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					      reformat_del_esp_transport_over_udp))
			caps |= MLX5_IPSEC_CAP_ESPINUDP;
	}

	if (mlx5_get_roce_state(mdev) && mlx5_ipsec_fs_is_mpv_roce_supported(mdev) &&
	    MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA &&
	    MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
		caps |= MLX5_IPSEC_CAP_ROCE;

	if (!caps)
		return 0;

	if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
		caps |= MLX5_IPSEC_CAP_ESN;

	/* We can accommodate up to 2^24 different IPsec objects
	 * because we use up to 24 bits of flow table metadata
	 * to hold the unique IPsec object handle.
	 */
	WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
	return caps;
}
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);

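/* Fill the ASO part of the IPsec object for packet offload: replay
 * protection window and ESN event arming, the PD used for ASO access,
 * the metadata register returned to flow steering, sequence number
 * increment mode on TX, and hard/soft packet lifetime arming.
 */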
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
				     struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	void *aso_ctx;

	aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
	if (attrs->replay_esn.trigger) {
		MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);

		if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
			MLX5_SET(ipsec_aso, aso_ctx, window_sz,
				 attrs->replay_esn.replay_window);
			MLX5_SET(ipsec_aso, aso_ctx, mode,
				 MLX5_IPSEC_ASO_REPLAY_PROTECTION);
		}
		MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
			 attrs->replay_esn.esn);
	}

	/* ASO context */
	MLX5_SET(ipsec_obj, obj, ipsec_aso_access_pd, pdn);
	MLX5_SET(ipsec_obj, obj, full_offload, 1);
	MLX5_SET(ipsec_aso, aso_ctx, valid, 1);
	/* MLX5_IPSEC_ASO_REG_C_4_5 is a type C register that flow
	 * steering matches against. Be aware that this register was
	 * chosen arbitrarily and can't be used elsewhere as long as
	 * IPsec packet offload is active.
	 */
	MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
	if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) {
		MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
		if (!attrs->replay_esn.trigger)
			MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
				 sa_entry->esn_state.esn);
	}

	if (attrs->lft.hard_packet_limit != XFRM_INF) {
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
			 attrs->lft.hard_packet_limit);
		MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
	}

	if (attrs->lft.soft_packet_limit != XFRM_INF) {
		MLX5_SET(ipsec_aso, aso_ctx, remove_flow_soft_lft,
			 attrs->lft.soft_packet_limit);

		MLX5_SET(ipsec_aso, aso_ctx, soft_lft_arm, 1);
	}
}

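/* Create the IPSEC general object in firmware for this SA: salt,
 * implicit IV, ICV length, ESN state and the previously created DEK,
 * plus the packet offload ASO context when applicable. On success the
 * returned object id is stored in the SA entry.
 */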
static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
	void *obj, *salt_p, *salt_iv_p;
	struct mlx5e_hw_objs *res;
	int err;

	obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);

	/* salt and seq_iv */
	salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
	memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));

	MLX5_SET(ipsec_obj, obj, icv_length, MLX5_IPSEC_OBJECT_ICV_LEN_16B);
	salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
	memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
	/* esn */
	if (attrs->replay_esn.trigger) {
		MLX5_SET(ipsec_obj, obj, esn_en, 1);
		MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
		MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);
	}

	MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_IPSEC);

	res = &mdev->mlx5e_res.hw_objs;
	if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
		mlx5e_ipsec_packet_setup(obj, res->pdn, sa_entry);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (!err)
		sa_entry->ipsec_obj_id =
			MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

	return err;
}

static void mlx5_destroy_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
		 MLX5_GENERAL_OBJECT_TYPES_IPSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

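/* Create the HW context of an SA: the DEK for the AES-GCM key first,
 * then the IPsec object referencing it. The DEK is destroyed if object
 * creation fails.
 */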
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct aes_gcm_keymat *aes_gcm = &sa_entry->attrs.aes_gcm;
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	int err;

	/* key */
	err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key,
					 aes_gcm->key_len / BITS_PER_BYTE,
					 MLX5_ACCEL_OBJ_IPSEC_KEY,
					 &sa_entry->enc_key_id);
	if (err) {
		mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
		return err;
	}

	err = mlx5_create_ipsec_obj(sa_entry);
	if (err) {
		mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
		goto err_enc_key;
	}

	return 0;

err_enc_key:
	mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
	return err;
}

void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

	mlx5_destroy_ipsec_obj(sa_entry);
	mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
}

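/* Update the ESN state (esn_msb/esn_overlap) of an existing IPsec
 * object. The object is queried first to verify that these fields are
 * reported as modifiable before the modify command is issued.
 */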
static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
				 const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
	u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
	u64 modify_field_select = 0;
	u64 general_obj_types;
	void *obj;
	int err;

	general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
	if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
		return -EINVAL;

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err) {
		mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
			      sa_entry->ipsec_obj_id, err);
		return err;
	}

	obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
	modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);

	/* esn */
	if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
	    !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
		return -EOPNOTSUPP;

	obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
	MLX5_SET64(ipsec_obj, obj, modify_field_select,
		   MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
		   MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
	MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
	MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);

	/* general object fields set */
	MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
				const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
	int err;

	err = mlx5_modify_ipsec_obj(sa_entry, attrs);
	if (err)
		return;

	memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
}

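/* Post an ASO update on top of the regular query WQE: 64-bit bitwise
 * data-mask mode with always-true conditions, so the caller-provided
 * bitwise data/mask are applied to the ASO context.
 */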
static void mlx5e_ipsec_aso_update(struct mlx5e_ipsec_sa_entry *sa_entry,
				   struct mlx5_wqe_aso_ctrl_seg *data)
{
	data->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
	data->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
				      MLX5_ASO_ALWAYS_TRUE << 4;

	mlx5e_ipsec_aso_query(sa_entry, data);
}

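/* Handle an ESN event: advance esn_msb (or mark the overlap window)
 * depending on which half of the ESN scope mode_parameter reports, push
 * the new ESN state to firmware and re-arm the event in the ASO context.
 */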
static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
					 u32 mode_param)
{
	struct mlx5_accel_esp_xfrm_attrs attrs = {};
	struct mlx5_wqe_aso_ctrl_seg data = {};

	if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
		sa_entry->esn_state.esn_msb++;
		sa_entry->esn_state.overlap = 0;
	} else {
		sa_entry->esn_state.overlap = 1;
	}

	mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);

	/* It is safe to execute the modify below unlocked since the only
	 * flows that could affect this HW object are create, destroy and
	 * this work.
	 *
	 * The creation flow can't co-exist with this modify work, the
	 * destruction flow would cancel this work, and this work is a
	 * single entity that can't conflict with itself.
	 */
	spin_unlock_bh(&sa_entry->x->lock);
	mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);
	spin_lock_bh(&sa_entry->x->lock);

	data.data_offset_condition_operand =
		MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
	data.bitwise_data = cpu_to_be64(BIT_ULL(54));
	data.data_mask = data.bitwise_data;

	mlx5e_ipsec_aso_update(sa_entry, &data);
}

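/* Reload the hard packet counter with 2^31 and re-arm the lifetime
 * event, starting another counting round (see
 * mlx5e_ipsec_handle_limits()).
 */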
static void mlx5e_ipsec_aso_update_hard(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_wqe_aso_ctrl_seg data = {};

	data.data_offset_condition_operand =
		MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
	data.bitwise_data = cpu_to_be64(BIT_ULL(57) + BIT_ULL(31));
	data.data_mask = data.bitwise_data;
	mlx5e_ipsec_aso_update(sa_entry, &data);
}

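/* Program a new 32-bit soft lifetime value into the ASO context through
 * a masked bitwise write.
 */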
static void mlx5e_ipsec_aso_update_soft(struct mlx5e_ipsec_sa_entry *sa_entry,
					u32 val)
{
	struct mlx5_wqe_aso_ctrl_seg data = {};

	data.data_offset_condition_operand =
		MLX5_IPSEC_ASO_REMOVE_FLOW_SOFT_LFT_OFFSET;
	data.bitwise_data = cpu_to_be64(val);
	data.data_mask = cpu_to_be64(U32_MAX);
	mlx5e_ipsec_aso_update(sa_entry, &data);
}

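/* Lifetime event handling. The remove_flow packet counter is only 32
 * bits wide, so large packet limits are consumed in rounds: the ASO is
 * re-armed for every round and soft/hard expirations are reported to
 * the xfrm core via xfrm_state_check_expire().
 */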
static void mlx5e_ipsec_handle_limits(struct mlx5e_ipsec_sa_entry *sa_entry)
{
	struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_aso *aso = ipsec->aso;
	bool soft_arm, hard_arm;
	u64 hard_cnt;

	lockdep_assert_held(&sa_entry->x->lock);

	soft_arm = !MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm);
	hard_arm = !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm);
	if (!soft_arm && !hard_arm)
		/* It is not a lifetime event */
		return;

	hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
	if (!hard_cnt || hard_arm) {
		/* It is possible to see the packet counter equal to zero
		 * without the hard limit event armed. Such a situation can
		 * occur if the counter decreased while we were handling the
		 * soft limit event.
		 *
		 * However, it would be a HW/FW bug if the hard limit event
		 * is raised and the packet counter is not zero.
		 */
		WARN_ON_ONCE(hard_arm && hard_cnt);

		/* Notify about hard limit */
		xfrm_state_check_expire(sa_entry->x);
		return;
	}

	/* We are in soft limit event. */
	if (!sa_entry->limits.soft_limit_hit &&
	    sa_entry->limits.round == attrs->lft.numb_rounds_soft) {
		sa_entry->limits.soft_limit_hit = true;
		/* Notify about soft limit */
		xfrm_state_check_expire(sa_entry->x);

		if (sa_entry->limits.round == attrs->lft.numb_rounds_hard)
			goto hard;

		if (attrs->lft.soft_packet_limit > BIT_ULL(31)) {
			/* We cannot avoid a soft_value that might have the
			 * high bit set. For instance, soft_value=2^31+1 cannot
			 * be adjusted to the low-bit-clear version of
			 * soft_value=1 because it is too close to 0.
			 *
			 * Thus we have this corner case where we can hit the
			 * soft_limit with the high bit set, but cannot adjust
			 * the counter. Thus we set a temporary interrupt_value
			 * at least 2^30 away from here and do the adjustment
			 * then.
			 */
			mlx5e_ipsec_aso_update_soft(sa_entry,
						    BIT_ULL(31) - BIT_ULL(30));
			sa_entry->limits.fix_limit = true;
			return;
		}

		sa_entry->limits.fix_limit = true;
	}

hard:
	if (sa_entry->limits.round == attrs->lft.numb_rounds_hard) {
		mlx5e_ipsec_aso_update_soft(sa_entry, 0);
		attrs->lft.soft_packet_limit = XFRM_INF;
		return;
	}

	mlx5e_ipsec_aso_update_hard(sa_entry);
	sa_entry->limits.round++;
	if (sa_entry->limits.round == attrs->lft.numb_rounds_soft)
		mlx5e_ipsec_aso_update_soft(sa_entry,
					    attrs->lft.soft_packet_limit);
	if (sa_entry->limits.fix_limit) {
		sa_entry->limits.fix_limit = false;
		mlx5e_ipsec_aso_update_soft(sa_entry, BIT_ULL(31) - 1);
	}
}

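/* Work handler for an IPsec object change event: read the ASO context
 * back from hardware and, under the xfrm state lock, process pending
 * ESN and lifetime events.
 */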
static void mlx5e_ipsec_handle_event(struct work_struct *_work)
{
	struct mlx5e_ipsec_work *work =
		container_of(_work, struct mlx5e_ipsec_work, work);
	struct mlx5e_ipsec_sa_entry *sa_entry = work->data;
	struct mlx5_accel_esp_xfrm_attrs *attrs;
	struct mlx5e_ipsec_aso *aso;
	int ret;

	aso = sa_entry->ipsec->aso;
	attrs = &sa_entry->attrs;

	spin_lock_bh(&sa_entry->x->lock);
	ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
	if (ret)
		goto unlock;

	if (attrs->replay_esn.trigger &&
	    !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
		u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);

		mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
	}

	if (attrs->lft.soft_packet_limit != XFRM_INF)
		mlx5e_ipsec_handle_limits(sa_entry);

unlock:
	spin_unlock_bh(&sa_entry->x->lock);
	kfree(work);
}

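/* Notifier for MLX5_EVENT_TYPE_OBJECT_CHANGE events on IPsec objects.
 * Runs in atomic context, so the affected SA is looked up in the SADB
 * and the actual handling is deferred to the IPsec workqueue.
 */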
static int mlx5e_ipsec_event(struct notifier_block *nb, unsigned long event,
			     void *data)
{
	struct mlx5e_ipsec *ipsec = container_of(nb, struct mlx5e_ipsec, nb);
	struct mlx5e_ipsec_sa_entry *sa_entry;
	struct mlx5_eqe_obj_change *object;
	struct mlx5e_ipsec_work *work;
	struct mlx5_eqe *eqe = data;
	u16 type;

	if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
		return NOTIFY_DONE;

	object = &eqe->data.obj_change;
	type = be16_to_cpu(object->obj_type);

	if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
		return NOTIFY_DONE;

	sa_entry = xa_load(&ipsec->sadb, be32_to_cpu(object->obj_id));
	if (!sa_entry)
		return NOTIFY_DONE;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return NOTIFY_DONE;

	INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
	work->data = sa_entry;

	queue_work(ipsec->wq, &work->work);
	return NOTIFY_OK;
}

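/* Allocate and DMA-map the ASO context buffer, create the ASO instance
 * used for posting query/update WQEs and register the object change
 * notifier.
 */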
int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_aso *aso;
	struct mlx5e_hw_objs *res;
	struct device *pdev;
	int err;

	aso = kzalloc(sizeof(*ipsec->aso), GFP_KERNEL);
	if (!aso)
		return -ENOMEM;

	res = &mdev->mlx5e_res.hw_objs;

	pdev = mlx5_core_dma_dev(mdev);
	aso->dma_addr = dma_map_single(pdev, aso->ctx, sizeof(aso->ctx),
				       DMA_BIDIRECTIONAL);
	err = dma_mapping_error(pdev, aso->dma_addr);
	if (err)
		goto err_dma;

	aso->aso = mlx5_aso_create(mdev, res->pdn);
	if (IS_ERR(aso->aso)) {
		err = PTR_ERR(aso->aso);
		goto err_aso_create;
	}

	spin_lock_init(&aso->lock);
	ipsec->nb.notifier_call = mlx5e_ipsec_event;
	mlx5_notifier_register(mdev, &ipsec->nb);

	ipsec->aso = aso;
	return 0;

err_aso_create:
	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
			 DMA_BIDIRECTIONAL);
err_dma:
	kfree(aso);
	return err;
}

void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
{
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5e_ipsec_aso *aso;
	struct device *pdev;

	aso = ipsec->aso;
	pdev = mlx5_core_dma_dev(mdev);

	mlx5_notifier_unregister(mdev, &ipsec->nb);
	mlx5_aso_destroy(aso->aso);
	dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
			 DMA_BIDIRECTIONAL);
	kfree(aso);
	ipsec->aso = NULL;
}

static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
				 struct mlx5_wqe_aso_ctrl_seg *data)
{
	if (!data)
		return;

	ctrl->data_mask_mode = data->data_mask_mode;
	ctrl->condition_1_0_operand = data->condition_1_0_operand;
	ctrl->condition_1_0_offset = data->condition_1_0_offset;
	ctrl->data_offset_condition_operand = data->data_offset_condition_operand;
	ctrl->condition_0_data = data->condition_0_data;
	ctrl->condition_0_mask = data->condition_0_mask;
	ctrl->condition_1_data = data->condition_1_data;
	ctrl->condition_1_mask = data->condition_1_mask;
	ctrl->bitwise_data = data->bitwise_data;
	ctrl->data_mask = data->data_mask;
}

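/* Post an ASO WQE that reads the object's ASO context into aso->ctx
 * (optionally applying the caller-provided control segment for updates)
 * and busy-poll the completion for up to ~10ms, since we may be running
 * in atomic context.
 */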
int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
			  struct mlx5_wqe_aso_ctrl_seg *data)
{
	struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
	struct mlx5e_ipsec_aso *aso = ipsec->aso;
	struct mlx5_core_dev *mdev = ipsec->mdev;
	struct mlx5_wqe_aso_ctrl_seg *ctrl;
	struct mlx5e_hw_objs *res;
	struct mlx5_aso_wqe *wqe;
	unsigned long expires;
	u8 ds_cnt;
	int ret;

	lockdep_assert_held(&sa_entry->x->lock);
	res = &mdev->mlx5e_res.hw_objs;

	spin_lock_bh(&aso->lock);
	memset(aso->ctx, 0, sizeof(aso->ctx));
	wqe = mlx5_aso_get_wqe(aso->aso);
	ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
			   MLX5_ACCESS_ASO_OPC_MOD_IPSEC);

	ctrl = &wqe->aso_ctrl;
	ctrl->va_l =
		cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
	ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
	ctrl->l_key = cpu_to_be32(res->mkey);
	mlx5e_ipsec_aso_copy(ctrl, data);

	mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
	expires = jiffies + msecs_to_jiffies(10);
	do {
		ret = mlx5_aso_poll_cq(aso->aso, false);
		if (ret)
			/* We are in atomic context */
			udelay(10);
	} while (ret && time_is_after_jiffies(expires));
	spin_unlock_bh(&aso->lock);
	return ret;
}