/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mpfs.h>
#include "en_tc.h"
#include "lib/mpfs.h"
#include "en/ptp.h"
#include "en/fs_ethtool.h"
struct mlx5e_flow_steering {
	struct work_struct set_rx_mode_work;
	bool state_destroy;
	bool vlan_strip_disable;
	struct mlx5_core_dev *mdev;
	struct net_device *netdev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_namespace *egress_ns;
#ifdef CONFIG_MLX5_EN_RXNFC
	struct mlx5e_ethtool_steering *ethtool;
#endif
	struct mlx5e_tc_table *tc;
	struct mlx5e_promisc_table promisc;
	struct mlx5e_vlan_table *vlan;
	struct mlx5e_l2_table l2;
	struct mlx5_ttc_table *ttc;
	struct mlx5_ttc_table *inner_ttc;
#ifdef CONFIG_MLX5_EN_ARFS
	struct mlx5e_arfs_tables *arfs;
#endif
#ifdef CONFIG_MLX5_EN_TLS
	struct mlx5e_accel_fs_tcp *accel_tcp;
#endif
	struct mlx5e_fs_udp *udp;
	struct mlx5e_fs_any *any;
	struct mlx5e_ptp_fs *ptp_fs;
	struct dentry *dfs_root;
};

static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
				  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering *fs,
				   struct mlx5e_l2_rule *ai);

enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI = 1,
};

enum {
	MLX5E_UC = 0,
	MLX5E_MC_IPV4 = 1,
	MLX5E_MC_IPV6 = 2,
	MLX5E_MC_OTHER = 3,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD = 1,
	MLX5E_ACTION_DEL = 2,
};

struct mlx5e_l2_hash_node {
	struct hlist_node hlist;
	u8 action;
	struct mlx5e_l2_rule ai;
	bool mpfs;
};

static inline int mlx5e_hash_l2(const u8 *addr)
{
	return addr[5];
}
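
/*
 * Illustrative example (not part of the driver): the L2 hash simply uses
 * the last byte of the MAC address as the bucket index, so
 * 00:11:22:33:44:55 lands in bucket 0x55, and addresses sharing a last
 * byte chain in the same hlist bucket:
 *
 *	struct hlist_head uc[MLX5E_L2_ADDR_HASH_SIZE];
 *	u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *
 *	mlx5e_add_l2_to_hash(uc, mac);	// stored in uc[0x55]
 */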

struct dentry *mlx5e_fs_get_debugfs_root(struct mlx5e_flow_steering *fs)
{
	return fs->dfs_root;
}

static void mlx5e_add_l2_to_hash(struct hlist_head *hash, const u8 *addr)
{
	struct mlx5e_l2_hash_node *hn;
	int ix = mlx5e_hash_l2(addr);
	int found = 0;

	hlist_for_each_entry(hn, &hash[ix], hlist)
		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
			found = 1;
			break;
		}

	if (found) {
		hn->action = MLX5E_ACTION_NONE;
		return;
	}

	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
	if (!hn)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	hlist_add_head(&hn->hlist, &hash[ix]);
}

static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}
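
/*
 * Sketch of the hash-node action lifecycle (assumed from the code above
 * and from mlx5e_handle_netdev_addr() below): every sync pass first marks
 * all nodes MLX5E_ACTION_DEL; re-seeing an address in the netdev lists
 * downgrades it to ACTION_NONE (already programmed) or creates it as
 * ACTION_ADD, and whatever is still marked DEL afterwards is removed:
 *
 *	existing, seen again:  DEL -> NONE          (keep rule)
 *	new address:           (none) -> ADD -> NONE (program rule)
 *	existing, not seen:    DEL                   (delete rule, free node)
 */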

struct mlx5e_vlan_table {
	struct mlx5e_flow_table ft;
	DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
	DECLARE_BITMAP(active_svlans, VLAN_N_VID);
	struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
	struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
	struct mlx5_flow_handle *untagged_rule;
	struct mlx5_flow_handle *any_cvlan_rule;
	struct mlx5_flow_handle *any_svlan_rule;
	struct mlx5_flow_handle *trap_rule;
	bool cvlan_filter_disabled;
};

unsigned long *mlx5e_vlan_get_active_svlans(struct mlx5e_vlan_table *vlan)
{
	return vlan->active_svlans;
}

struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan)
{
	return vlan->ft.t;
}

static int mlx5e_vport_context_update_vlans(struct mlx5e_flow_steering *fs)
{
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, fs->vlan->active_cvlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(fs->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		fs_warn(fs, "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kvcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	i = 0;
	for_each_set_bit(vlan, fs->vlan->active_cvlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(fs->mdev, vlans, list_size);
	if (err)
		fs_err(fs, "Failed to modify vport vlans list err(%d)\n",
		       err);

	kvfree(vlans);
	return err;
}
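
/*
 * Worked example (illustrative): the limit is a log2 firmware capability,
 * so with log_max_vlan_list = 7 the vport accepts at most 1 << 7 = 128
 * C-VIDs. With 130 active VLANs the warning above fires and only the 128
 * lowest-numbered VIDs are pushed to the vport context, since the copy
 * loop walks set bits in ascending order.
 */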

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
};

static int __mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft = fs->vlan->ft.t;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = fs->l2.ft.t;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		/* cvlan_tag enabled in match criteria and
		 * disabled in match value means both S & C tags
		 * don't exist (untagged of both)
		 */
		rule_p = &fs->vlan->untagged_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &fs->vlan->any_cvlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &fs->vlan->any_svlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		rule_p = &fs->vlan->active_svlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
		rule_p = &fs->vlan->active_cvlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	if (WARN_ONCE(*rule_p, "VLAN rule already exists type %d", rule_type))
		return 0;

	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		fs_err(fs, "add rule failed\n");
	}

	return err;
}
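
/*
 * Illustrative sketch of the criteria/value split used above: the
 * criteria acts as a mask, the value as the expected bits. Matching
 * C-tagged traffic with VID 100 conceptually programs:
 *
 *	criteria: cvlan_tag = 1, first_vid = 0xfff  (compare these fields)
 *	value:    cvlan_tag = 1, first_vid = 100    (expected contents)
 *
 * whereas the UNTAGGED rule sets cvlan_tag in the criteria only, so the
 * implicit value of 0 means "no C-tag and no S-tag present".
 */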

static int mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
		mlx5e_vport_context_update_vlans(fs);

	err = __mlx5e_add_vlan_rule(fs, rule_type, vid, spec);

	kvfree(spec);

	return err;
}

static void mlx5e_fs_del_vlan_rule(struct mlx5e_flow_steering *fs,
				   enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (fs->vlan->untagged_rule) {
			mlx5_del_flow_rules(fs->vlan->untagged_rule);
			fs->vlan->untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (fs->vlan->any_cvlan_rule) {
			mlx5_del_flow_rules(fs->vlan->any_cvlan_rule);
			fs->vlan->any_cvlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (fs->vlan->any_svlan_rule) {
			mlx5_del_flow_rules(fs->vlan->any_svlan_rule);
			fs->vlan->any_svlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
		if (fs->vlan->active_svlans_rule[vid]) {
			mlx5_del_flow_rules(fs->vlan->active_svlans_rule[vid]);
			fs->vlan->active_svlans_rule[vid] = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
		if (fs->vlan->active_cvlans_rule[vid]) {
			mlx5_del_flow_rules(fs->vlan->active_cvlans_rule[vid]);
			fs->vlan->active_cvlans_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(fs);
		break;
	}
}

static void mlx5e_fs_del_any_vid_rules(struct mlx5e_flow_steering *fs)
{
	mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int mlx5e_fs_add_any_vid_rules(struct mlx5e_flow_steering *fs)
{
	int err;

	err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return err;

	return mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static struct mlx5_flow_handle *
mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
{
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);
	spec->flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
	spec->flow_context.flow_tag = trap_id;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tir_num;

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kvfree(spec);
	return rule;
}
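
/*
 * Note (an assumed reading, not verified against firmware docs): the trap
 * rule carries trap_id in the flow context flow_tag, which the device
 * reflects back in the CQE of every matched packet, and the empty match
 * spec makes it a catch-all within its group; matched traffic is steered
 * to the trap TIR given by tir_num. A devlink-trap style caller would do:
 *
 *	err = mlx5e_add_vlan_trap(fs, trap_id, tir_num);
 */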

int mlx5e_add_vlan_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num)
{
	struct mlx5_flow_table *ft = fs->vlan->ft.t;
	struct mlx5_flow_handle *rule;
	int err;

	rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		fs->vlan->trap_rule = NULL;
		fs_err(fs, "add VLAN trap rule failed, err %d\n", err);
		return err;
	}
	fs->vlan->trap_rule = rule;
	return 0;
}

void mlx5e_remove_vlan_trap(struct mlx5e_flow_steering *fs)
{
	if (fs->vlan->trap_rule) {
		mlx5_del_flow_rules(fs->vlan->trap_rule);
		fs->vlan->trap_rule = NULL;
	}
}

int mlx5e_add_mac_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num)
{
	struct mlx5_flow_table *ft = fs->l2.ft.t;
	struct mlx5_flow_handle *rule;
	int err;

	rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		fs->l2.trap_rule = NULL;
		fs_err(fs, "add MAC trap rule failed, err %d\n", err);
		return err;
	}
	fs->l2.trap_rule = rule;
	return 0;
}

void mlx5e_remove_mac_trap(struct mlx5e_flow_steering *fs)
{
	if (fs->l2.trap_rule) {
		mlx5_del_flow_rules(fs->l2.trap_rule);
		fs->l2.trap_rule = NULL;
	}
}

void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
{
	if (!fs->vlan->cvlan_filter_disabled)
		return;

	fs->vlan->cvlan_filter_disabled = false;
	if (promisc)
		return;
	mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}

void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
{
	if (!fs->vlan || fs->vlan->cvlan_filter_disabled)
		return;

	fs->vlan->cvlan_filter_disabled = true;
	if (promisc)
		return;
	mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
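
/*
 * Usage sketch (assumption based on common feature wiring, e.g. a
 * NETIF_F_HW_VLAN_CTAG_FILTER handler):
 *
 *	if (enable)
 *		mlx5e_enable_cvlan_filter(fs, promisc);
 *	else
 *		mlx5e_disable_cvlan_filter(fs, promisc);
 *
 * Disabling the filter installs the any-C-tag rule so every VID passes;
 * when promisc is set the extra rule is skipped because the promisc
 * table already forwards everything.
 */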

static int mlx5e_vlan_rx_add_cvid(struct mlx5e_flow_steering *fs, u16 vid)
{
	int err;

	set_bit(vid, fs->vlan->active_cvlans);

	err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
	if (err)
		clear_bit(vid, fs->vlan->active_cvlans);

	return err;
}

static int mlx5e_vlan_rx_add_svid(struct mlx5e_flow_steering *fs,
				  struct net_device *netdev, u16 vid)
{
	int err;

	set_bit(vid, fs->vlan->active_svlans);

	err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
	if (err) {
		clear_bit(vid, fs->vlan->active_svlans);
		return err;
	}

	/* S-tag VLAN presence may force C-tag stripping off; re-evaluate features */
	netdev_lock(netdev);
	netdev_update_features(netdev);
	netdev_unlock(netdev);
	return err;
}

int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
			     struct net_device *netdev,
			     __be16 proto, u16 vid)
{
	if (!fs->vlan) {
		fs_err(fs, "Vlan doesn't exist\n");
		return -EINVAL;
	}

	if (be16_to_cpu(proto) == ETH_P_8021Q)
		return mlx5e_vlan_rx_add_cvid(fs, vid);
	else if (be16_to_cpu(proto) == ETH_P_8021AD)
		return mlx5e_vlan_rx_add_svid(fs, netdev, vid);

	return -EOPNOTSUPP;
}

int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
			      struct net_device *netdev,
			      __be16 proto, u16 vid)
{
	if (!fs->vlan) {
		fs_err(fs, "Vlan doesn't exist\n");
		return -EINVAL;
	}

	if (be16_to_cpu(proto) == ETH_P_8021Q) {
		clear_bit(vid, fs->vlan->active_cvlans);
		mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
	} else if (be16_to_cpu(proto) == ETH_P_8021AD) {
		clear_bit(vid, fs->vlan->active_svlans);
		mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
		netdev_lock(netdev);
		netdev_update_features(netdev);
		netdev_unlock(netdev);
	}

	return 0;
}
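
/*
 * Usage sketch (illustrative; the real wrappers live in the netdev ops):
 *
 *	static int mlx5e_vlan_rx_add_vid(struct net_device *dev,
 *					 __be16 proto, u16 vid)
 *	{
 *		struct mlx5e_priv *priv = netdev_priv(dev);
 *
 *		return mlx5e_fs_vlan_rx_add_vid(priv->fs, dev, proto, vid);
 *	}
 *
 * Only 802.1Q (C-tag) and 802.1ad (S-tag) protocols are handled; anything
 * else returns -EOPNOTSUPP.
 */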

static void mlx5e_fs_add_vlan_rules(struct mlx5e_flow_steering *fs)
{
	int i;

	mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, fs->vlan->active_cvlans, VLAN_N_VID) {
		mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
	}

	for_each_set_bit(i, fs->vlan->active_svlans, VLAN_N_VID)
		mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

	if (fs->vlan->cvlan_filter_disabled)
		mlx5e_fs_add_any_vid_rules(fs);
}

static void mlx5e_del_vlan_rules(struct mlx5e_flow_steering *fs)
{
	int i;

	mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, fs->vlan->active_cvlans, VLAN_N_VID) {
		mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
	}

	for_each_set_bit(i, fs->vlan->active_svlans, VLAN_N_VID)
		mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);

	WARN_ON_ONCE(fs->state_destroy);

	mlx5e_remove_vlan_trap(fs);

	/* must be called after DESTROY bit is set and
	 * set_rx_mode is called and flushed
	 */
	if (fs->vlan->cvlan_filter_disabled)
		mlx5e_fs_del_any_vid_rules(fs);
}

#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
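
/*
 * Example (illustrative): walking all nodes of an L2 address hash, safe
 * against removal inside the loop body:
 *
 *	struct mlx5e_l2_hash_node *hn;
 *	struct hlist_node *tmp;
 *	int i;
 *
 *	mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i)
 *		pr_debug("uc mac %pM action %d\n", hn->ai.addr, hn->action);
 */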

static void mlx5e_execute_l2_action(struct mlx5e_flow_steering *fs,
				    struct mlx5e_l2_hash_node *hn)
{
	u8 action = hn->action;
	u8 mac_addr[ETH_ALEN];
	int l2_err = 0;

	ether_addr_copy(mac_addr, hn->ai.addr);

	switch (action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_l2_flow_rule(fs, &hn->ai, MLX5E_FULLMATCH);
		if (!is_multicast_ether_addr(mac_addr)) {
			l2_err = mlx5_mpfs_add_mac(fs->mdev, mac_addr);
			hn->mpfs = !l2_err;
		}
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		if (!is_multicast_ether_addr(mac_addr) && hn->mpfs)
			l2_err = mlx5_mpfs_del_mac(fs->mdev, mac_addr);
		mlx5e_del_l2_flow_rule(fs, &hn->ai);
		mlx5e_del_l2_from_hash(hn);
		break;
	}

	if (l2_err)
		fs_warn(fs, "MPFS, failed to %s mac %pM, err(%d)\n",
			action == MLX5E_ACTION_ADD ? "add" : "del",
			mac_addr, l2_err);
}

static void mlx5e_sync_netdev_addr(struct mlx5e_flow_steering *fs,
				   struct net_device *netdev)
{
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	mlx5e_add_l2_to_hash(fs->l2.netdev_uc, netdev->dev_addr);
	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(fs->l2.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(fs->l2.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}

static void mlx5e_fill_addr_array(struct mlx5e_flow_steering *fs, int list_type,
				  struct net_device *ndev,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? fs->l2.netdev_uc : fs->l2.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (fs->l2.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

static void mlx5e_vport_context_update_addr_list(struct mlx5e_flow_steering *fs,
						 struct net_device *netdev,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (fs->l2.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(fs->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(fs->mdev, log_max_current_mc_list);

	addr_list = is_uc ? fs->l2.netdev_uc : fs->l2.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		fs_warn(fs, "mdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(fs, list_type, netdev, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(fs->mdev, list_type, addr_array, size);
out:
	if (err)
		fs_err(fs, "Failed to modify vport %s list err(%d)\n",
		       is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void mlx5e_vport_context_update(struct mlx5e_flow_steering *fs,
				       struct net_device *netdev)
{
	struct mlx5e_l2_table *ea = &fs->l2;

	mlx5e_vport_context_update_addr_list(fs, netdev, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(fs, netdev, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(fs->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void mlx5e_apply_netdev_addr(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i)
		mlx5e_execute_l2_action(fs, hn);

	mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_mc, i)
		mlx5e_execute_l2_action(fs, hn);
}

static void mlx5e_handle_netdev_addr(struct mlx5e_flow_steering *fs,
				     struct net_device *netdev)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (fs->state_destroy)
		mlx5e_sync_netdev_addr(fs, netdev);

	mlx5e_apply_netdev_addr(fs);
}
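
/*
 * Note (assumed from the init callers): despite its name, state_destroy
 * is true while the netdev is operational (it is seeded with
 * !MLX5E_STATE_DESTROYING). A normal pass therefore marks every node DEL,
 * re-syncs from the netdev address lists (flipping live entries back to
 * NONE or ADD), and removes only the stale ones; during teardown the sync
 * is skipped so every address is flushed.
 */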

#define MLX5E_PROMISC_GROUP0_SIZE BIT(0)
#define MLX5E_PROMISC_TABLE_SIZE MLX5E_PROMISC_GROUP0_SIZE

static int mlx5e_add_promisc_rule(struct mlx5e_flow_steering *fs)
{
	struct mlx5_flow_table *ft = fs->promisc.ft.t;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle **rule_p;
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = mlx5_get_ttc_flow_table(fs->ttc);

	rule_p = &fs->promisc.rule;
	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		fs_err(fs, "add promiscuous rule failed\n");
	}
	kvfree(spec);
	return err;
}

static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_flow_table *ft = &fs->promisc.ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft_attr.max_fte = MLX5E_PROMISC_TABLE_SIZE;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
	ft_attr.prio = MLX5E_PROMISC_PRIO;

	ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		fs_err(fs, "fail to create promisc table err=%d\n", err);
		return err;
	}

	err = mlx5e_add_promisc_rule(fs);
	if (err)
		goto err_destroy_promisc_table;

	return 0;

err_destroy_promisc_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

static void mlx5e_del_promisc_rule(struct mlx5e_flow_steering *fs)
{
	if (WARN(!fs->promisc.rule, "Trying to remove non-existing promiscuous rule"))
		return;
	mlx5_del_flow_rules(fs->promisc.rule);
	fs->promisc.rule = NULL;
}

static void mlx5e_destroy_promisc_table(struct mlx5e_flow_steering *fs)
{
	if (!fs->promisc.ft.t)
		return;
	mlx5e_del_promisc_rule(fs);
	mlx5_destroy_flow_table(fs->promisc.ft.t);
	fs->promisc.ft.t = NULL;
}

void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs,
			       struct net_device *netdev)
{
	struct mlx5e_l2_table *ea = &fs->l2;

	bool rx_mode_enable = fs->state_destroy;
	bool promisc_enabled = rx_mode_enable && (netdev->flags & IFF_PROMISC);
	bool allmulti_enabled = rx_mode_enable && (netdev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
	bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
	bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
	bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
	bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
	bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
	int err;

	if (enable_promisc) {
		err = mlx5e_create_promisc_table(fs);
		if (err)
			enable_promisc = false;
		if (!fs->vlan_strip_disable && !err)
			fs_warn_once(fs,
				     "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
	}
	if (enable_allmulti)
		mlx5e_add_l2_flow_rule(fs, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_l2_flow_rule(fs, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(fs, netdev);

	if (disable_broadcast)
		mlx5e_del_l2_flow_rule(fs, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_l2_flow_rule(fs, &ea->allmulti);
	if (disable_promisc)
		mlx5e_destroy_promisc_table(fs);

	ea->promisc_enabled = promisc_enabled;
	ea->allmulti_enabled = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(fs, netdev);
}
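
/*
 * Ordering note (illustrative reading of mlx5e_fs_set_rx_mode_work):
 * catch-all rules being enabled (promisc/allmulti/broadcast) are added
 * before the per-address sync, and rules being disabled are removed only
 * after it, so matching traffic keeps a steering path for the whole
 * transition.
 */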

static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev)
{
	ether_addr_copy(fs->l2.broadcast.addr, netdev->broadcast);
}

void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs,
				       struct mlx5e_rx_res *rx_res,
				       struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));
	ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
	ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_NIC_PRIO;

	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		ttc_params->dests[tt].tir_num =
			tt == MLX5_TT_ANY ?
				mlx5e_rx_res_get_tirn_direct(rx_res, 0) :
				mlx5e_rx_res_get_tirn_rss_inner(rx_res, tt);
	}
}

void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
			  struct mlx5e_rx_res *rx_res,
			  struct ttc_params *ttc_params, bool tunnel)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));
	ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
	ft_attr->level = MLX5E_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_NIC_PRIO;

	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		ttc_params->dests[tt].tir_num =
			tt == MLX5_TT_ANY ?
				mlx5e_rx_res_get_tirn_direct(rx_res, 0) :
				mlx5e_rx_res_get_tirn_rss(rx_res, tt);
	}

	ttc_params->inner_ttc = tunnel;
	if (!tunnel || !mlx5_tunnel_inner_ft_supported(fs->mdev))
		return;

	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		ttc_params->tunnel_dests[tt].type =
			MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		ttc_params->tunnel_dests[tt].ft =
			mlx5_get_ttc_flow_table(fs->inner_ttc);
	}
}

static void mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering *fs,
				   struct mlx5e_l2_rule *ai)
{
	if (!IS_ERR_OR_NULL(ai->rule)) {
		mlx5_del_flow_rules(ai->rule);
		ai->rule = NULL;
	}
}

static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
				  struct mlx5e_l2_rule *ai, int type)
{
	struct mlx5_flow_table *ft = fs->l2.ft.t;
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 *mc_dmac;
	u8 *mv_dmac;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			       outer_headers.dmac_47_16);
	mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			       outer_headers.dmac_47_16);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = mlx5_get_ttc_flow_table(fs->ttc);

	switch (type) {
	case MLX5E_FULLMATCH:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;
	}

	ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(ai->rule)) {
		fs_err(fs, "add l2 rule(mac:%pM) failed\n", mv_dmac);
		err = PTR_ERR(ai->rule);
		ai->rule = NULL;
	}

	kvfree(spec);

	return err;
}

#define MLX5E_NUM_L2_GROUPS	   3
#define MLX5E_L2_GROUP1_SIZE	   BIT(15)
#define MLX5E_L2_GROUP2_SIZE	   BIT(0)
#define MLX5E_L2_GROUP_TRAP_SIZE   BIT(0) /* must be last */
#define MLX5E_L2_TABLE_SIZE	   (MLX5E_L2_GROUP1_SIZE +\
				    MLX5E_L2_GROUP2_SIZE +\
				    MLX5E_L2_GROUP_TRAP_SIZE)
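
/*
 * Worked size check (illustrative): the L2 table holds one group of 2^15
 * full-DMAC entries, one allmulti entry and one trap entry:
 *
 *	MLX5E_L2_TABLE_SIZE = BIT(15) + BIT(0) + BIT(0) = 32770 FTEs
 *
 * The groups below are carved out of the table in that order.
 */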
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &l2_table->ft;
	int ix = 0;
	u8 *mc_dmac;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
			       outer_headers.dmac_47_16);
	/* Flow Group for full match */
	eth_broadcast_addr(mc_dmac);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for allmulti */
	eth_zero_addr(mc_dmac);
	mc_dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for l2 traps */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP_TRAP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	kvfree(in);
	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);
	kvfree(in);
	kfree(ft->g);

	return err;
}

static void mlx5e_destroy_l2_table(struct mlx5e_flow_steering *fs)
{
	mlx5e_destroy_flow_table(&fs->l2.ft);
}

static int mlx5e_create_l2_table(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_l2_table *l2_table = &fs->l2;
	struct mlx5e_flow_table *ft = &l2_table->ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_L2_TABLE_SIZE;
	ft_attr.level = MLX5E_L2_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(fs->ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_l2_table_groups(l2_table);
	if (err)
		goto err_destroy_flow_table;

	return 0;

err_destroy_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

#define MLX5E_NUM_VLAN_GROUPS	5
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP2_SIZE	BIT(1)
#define MLX5E_VLAN_GROUP3_SIZE	BIT(0)
#define MLX5E_VLAN_GROUP_TRAP_SIZE BIT(0) /* must be last */
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE +\
				 MLX5E_VLAN_GROUP3_SIZE +\
				 MLX5E_VLAN_GROUP_TRAP_SIZE)
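
/*
 * Worked size check (illustrative): 4096 C-VID rules, 4096 S-VID rules,
 * the untagged/any-C-tag pair (both match on cvlan_tag only), the
 * any-S-tag rule and the trap entry:
 *
 *	MLX5E_VLAN_TABLE_SIZE = BIT(12) + BIT(12) + BIT(1) + BIT(0) + BIT(0)
 *			      = 4096 + 4096 + 2 + 1 + 1 = 8196 FTEs
 */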

static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
					    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP_TRAP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}

static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_vlan_table_groups(ft, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_fs_create_vlan_table(struct mlx5e_flow_steering *fs)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_flow_table *ft;
	int err;

	ft = &fs->vlan->ft;
	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
	ft_attr.level = MLX5E_VLAN_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(fs->ns, &ft_attr);
	if (IS_ERR(ft->t))
		return PTR_ERR(ft->t);

	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_table;
	}

	err = mlx5e_create_vlan_table_groups(ft);
	if (err)
		goto err_free_g;

	mlx5e_fs_add_vlan_rules(fs);

	return 0;

err_free_g:
	kfree(ft->g);
err_destroy_vlan_table:
	mlx5_destroy_flow_table(ft->t);

	return err;
}

static void mlx5e_destroy_vlan_table(struct mlx5e_flow_steering *fs)
{
	mlx5e_del_vlan_rules(fs);
	mlx5e_destroy_flow_table(&fs->vlan->ft);
}

static void mlx5e_destroy_inner_ttc_table(struct mlx5e_flow_steering *fs)
{
	if (!mlx5_tunnel_inner_ft_supported(fs->mdev))
		return;
	mlx5_destroy_ttc_table(fs->inner_ttc);
}

void mlx5e_destroy_ttc_table(struct mlx5e_flow_steering *fs)
{
	mlx5_destroy_ttc_table(fs->ttc);
}

static int mlx5e_create_inner_ttc_table(struct mlx5e_flow_steering *fs,
					struct mlx5e_rx_res *rx_res)
{
	struct ttc_params ttc_params = {};

	if (!mlx5_tunnel_inner_ft_supported(fs->mdev))
		return 0;

	mlx5e_set_inner_ttc_params(fs, rx_res, &ttc_params);
	fs->inner_ttc = mlx5_create_inner_ttc_table(fs->mdev,
						    &ttc_params);
	return PTR_ERR_OR_ZERO(fs->inner_ttc);
}

int mlx5e_create_ttc_table(struct mlx5e_flow_steering *fs,
			   struct mlx5e_rx_res *rx_res)
{
	struct ttc_params ttc_params = {};

	mlx5e_set_ttc_params(fs, rx_res, &ttc_params, true);
	fs->ttc = mlx5_create_ttc_table(fs->mdev, &ttc_params);
	return PTR_ERR_OR_ZERO(fs->ttc);
}

int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs,
			       struct mlx5e_rx_res *rx_res,
			       const struct mlx5e_profile *profile,
			       struct net_device *netdev)
{
	struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(fs->mdev,
								 MLX5_FLOW_NAMESPACE_KERNEL);
	int err;

	if (!ns)
		return -EOPNOTSUPP;

	mlx5e_fs_set_ns(fs, ns, false);
	err = mlx5e_arfs_create_tables(fs, rx_res, mlx5e_fs_has_arfs(netdev));
	if (err) {
		fs_err(fs, "Failed to create arfs tables, err=%d\n", err);
		netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	err = mlx5e_create_inner_ttc_table(fs, rx_res);
	if (err) {
		fs_err(fs, "Failed to create inner ttc table, err=%d\n", err);
		goto err_destroy_arfs_tables;
	}

	err = mlx5e_create_ttc_table(fs, rx_res);
	if (err) {
		fs_err(fs, "Failed to create ttc table, err=%d\n", err);
		goto err_destroy_inner_ttc_table;
	}

	err = mlx5e_create_l2_table(fs);
	if (err) {
		fs_err(fs, "Failed to create l2 table, err=%d\n", err);
		goto err_destroy_ttc_table;
	}

	err = mlx5e_fs_create_vlan_table(fs);
	if (err) {
		fs_err(fs, "Failed to create vlan table, err=%d\n", err);
		goto err_destroy_l2_table;
	}

	err = mlx5e_ptp_alloc_rx_fs(fs, profile);
	if (err)
		goto err_destroy_vlan_table;

	mlx5e_ethtool_init_steering(fs);

	return 0;

err_destroy_vlan_table:
	mlx5e_destroy_vlan_table(fs);
err_destroy_l2_table:
	mlx5e_destroy_l2_table(fs);
err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(fs);
err_destroy_inner_ttc_table:
	mlx5e_destroy_inner_ttc_table(fs);
err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(fs, mlx5e_fs_has_arfs(netdev));

	return err;
}
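
/*
 * Construction-order note (assumed from the rule destinations above):
 * tables are created from the back of the RX pipeline towards the front,
 * since each table's rules use the next table as their destination:
 *
 *	vlan -> l2 -> ttc -> (inner ttc / RSS TIRs)
 *
 * with aRFS hanging off the TTC stage and the promisc catch-all jumping
 * straight to TTC, bypassing the VLAN and L2 filters. Teardown in
 * mlx5e_destroy_flow_steering() runs in the opposite order.
 */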

void mlx5e_destroy_flow_steering(struct mlx5e_flow_steering *fs, bool ntuple,
				 const struct mlx5e_profile *profile)
{
	mlx5e_ptp_free_rx_fs(fs, profile);
	mlx5e_destroy_vlan_table(fs);
	mlx5e_destroy_l2_table(fs);
	mlx5e_destroy_ttc_table(fs);
	mlx5e_destroy_inner_ttc_table(fs);
	mlx5e_arfs_destroy_tables(fs, ntuple);
	mlx5e_ethtool_cleanup_steering(fs);
}

static int mlx5e_fs_vlan_alloc(struct mlx5e_flow_steering *fs)
{
	fs->vlan = kvzalloc(sizeof(*fs->vlan), GFP_KERNEL);
	if (!fs->vlan)
		return -ENOMEM;
	return 0;
}

static void mlx5e_fs_vlan_free(struct mlx5e_flow_steering *fs)
{
	kvfree(fs->vlan);
}

struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs)
{
	return fs->vlan;
}

static int mlx5e_fs_tc_alloc(struct mlx5e_flow_steering *fs)
{
	fs->tc = mlx5e_tc_table_alloc();
	if (IS_ERR(fs->tc))
		return -ENOMEM;
	return 0;
}

static void mlx5e_fs_tc_free(struct mlx5e_flow_steering *fs)
{
	mlx5e_tc_table_free(fs->tc);
}

struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs)
{
	return fs->tc;
}

#ifdef CONFIG_MLX5_EN_RXNFC
static int mlx5e_fs_ethtool_alloc(struct mlx5e_flow_steering *fs)
{
	return mlx5e_ethtool_alloc(&fs->ethtool);
}

static void mlx5e_fs_ethtool_free(struct mlx5e_flow_steering *fs)
{
	mlx5e_ethtool_free(fs->ethtool);
}

struct mlx5e_ethtool_steering *mlx5e_fs_get_ethtool(struct mlx5e_flow_steering *fs)
{
	return fs->ethtool;
}
#else
static int mlx5e_fs_ethtool_alloc(struct mlx5e_flow_steering *fs)
{ return 0; }
static void mlx5e_fs_ethtool_free(struct mlx5e_flow_steering *fs) { }
#endif

static void mlx5e_fs_debugfs_init(struct mlx5e_flow_steering *fs,
				  struct dentry *dfs_root)
{
	if (IS_ERR_OR_NULL(dfs_root))
		return;

	fs->dfs_root = debugfs_create_dir("fs", dfs_root);
}

struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
					  struct mlx5_core_dev *mdev,
					  bool state_destroy,
					  struct dentry *dfs_root)
{
	struct mlx5e_flow_steering *fs;
	int err;

	fs = kvzalloc(sizeof(*fs), GFP_KERNEL);
	if (!fs)
		goto err;

	fs->mdev = mdev;
	fs->state_destroy = state_destroy;
	if (mlx5e_profile_feature_cap(profile, FS_VLAN)) {
		err = mlx5e_fs_vlan_alloc(fs);
		if (err)
			goto err_free_fs;
	}

	if (mlx5e_profile_feature_cap(profile, FS_TC)) {
		err = mlx5e_fs_tc_alloc(fs);
		if (err)
			goto err_free_vlan;
	}

	err = mlx5e_fs_ethtool_alloc(fs);
	if (err)
		goto err_free_tc;

	mlx5e_fs_debugfs_init(fs, dfs_root);

	return fs;
err_free_tc:
	mlx5e_fs_tc_free(fs);
err_free_vlan:
	mlx5e_fs_vlan_free(fs);
err_free_fs:
	kvfree(fs);
err:
	return NULL;
}
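
/*
 * Usage sketch (illustrative, simplified from the nic profile): the
 * flow-steering context lives as long as its netdev:
 *
 *	priv->fs = mlx5e_fs_init(profile, mdev,
 *				 !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
 *				 priv->dfs_root);
 *	if (!priv->fs)
 *		return -ENOMEM;
 *	...
 *	mlx5e_fs_cleanup(priv->fs);
 */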

void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
{
	if (!fs)
		return;
	debugfs_remove_recursive(fs->dfs_root);
	mlx5e_fs_ethtool_free(fs);
	mlx5e_fs_tc_free(fs);
	mlx5e_fs_vlan_free(fs);
	kvfree(fs);
}

struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs)
{
	return &fs->l2;
}

struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress)
{
	return egress ? fs->egress_ns : fs->ns;
}

void mlx5e_fs_set_ns(struct mlx5e_flow_steering *fs, struct mlx5_flow_namespace *ns, bool egress)
{
	if (!egress)
		fs->ns = ns;
	else
		fs->egress_ns = ns;
}

struct mlx5_ttc_table *mlx5e_fs_get_ttc(struct mlx5e_flow_steering *fs, bool inner)
{
	return inner ? fs->inner_ttc : fs->ttc;
}

void mlx5e_fs_set_ttc(struct mlx5e_flow_steering *fs, struct mlx5_ttc_table *ttc, bool inner)
{
	if (!inner)
		fs->ttc = ttc;
	else
		fs->inner_ttc = ttc;
}

#ifdef CONFIG_MLX5_EN_ARFS
struct mlx5e_arfs_tables *mlx5e_fs_get_arfs(struct mlx5e_flow_steering *fs)
{
	return fs->arfs;
}

void mlx5e_fs_set_arfs(struct mlx5e_flow_steering *fs, struct mlx5e_arfs_tables *arfs)
{
	fs->arfs = arfs;
}
#endif

struct mlx5e_ptp_fs *mlx5e_fs_get_ptp(struct mlx5e_flow_steering *fs)
{
	return fs->ptp_fs;
}

void mlx5e_fs_set_ptp(struct mlx5e_flow_steering *fs, struct mlx5e_ptp_fs *ptp_fs)
{
	fs->ptp_fs = ptp_fs;
}

struct mlx5e_fs_any *mlx5e_fs_get_any(struct mlx5e_flow_steering *fs)
{
	return fs->any;
}

void mlx5e_fs_set_any(struct mlx5e_flow_steering *fs, struct mlx5e_fs_any *any)
{
	fs->any = any;
}

#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_accel_fs_tcp *mlx5e_fs_get_accel_tcp(struct mlx5e_flow_steering *fs)
{
	return fs->accel_tcp;
}

void mlx5e_fs_set_accel_tcp(struct mlx5e_flow_steering *fs, struct mlx5e_accel_fs_tcp *accel_tcp)
{
	fs->accel_tcp = accel_tcp;
}
#endif

void mlx5e_fs_set_state_destroy(struct mlx5e_flow_steering *fs, bool state_destroy)
{
	fs->state_destroy = state_destroy;
}

void mlx5e_fs_set_vlan_strip_disable(struct mlx5e_flow_steering *fs,
				     bool vlan_strip_disable)
{
	fs->vlan_strip_disable = vlan_strip_disable;
}

struct mlx5e_fs_udp *mlx5e_fs_get_udp(struct mlx5e_flow_steering *fs)
{
	return fs->udp;
}

void mlx5e_fs_set_udp(struct mlx5e_flow_steering *fs, struct mlx5e_fs_udp *udp)
{
	fs->udp = udp;
}

struct mlx5_core_dev *mlx5e_fs_get_mdev(struct mlx5e_flow_steering *fs)
{
	return fs->mdev;
}