1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */
3
4 #include "mlx5_core.h"
5 #include "eswitch.h"
6 #include "helper.h"
7 #include "ofld.h"
8
esw_acl_egress_ofld_fwd2vport_destroy(struct mlx5_vport * vport)9 static void esw_acl_egress_ofld_fwd2vport_destroy(struct mlx5_vport *vport)
10 {
11 if (!vport->egress.offloads.fwd_rule)
12 return;
13
14 mlx5_del_flow_rules(vport->egress.offloads.fwd_rule);
15 vport->egress.offloads.fwd_rule = NULL;
16 }
17
esw_acl_egress_ofld_fwd2vport_create(struct mlx5_eswitch * esw,struct mlx5_vport * vport,struct mlx5_flow_destination * fwd_dest)18 static int esw_acl_egress_ofld_fwd2vport_create(struct mlx5_eswitch *esw,
19 struct mlx5_vport *vport,
20 struct mlx5_flow_destination *fwd_dest)
21 {
22 struct mlx5_flow_act flow_act = {};
23 int err = 0;
24
25 esw_debug(esw->dev, "vport(%d) configure egress acl rule fwd2vport(%d)\n",
26 vport->vport, fwd_dest->vport.num);
27
28 /* Delete the old egress forward-to-vport rule if any */
29 esw_acl_egress_ofld_fwd2vport_destroy(vport);
30
31 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
32
33 vport->egress.offloads.fwd_rule =
34 mlx5_add_flow_rules(vport->egress.acl, NULL,
35 &flow_act, fwd_dest, 1);
36 if (IS_ERR(vport->egress.offloads.fwd_rule)) {
37 err = PTR_ERR(vport->egress.offloads.fwd_rule);
38 esw_warn(esw->dev,
39 "vport(%d) failed to add fwd2vport acl rule err(%d)\n",
40 vport->vport, err);
41 vport->egress.offloads.fwd_rule = NULL;
42 }
43
44 return err;
45 }
46
esw_acl_egress_ofld_rules_create(struct mlx5_eswitch * esw,struct mlx5_vport * vport,struct mlx5_flow_destination * fwd_dest)47 static int esw_acl_egress_ofld_rules_create(struct mlx5_eswitch *esw,
48 struct mlx5_vport *vport,
49 struct mlx5_flow_destination *fwd_dest)
50 {
51 int err = 0;
52 int action;
53
54 if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
55 /* For prio tag mode, there is only 1 FTEs:
56 * 1) prio tag packets - pop the prio tag VLAN, allow
57 * Unmatched traffic is allowed by default
58 */
59 esw_debug(esw->dev,
60 "vport[%d] configure prio tag egress rules\n", vport->vport);
61
62 action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
63 action |= fwd_dest ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
64 MLX5_FLOW_CONTEXT_ACTION_ALLOW;
65
66 /* prio tag vlan rule - pop it so vport receives untagged packets */
67 err = esw_egress_acl_vlan_create(esw, vport, fwd_dest, 0, action);
68 if (err)
69 goto prio_err;
70 }
71
72 if (fwd_dest) {
73 err = esw_acl_egress_ofld_fwd2vport_create(esw, vport, fwd_dest);
74 if (err)
75 goto fwd_err;
76 }
77
78 return 0;
79
80 fwd_err:
81 esw_acl_egress_vlan_destroy(vport);
82 prio_err:
83 return err;
84 }
85
/* Tear down all egress offload rules of @vport: the prio tag VLAN-pop rule
 * and the forward-to-vport rule. Both helpers tolerate absent rules.
 */
static void esw_acl_egress_ofld_rules_destroy(struct mlx5_vport *vport)
{
	esw_acl_egress_vlan_destroy(vport);
	esw_acl_egress_ofld_fwd2vport_destroy(vport);
}
91
esw_acl_egress_ofld_groups_create(struct mlx5_eswitch * esw,struct mlx5_vport * vport)92 static int esw_acl_egress_ofld_groups_create(struct mlx5_eswitch *esw,
93 struct mlx5_vport *vport)
94 {
95 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
96 struct mlx5_flow_group *fwd_grp;
97 u32 *flow_group_in;
98 u32 flow_index = 0;
99 int ret = 0;
100
101 if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) {
102 ret = esw_acl_egress_vlan_grp_create(esw, vport);
103 if (ret)
104 return ret;
105
106 flow_index++;
107 }
108
109 if (!mlx5_esw_acl_egress_fwd2vport_supported(esw))
110 goto out;
111
112 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
113 if (!flow_group_in) {
114 ret = -ENOMEM;
115 goto fwd_grp_err;
116 }
117
118 /* This group holds 1 FTE to forward all packets to other vport
119 * when bond vports is supported.
120 */
121 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
122 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);
123 fwd_grp = mlx5_create_flow_group(vport->egress.acl, flow_group_in);
124 if (IS_ERR(fwd_grp)) {
125 ret = PTR_ERR(fwd_grp);
126 esw_warn(esw->dev,
127 "Failed to create vport[%d] egress fwd2vport flow group, err(%d)\n",
128 vport->vport, ret);
129 kvfree(flow_group_in);
130 goto fwd_grp_err;
131 }
132 vport->egress.offloads.fwd_grp = fwd_grp;
133 kvfree(flow_group_in);
134 return 0;
135
136 fwd_grp_err:
137 esw_acl_egress_vlan_grp_destroy(vport);
138 out:
139 return ret;
140 }
141
esw_acl_egress_ofld_groups_destroy(struct mlx5_vport * vport)142 static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport)
143 {
144 if (!IS_ERR_OR_NULL(vport->egress.offloads.fwd_grp)) {
145 mlx5_destroy_flow_group(vport->egress.offloads.fwd_grp);
146 vport->egress.offloads.fwd_grp = NULL;
147 }
148 esw_acl_egress_vlan_grp_destroy(vport);
149 }
150
esw_acl_egress_needed(const struct mlx5_eswitch * esw,u16 vport_num)151 static bool esw_acl_egress_needed(const struct mlx5_eswitch *esw, u16 vport_num)
152 {
153 return mlx5_eswitch_is_vf_vport(esw, vport_num);
154 }
155
esw_acl_egress_ofld_setup(struct mlx5_eswitch * esw,struct mlx5_vport * vport)156 int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
157 {
158 int table_size = 0;
159 int err;
160
161 if (!mlx5_esw_acl_egress_fwd2vport_supported(esw) &&
162 !MLX5_CAP_GEN(esw->dev, prio_tag_required))
163 return 0;
164
165 if (!esw_acl_egress_needed(esw, vport->vport))
166 return 0;
167
168 esw_acl_egress_ofld_rules_destroy(vport);
169
170 if (mlx5_esw_acl_egress_fwd2vport_supported(esw))
171 table_size++;
172 if (MLX5_CAP_GEN(esw->dev, prio_tag_required))
173 table_size++;
174 vport->egress.acl = esw_acl_table_create(esw, vport->vport,
175 MLX5_FLOW_NAMESPACE_ESW_EGRESS, table_size);
176 if (IS_ERR_OR_NULL(vport->egress.acl)) {
177 err = PTR_ERR(vport->egress.acl);
178 vport->egress.acl = NULL;
179 return err;
180 }
181
182 err = esw_acl_egress_ofld_groups_create(esw, vport);
183 if (err)
184 goto group_err;
185
186 esw_debug(esw->dev, "vport[%d] configure egress rules\n", vport->vport);
187
188 err = esw_acl_egress_ofld_rules_create(esw, vport, NULL);
189 if (err)
190 goto rules_err;
191
192 return 0;
193
194 rules_err:
195 esw_acl_egress_ofld_groups_destroy(vport);
196 group_err:
197 esw_acl_egress_table_destroy(vport);
198 return err;
199 }
200
/* Tear down @vport's egress ACL offload in reverse order of setup:
 * rules first, then flow groups, then the ACL table itself.
 */
void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport)
{
	esw_acl_egress_ofld_rules_destroy(vport);
	esw_acl_egress_ofld_groups_destroy(vport);
	esw_acl_egress_table_destroy(vport);
}
207
/* Bond two vports: recreate the active vport's egress rules without a
 * fwd2vport rule, and redirect all of the passive vport's egress traffic
 * to the active vport.
 *
 * Return: 0 on success, negative errno on failure.
 */
int mlx5_esw_acl_egress_vport_bond(struct mlx5_eswitch *esw, u16 active_vport_num,
				   u16 passive_vport_num)
{
	struct mlx5_vport *passive_vport = mlx5_eswitch_get_vport(esw, passive_vport_num);
	struct mlx5_vport *active_vport = mlx5_eswitch_get_vport(esw, active_vport_num);
	struct mlx5_flow_destination fwd_dest = {};
	int err;

	if (IS_ERR(active_vport))
		return PTR_ERR(active_vport);
	if (IS_ERR(passive_vport))
		return PTR_ERR(passive_vport);

	/* Cleanup and recreate rules WITHOUT fwd2vport of active vport.
	 * Was previously unchecked: a failure here left the active vport
	 * with no egress rules while reporting success.
	 */
	esw_acl_egress_ofld_rules_destroy(active_vport);
	err = esw_acl_egress_ofld_rules_create(esw, active_vport, NULL);
	if (err)
		return err;

	/* Cleanup and recreate all rules + fwd2vport rule of passive vport to forward */
	esw_acl_egress_ofld_rules_destroy(passive_vport);
	fwd_dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	fwd_dest.vport.num = active_vport_num;
	fwd_dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
	fwd_dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;

	return esw_acl_egress_ofld_rules_create(esw, passive_vport, &fwd_dest);
}
233
/* Unbond @vport_num: drop any forward-to-vport rule and restore the
 * default egress rules.
 *
 * Return: 0 on success, negative errno on failure.
 */
int mlx5_esw_acl_egress_vport_unbond(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (IS_ERR(vport))
		return PTR_ERR(vport);

	esw_acl_egress_ofld_rules_destroy(vport);

	return esw_acl_egress_ofld_rules_create(esw, vport, NULL);
}
244