1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2019-2021, Intel Corporation. */
3
4 #include "ice.h"
5 #include "ice_tc_lib.h"
6 #include "ice_fltr.h"
7 #include "ice_lib.h"
8 #include "ice_protocol_type.h"
9
10 #define ICE_TC_METADATA_LKUP_IDX 0
11
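/* A TC flower match is translated into an array of switch lookup elements
 * (struct ice_adv_lkup_elem). Lookup 0 always carries metadata (direction,
 * and optionally VLAN TPID and tunnel flags); every additional matched
 * header - MAC, VLAN, IPv4/IPv6, L4 ports, tunnel fields - contributes one
 * more element, so ice_tc_count_lkups() and ice_tc_fill_rules() below must
 * stay in sync.
 */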
12 /**
13 * ice_tc_count_lkups - determine lookup count for switch filter
14 * @flags: TC-flower flags
15 * @fltr: Pointer to outer TC filter structure
16 *
17 * Return: lookup count based on TC flower input for a switch filter.
18 */
19 static int ice_tc_count_lkups(u32 flags, struct ice_tc_flower_fltr *fltr)
20 {
21 int lkups_cnt = 1; /* 0th lookup is metadata */
22
23 /* Always add metadata as the 0th lookup. Included elements:
24 * - Direction flag (always present)
25 * - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified)
26 * - Tunnel flag (present if tunnel)
27 */
28 if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
29 lkups_cnt++;
30
31 if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
32 lkups_cnt++;
33
34 if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
35 lkups_cnt++;
36
37 if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS)
38 lkups_cnt++;
39
40 if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS)
41 lkups_cnt++;
42
43 if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
44 ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
45 ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
46 ICE_TC_FLWR_FIELD_ENC_DEST_IPV6))
47 lkups_cnt++;
48
49 if (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
50 ICE_TC_FLWR_FIELD_ENC_IP_TTL))
51 lkups_cnt++;
52
53 if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
54 lkups_cnt++;
55
56 if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID)
57 lkups_cnt++;
58
59 /* are MAC fields specified? */
60 if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC))
61 lkups_cnt++;
62
63 /* is VLAN specified? */
64 if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO))
65 lkups_cnt++;
66
67 /* is CVLAN specified? */
68 if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO))
69 lkups_cnt++;
70
71 /* are PPPoE options specified? */
72 if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
73 ICE_TC_FLWR_FIELD_PPP_PROTO))
74 lkups_cnt++;
75
76 /* are IPv[4|6] fields specified? */
77 if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4 |
78 ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
79 lkups_cnt++;
80
81 if (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))
82 lkups_cnt++;
83
84 /* are L2TPv3 options specified? */
85 if (flags & ICE_TC_FLWR_FIELD_L2TPV3_SESSID)
86 lkups_cnt++;
87
88 /* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
89 if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
90 ICE_TC_FLWR_FIELD_SRC_L4_PORT))
91 lkups_cnt++;
92
93 return lkups_cnt;
94 }
95
96 static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
97 {
98 return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
99 }
100
101 static enum ice_protocol_type ice_proto_type_from_etype(bool inner)
102 {
103 return inner ? ICE_ETYPE_IL : ICE_ETYPE_OL;
104 }
105
106 static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
107 {
108 return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
109 }
110
111 static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
112 {
113 return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
114 }
115
116 static enum ice_protocol_type ice_proto_type_from_l4_port(u16 ip_proto)
117 {
118 switch (ip_proto) {
119 case IPPROTO_TCP:
120 return ICE_TCP_IL;
121 case IPPROTO_UDP:
122 return ICE_UDP_ILOS;
123 }
124
125 return 0;
126 }
127
128 static enum ice_protocol_type
129 ice_proto_type_from_tunnel(enum ice_tunnel_type type)
130 {
131 switch (type) {
132 case TNL_VXLAN:
133 return ICE_VXLAN;
134 case TNL_GENEVE:
135 return ICE_GENEVE;
136 case TNL_GRETAP:
137 return ICE_NVGRE;
138 case TNL_GTPU:
139 /* NO_PAY profiles will not work with GTP-U */
140 return ICE_GTP;
141 case TNL_GTPC:
142 return ICE_GTP_NO_PAY;
143 case TNL_PFCP:
144 return ICE_PFCP;
145 default:
146 return 0;
147 }
148 }
149
150 static enum ice_sw_tunnel_type
151 ice_sw_type_from_tunnel(enum ice_tunnel_type type)
152 {
153 switch (type) {
154 case TNL_VXLAN:
155 return ICE_SW_TUN_VXLAN;
156 case TNL_GENEVE:
157 return ICE_SW_TUN_GENEVE;
158 case TNL_GRETAP:
159 return ICE_SW_TUN_NVGRE;
160 case TNL_GTPU:
161 return ICE_SW_TUN_GTPU;
162 case TNL_GTPC:
163 return ICE_SW_TUN_GTPC;
164 case TNL_PFCP:
165 return ICE_SW_TUN_PFCP;
166 default:
167 return ICE_NON_TUN;
168 }
169 }
170
171 static u16 ice_check_supported_vlan_tpid(u16 vlan_tpid)
172 {
173 switch (vlan_tpid) {
174 case ETH_P_8021Q:
175 case ETH_P_8021AD:
176 case ETH_P_QINQ1:
177 return vlan_tpid;
178 default:
179 return 0;
180 }
181 }
182
183 static int
184 ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
185 struct ice_adv_lkup_elem *list, int i)
186 {
187 struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers;
188
189 if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) {
190 u32 tenant_id;
191
192 list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
193 switch (fltr->tunnel_type) {
194 case TNL_VXLAN:
195 case TNL_GENEVE:
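/* The 24-bit VNI occupies the three most significant bytes of the
 * field, hence the shift by 8 and the mask that clears the low byte.
 */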
196 tenant_id = be32_to_cpu(fltr->tenant_id) << 8;
197 list[i].h_u.tnl_hdr.vni = cpu_to_be32(tenant_id);
198 memcpy(&list[i].m_u.tnl_hdr.vni, "\xff\xff\xff\x00", 4);
199 i++;
200 break;
201 case TNL_GRETAP:
202 list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
203 memcpy(&list[i].m_u.nvgre_hdr.tni_flow,
204 "\xff\xff\xff\xff", 4);
205 i++;
206 break;
207 case TNL_GTPC:
208 case TNL_GTPU:
209 list[i].h_u.gtp_hdr.teid = fltr->tenant_id;
210 memcpy(&list[i].m_u.gtp_hdr.teid,
211 "\xff\xff\xff\xff", 4);
212 i++;
213 break;
214 default:
215 break;
216 }
217 }
218
219 if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC) {
220 list[i].type = ice_proto_type_from_mac(false);
221 ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
222 hdr->l2_key.dst_mac);
223 ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
224 hdr->l2_mask.dst_mac);
225 i++;
226 }
227
228 if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS) {
229 list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
230
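/* The PDU type lives in the upper nibble of the GTP PDU session
 * container byte (mask 0xf0); the QFI uses the low six bits (mask 0x3f).
 */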
231 if (fltr->gtp_pdu_info_masks.pdu_type) {
232 list[i].h_u.gtp_hdr.pdu_type =
233 fltr->gtp_pdu_info_keys.pdu_type << 4;
234 memcpy(&list[i].m_u.gtp_hdr.pdu_type, "\xf0", 1);
235 }
236
237 if (fltr->gtp_pdu_info_masks.qfi) {
238 list[i].h_u.gtp_hdr.qfi = fltr->gtp_pdu_info_keys.qfi;
239 memcpy(&list[i].m_u.gtp_hdr.qfi, "\x3f", 1);
240 }
241
242 i++;
243 }
244
245 if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS) {
246 struct ice_pfcp_hdr *hdr_h, *hdr_m;
247
248 hdr_h = &list[i].h_u.pfcp_hdr;
249 hdr_m = &list[i].m_u.pfcp_hdr;
250 list[i].type = ICE_PFCP;
251
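/* Only bit 0 of the flags byte (the S, SEID-present, flag) is matched,
 * which is enough to tell PFCP session messages from node messages.
 */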
252 hdr_h->flags = fltr->pfcp_meta_keys.type;
253 hdr_m->flags = fltr->pfcp_meta_masks.type & 0x01;
254
255 hdr_h->seid = fltr->pfcp_meta_keys.seid;
256 hdr_m->seid = fltr->pfcp_meta_masks.seid;
257
258 i++;
259 }
260
261 if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
262 ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
263 list[i].type = ice_proto_type_from_ipv4(false);
264
265 if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV4) {
266 list[i].h_u.ipv4_hdr.src_addr = hdr->l3_key.src_ipv4;
267 list[i].m_u.ipv4_hdr.src_addr = hdr->l3_mask.src_ipv4;
268 }
269 if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV4) {
270 list[i].h_u.ipv4_hdr.dst_addr = hdr->l3_key.dst_ipv4;
271 list[i].m_u.ipv4_hdr.dst_addr = hdr->l3_mask.dst_ipv4;
272 }
273 i++;
274 }
275
276 if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
277 ICE_TC_FLWR_FIELD_ENC_DEST_IPV6)) {
278 list[i].type = ice_proto_type_from_ipv6(false);
279
280 if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV6) {
281 memcpy(&list[i].h_u.ipv6_hdr.src_addr,
282 &hdr->l3_key.src_ipv6_addr,
283 sizeof(hdr->l3_key.src_ipv6_addr));
284 memcpy(&list[i].m_u.ipv6_hdr.src_addr,
285 &hdr->l3_mask.src_ipv6_addr,
286 sizeof(hdr->l3_mask.src_ipv6_addr));
287 }
288 if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV6) {
289 memcpy(&list[i].h_u.ipv6_hdr.dst_addr,
290 &hdr->l3_key.dst_ipv6_addr,
291 sizeof(hdr->l3_key.dst_ipv6_addr));
292 memcpy(&list[i].m_u.ipv6_hdr.dst_addr,
293 &hdr->l3_mask.dst_ipv6_addr,
294 sizeof(hdr->l3_mask.dst_ipv6_addr));
295 }
296 i++;
297 }
298
299 if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IP) &&
300 (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
301 ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
302 list[i].type = ice_proto_type_from_ipv4(false);
303
304 if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
305 list[i].h_u.ipv4_hdr.tos = hdr->l3_key.tos;
306 list[i].m_u.ipv4_hdr.tos = hdr->l3_mask.tos;
307 }
308
309 if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
310 list[i].h_u.ipv4_hdr.time_to_live = hdr->l3_key.ttl;
311 list[i].m_u.ipv4_hdr.time_to_live = hdr->l3_mask.ttl;
312 }
313
314 i++;
315 }
316
317 if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IPV6) &&
318 (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
319 ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
320 struct ice_ipv6_hdr *hdr_h, *hdr_m;
321
322 hdr_h = &list[i].h_u.ipv6_hdr;
323 hdr_m = &list[i].m_u.ipv6_hdr;
324 list[i].type = ice_proto_type_from_ipv6(false);
325
326 if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
327 be32p_replace_bits(&hdr_h->be_ver_tc_flow,
328 hdr->l3_key.tos,
329 ICE_IPV6_HDR_TC_MASK);
330 be32p_replace_bits(&hdr_m->be_ver_tc_flow,
331 hdr->l3_mask.tos,
332 ICE_IPV6_HDR_TC_MASK);
333 }
334
335 if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
336 hdr_h->hop_limit = hdr->l3_key.ttl;
337 hdr_m->hop_limit = hdr->l3_mask.ttl;
338 }
339
340 i++;
341 }
342
343 if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
344 hdr->l3_key.ip_proto == IPPROTO_UDP) {
345 list[i].type = ICE_UDP_OF;
346 list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
347 list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
348 i++;
349 }
350
351 /* always add a match on tunneled packets to the metadata lookup */
352 ice_rule_add_tunnel_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
353
354 return i;
355 }
356
357 /**
358 * ice_tc_fill_rules - fill filter rules based on TC fltr
359 * @hw: pointer to HW structure
360 * @flags: tc flower field flags
361 * @tc_fltr: pointer to TC flower filter
362 * @list: list of advance rule elements
363 * @rule_info: pointer to information about rule
364 * @l4_proto: pointer to information such as L4 proto type
365 *
366 * Fill ice_adv_lkup_elem list based on TC flower flags and
367 * TC flower headers. This list is then used to add an
368 * advanced filter in hardware.
369 */
370 static int
371 ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
372 struct ice_tc_flower_fltr *tc_fltr,
373 struct ice_adv_lkup_elem *list,
374 struct ice_adv_rule_info *rule_info,
375 u16 *l4_proto)
376 {
377 struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
378 bool inner = false;
379 u16 vlan_tpid = 0;
380 int i = 1; /* 0th lookup is metadata */
381
382 rule_info->vlan_type = vlan_tpid;
383
384 /* Always add direction metadata */
385 ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
386
387 if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
388 ice_rule_add_src_vsi_metadata(&list[i]);
389 i++;
390 }
391
392 rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
393 if (tc_fltr->tunnel_type != TNL_LAST) {
394 i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
395
396 /* PFCP is considered non-tunneled - don't swap headers. */
397 if (tc_fltr->tunnel_type != TNL_PFCP) {
398 headers = &tc_fltr->inner_headers;
399 inner = true;
400 }
401 }
402
403 if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
404 list[i].type = ice_proto_type_from_etype(inner);
405 list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
406 list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
407 i++;
408 }
409
410 if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
411 ICE_TC_FLWR_FIELD_SRC_MAC)) {
412 struct ice_tc_l2_hdr *l2_key, *l2_mask;
413
414 l2_key = &headers->l2_key;
415 l2_mask = &headers->l2_mask;
416
417 list[i].type = ice_proto_type_from_mac(inner);
418 if (flags & ICE_TC_FLWR_FIELD_DST_MAC) {
419 ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
420 l2_key->dst_mac);
421 ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
422 l2_mask->dst_mac);
423 }
424 if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) {
425 ether_addr_copy(list[i].h_u.eth_hdr.src_addr,
426 l2_key->src_mac);
427 ether_addr_copy(list[i].m_u.eth_hdr.src_addr,
428 l2_mask->src_mac);
429 }
430 i++;
431 }
432
433 /* copy VLAN info */
434 if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO)) {
435 if (flags & ICE_TC_FLWR_FIELD_CVLAN)
436 list[i].type = ICE_VLAN_EX;
437 else
438 list[i].type = ICE_VLAN_OFOS;
439
440 if (flags & ICE_TC_FLWR_FIELD_VLAN) {
441 list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
442 list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0x0FFF);
443 }
444
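/* In the 802.1Q TCI the PCP occupies the top three bits and the VID the
 * low twelve, so a priority match widens the mask to 0xE000 on its own
 * or 0xEFFF when combined with a VLAN ID match (the DEI bit is skipped).
 */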
445 if (flags & ICE_TC_FLWR_FIELD_VLAN_PRIO) {
446 if (flags & ICE_TC_FLWR_FIELD_VLAN) {
447 list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xEFFF);
448 } else {
449 list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xE000);
450 list[i].h_u.vlan_hdr.vlan = 0;
451 }
452 list[i].h_u.vlan_hdr.vlan |=
453 headers->vlan_hdr.vlan_prio;
454 }
455
456 i++;
457 }
458
459 if (flags & ICE_TC_FLWR_FIELD_VLAN_TPID) {
460 vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
461 rule_info->vlan_type =
462 ice_check_supported_vlan_tpid(vlan_tpid);
463
464 ice_rule_add_vlan_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
465 }
466
467 if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO)) {
468 list[i].type = ICE_VLAN_IN;
469
470 if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
471 list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id;
472 list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0x0FFF);
473 }
474
475 if (flags & ICE_TC_FLWR_FIELD_CVLAN_PRIO) {
476 if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
477 list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xEFFF);
478 } else {
479 list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xE000);
480 list[i].h_u.vlan_hdr.vlan = 0;
481 }
482 list[i].h_u.vlan_hdr.vlan |=
483 headers->cvlan_hdr.vlan_prio;
484 }
485
486 i++;
487 }
488
489 if (flags & (ICE_TC_FLWR_FIELD_PPPOE_SESSID |
490 ICE_TC_FLWR_FIELD_PPP_PROTO)) {
491 struct ice_pppoe_hdr *vals, *masks;
492
493 vals = &list[i].h_u.pppoe_hdr;
494 masks = &list[i].m_u.pppoe_hdr;
495
496 list[i].type = ICE_PPPOE;
497
498 if (flags & ICE_TC_FLWR_FIELD_PPPOE_SESSID) {
499 vals->session_id = headers->pppoe_hdr.session_id;
500 masks->session_id = cpu_to_be16(0xFFFF);
501 }
502
503 if (flags & ICE_TC_FLWR_FIELD_PPP_PROTO) {
504 vals->ppp_prot_id = headers->pppoe_hdr.ppp_proto;
505 masks->ppp_prot_id = cpu_to_be16(0xFFFF);
506 }
507
508 i++;
509 }
510
511 /* copy L3 (IPv[4|6]: src, dest) address */
512 if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
513 ICE_TC_FLWR_FIELD_SRC_IPV4)) {
514 struct ice_tc_l3_hdr *l3_key, *l3_mask;
515
516 list[i].type = ice_proto_type_from_ipv4(inner);
517 l3_key = &headers->l3_key;
518 l3_mask = &headers->l3_mask;
519 if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) {
520 list[i].h_u.ipv4_hdr.dst_addr = l3_key->dst_ipv4;
521 list[i].m_u.ipv4_hdr.dst_addr = l3_mask->dst_ipv4;
522 }
523 if (flags & ICE_TC_FLWR_FIELD_SRC_IPV4) {
524 list[i].h_u.ipv4_hdr.src_addr = l3_key->src_ipv4;
525 list[i].m_u.ipv4_hdr.src_addr = l3_mask->src_ipv4;
526 }
527 i++;
528 } else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
529 ICE_TC_FLWR_FIELD_SRC_IPV6)) {
530 struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask;
531 struct ice_tc_l3_hdr *l3_key, *l3_mask;
532
533 list[i].type = ice_proto_type_from_ipv6(inner);
534 ipv6_hdr = &list[i].h_u.ipv6_hdr;
535 ipv6_mask = &list[i].m_u.ipv6_hdr;
536 l3_key = &headers->l3_key;
537 l3_mask = &headers->l3_mask;
538
539 if (flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
540 memcpy(&ipv6_hdr->dst_addr, &l3_key->dst_ipv6_addr,
541 sizeof(l3_key->dst_ipv6_addr));
542 memcpy(&ipv6_mask->dst_addr, &l3_mask->dst_ipv6_addr,
543 sizeof(l3_mask->dst_ipv6_addr));
544 }
545 if (flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
546 memcpy(&ipv6_hdr->src_addr, &l3_key->src_ipv6_addr,
547 sizeof(l3_key->src_ipv6_addr));
548 memcpy(&ipv6_mask->src_addr, &l3_mask->src_ipv6_addr,
549 sizeof(l3_mask->src_ipv6_addr));
550 }
551 i++;
552 }
553
554 if (headers->l2_key.n_proto == htons(ETH_P_IP) &&
555 (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
556 list[i].type = ice_proto_type_from_ipv4(inner);
557
558 if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
559 list[i].h_u.ipv4_hdr.tos = headers->l3_key.tos;
560 list[i].m_u.ipv4_hdr.tos = headers->l3_mask.tos;
561 }
562
563 if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
564 list[i].h_u.ipv4_hdr.time_to_live =
565 headers->l3_key.ttl;
566 list[i].m_u.ipv4_hdr.time_to_live =
567 headers->l3_mask.ttl;
568 }
569
570 i++;
571 }
572
573 if (headers->l2_key.n_proto == htons(ETH_P_IPV6) &&
574 (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
575 struct ice_ipv6_hdr *hdr_h, *hdr_m;
576
577 hdr_h = &list[i].h_u.ipv6_hdr;
578 hdr_m = &list[i].m_u.ipv6_hdr;
579 list[i].type = ice_proto_type_from_ipv6(inner);
580
581 if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
582 be32p_replace_bits(&hdr_h->be_ver_tc_flow,
583 headers->l3_key.tos,
584 ICE_IPV6_HDR_TC_MASK);
585 be32p_replace_bits(&hdr_m->be_ver_tc_flow,
586 headers->l3_mask.tos,
587 ICE_IPV6_HDR_TC_MASK);
588 }
589
590 if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
591 hdr_h->hop_limit = headers->l3_key.ttl;
592 hdr_m->hop_limit = headers->l3_mask.ttl;
593 }
594
595 i++;
596 }
597
598 if (flags & ICE_TC_FLWR_FIELD_L2TPV3_SESSID) {
599 list[i].type = ICE_L2TPV3;
600
601 list[i].h_u.l2tpv3_sess_hdr.session_id =
602 headers->l2tpv3_hdr.session_id;
603 list[i].m_u.l2tpv3_sess_hdr.session_id =
604 cpu_to_be32(0xFFFFFFFF);
605
606 i++;
607 }
608
609 /* copy L4 (src, dest) port */
610 if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
611 ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
612 struct ice_tc_l4_hdr *l4_key, *l4_mask;
613
614 list[i].type = ice_proto_type_from_l4_port(headers->l3_key.ip_proto);
615 l4_key = &headers->l4_key;
616 l4_mask = &headers->l4_mask;
617
618 if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) {
619 list[i].h_u.l4_hdr.dst_port = l4_key->dst_port;
620 list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port;
621 }
622 if (flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) {
623 list[i].h_u.l4_hdr.src_port = l4_key->src_port;
624 list[i].m_u.l4_hdr.src_port = l4_mask->src_port;
625 }
626 i++;
627 }
628
629 return i;
630 }
631
632 /**
633 * ice_tc_tun_get_type - get the tunnel type
634 * @tunnel_dev: ptr to tunnel device
635 *
636 * This function detects the appropriate tunnel_type if the specified
637 * device is a tunnel device such as VXLAN/Geneve
638 */
639 static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
640 {
641 if (netif_is_vxlan(tunnel_dev))
642 return TNL_VXLAN;
643 if (netif_is_geneve(tunnel_dev))
644 return TNL_GENEVE;
645 if (netif_is_gretap(tunnel_dev) ||
646 netif_is_ip6gretap(tunnel_dev))
647 return TNL_GRETAP;
648
649 /* Assume GTP-U by default in case of GTP netdev.
650 * GTP-C may be selected later, based on enc_dst_port.
651 */
652 if (netif_is_gtp(tunnel_dev))
653 return TNL_GTPU;
654 if (netif_is_pfcp(tunnel_dev))
655 return TNL_PFCP;
656 return TNL_LAST;
657 }
658
659 bool ice_is_tunnel_supported(struct net_device *dev)
660 {
661 return ice_tc_tun_get_type(dev) != TNL_LAST;
662 }
663
664 static bool ice_tc_is_dev_uplink(struct net_device *dev)
665 {
666 return netif_is_ice(dev) || ice_is_tunnel_supported(dev);
667 }
668
669 static int ice_tc_setup_action(struct net_device *filter_dev,
670 struct ice_tc_flower_fltr *fltr,
671 struct net_device *target_dev,
672 enum ice_sw_fwd_act_type action)
673 {
674 struct ice_repr *repr;
675
676 if (action != ICE_FWD_TO_VSI && action != ICE_MIRROR_PACKET) {
677 NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action to setup provided");
678 return -EINVAL;
679 }
680
681 fltr->action.fltr_act = action;
682
683 if (ice_is_port_repr_netdev(filter_dev) &&
684 ice_is_port_repr_netdev(target_dev) &&
685 fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
686 repr = ice_netdev_to_repr(target_dev);
687
688 fltr->dest_vsi = repr->src_vsi;
689 } else if (ice_is_port_repr_netdev(filter_dev) &&
690 ice_tc_is_dev_uplink(target_dev) &&
691 fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
692 repr = ice_netdev_to_repr(filter_dev);
693
694 fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
695 } else if (ice_tc_is_dev_uplink(filter_dev) &&
696 ice_is_port_repr_netdev(target_dev) &&
697 fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
698 repr = ice_netdev_to_repr(target_dev);
699
700 fltr->dest_vsi = repr->src_vsi;
701 } else {
702 NL_SET_ERR_MSG_MOD(fltr->extack,
703 "The action is not supported for this netdevice");
704 return -EINVAL;
705 }
706
707 return 0;
708 }
709
710 static int
711 ice_tc_setup_drop_action(struct net_device *filter_dev,
712 struct ice_tc_flower_fltr *fltr)
713 {
714 fltr->action.fltr_act = ICE_DROP_PACKET;
715
716 if (!ice_tc_is_dev_uplink(filter_dev) &&
717 !(ice_is_port_repr_netdev(filter_dev) &&
718 fltr->direction == ICE_ESWITCH_FLTR_INGRESS)) {
719 NL_SET_ERR_MSG_MOD(fltr->extack,
720 "The action is not supported for this netdevice");
721 return -EINVAL;
722 }
723
724 return 0;
725 }
726
727 static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
728 struct ice_tc_flower_fltr *fltr,
729 struct flow_action_entry *act)
730 {
731 int err;
732
733 switch (act->id) {
734 case FLOW_ACTION_DROP:
735 err = ice_tc_setup_drop_action(filter_dev, fltr);
736 if (err)
737 return err;
738
739 break;
740
741 case FLOW_ACTION_REDIRECT:
742 err = ice_tc_setup_action(filter_dev, fltr,
743 act->dev, ICE_FWD_TO_VSI);
744 if (err)
745 return err;
746
747 break;
748
749 case FLOW_ACTION_MIRRED:
750 err = ice_tc_setup_action(filter_dev, fltr,
751 act->dev, ICE_MIRROR_PACKET);
752 if (err)
753 return err;
754
755 break;
756
757 default:
758 NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode");
759 return -EINVAL;
760 }
761
762 return 0;
763 }
764
765 static bool ice_is_fltr_lldp(struct ice_tc_flower_fltr *fltr)
766 {
767 return fltr->outer_headers.l2_key.n_proto == htons(ETH_P_LLDP);
768 }
769
770 static bool ice_is_fltr_pf_tx_lldp(struct ice_tc_flower_fltr *fltr)
771 {
772 struct ice_vsi *vsi = fltr->src_vsi, *uplink;
773
774 if (!ice_is_switchdev_running(vsi->back))
775 return false;
776
777 uplink = vsi->back->eswitch.uplink_vsi;
778 return vsi == uplink && fltr->action.fltr_act == ICE_DROP_PACKET &&
779 ice_is_fltr_lldp(fltr) &&
780 fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
781 fltr->flags == ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
782 }
783
784 static bool ice_is_fltr_vf_tx_lldp(struct ice_tc_flower_fltr *fltr)
785 {
786 struct ice_vsi *vsi = fltr->src_vsi, *uplink;
787
788 uplink = vsi->back->eswitch.uplink_vsi;
789 return fltr->src_vsi->type == ICE_VSI_VF && ice_is_fltr_lldp(fltr) &&
790 fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
791 fltr->dest_vsi == uplink;
792 }
793
794 static struct ice_tc_flower_fltr *
795 ice_find_pf_tx_lldp_fltr(struct ice_pf *pf)
796 {
797 struct ice_tc_flower_fltr *fltr;
798
799 hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
800 if (ice_is_fltr_pf_tx_lldp(fltr))
801 return fltr;
802
803 return NULL;
804 }
805
806 static bool ice_any_vf_lldp_tx_ena(struct ice_pf *pf)
807 {
808 struct ice_vf *vf;
809 unsigned int bkt;
810
811 ice_for_each_vf(pf, bkt, vf)
812 if (vf->lldp_tx_ena)
813 return true;
814
815 return false;
816 }
817
818 int ice_pass_vf_tx_lldp(struct ice_vsi *vsi, bool deinit)
819 {
820 struct ice_rule_query_data remove_entry = {
821 .rid = vsi->vf->lldp_recipe_id,
822 .rule_id = vsi->vf->lldp_rule_id,
823 .vsi_handle = vsi->idx,
824 };
825 struct ice_pf *pf = vsi->back;
826 int err;
827
828 if (vsi->vf->lldp_tx_ena)
829 return 0;
830
831 if (!deinit && !ice_find_pf_tx_lldp_fltr(vsi->back))
832 return -EINVAL;
833
834 if (!deinit && ice_any_vf_lldp_tx_ena(pf))
835 return -EINVAL;
836
837 err = ice_rem_adv_rule_by_id(&pf->hw, &remove_entry);
838 if (!err)
839 vsi->vf->lldp_tx_ena = true;
840
841 return err;
842 }
843
844 int ice_drop_vf_tx_lldp(struct ice_vsi *vsi, bool init)
845 {
846 struct ice_rule_query_data rule_added;
847 struct ice_adv_rule_info rinfo = {
848 .priority = 7,
849 .src_vsi = vsi->idx,
850 .sw_act = {
851 .src = vsi->idx,
852 .flag = ICE_FLTR_TX,
853 .fltr_act = ICE_DROP_PACKET,
854 .vsi_handle = vsi->idx,
855 },
856 .flags_info.act_valid = true,
857 };
858 struct ice_adv_lkup_elem list[3];
859 struct ice_pf *pf = vsi->back;
860 int err;
861
862 if (!init && !vsi->vf->lldp_tx_ena)
863 return 0;
864
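/* Build the drop rule lookups: Tx direction metadata, the source VSI,
 * and an outer ethertype match on LLDP frames.
 */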
865 memset(list, 0, sizeof(list));
866 ice_rule_add_direction_metadata(&list[0]);
867 ice_rule_add_src_vsi_metadata(&list[1]);
868 list[2].type = ICE_ETYPE_OL;
869 list[2].h_u.ethertype.ethtype_id = htons(ETH_P_LLDP);
870 list[2].m_u.ethertype.ethtype_id = htons(0xFFFF);
871
872 err = ice_add_adv_rule(&pf->hw, list, ARRAY_SIZE(list), &rinfo,
873 &rule_added);
874 if (err) {
875 dev_err(&pf->pdev->dev,
876 "Failed to add an LLDP rule to VSI 0x%X: %d\n",
877 vsi->idx, err);
878 } else {
879 vsi->vf->lldp_recipe_id = rule_added.rid;
880 vsi->vf->lldp_rule_id = rule_added.rule_id;
881 vsi->vf->lldp_tx_ena = false;
882 }
883
884 return err;
885 }
886
887 static void ice_handle_add_pf_lldp_drop_rule(struct ice_vsi *vsi)
888 {
889 struct ice_tc_flower_fltr *fltr;
890 struct ice_pf *pf = vsi->back;
891
892 hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node) {
893 if (!ice_is_fltr_vf_tx_lldp(fltr))
894 continue;
895 ice_pass_vf_tx_lldp(fltr->src_vsi, true);
896 break;
897 }
898 }
899
900 static void ice_handle_del_pf_lldp_drop_rule(struct ice_pf *pf)
901 {
902 int i;
903
904 /* Make the VF LLDP fwd to uplink rule dormant */
905 ice_for_each_vsi(pf, i) {
906 struct ice_vsi *vf_vsi = pf->vsi[i];
907
908 if (vf_vsi && vf_vsi->type == ICE_VSI_VF)
909 ice_drop_vf_tx_lldp(vf_vsi, false);
910 }
911 }
912
913 static int
914 ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
915 {
916 struct ice_adv_rule_info rule_info = { 0 };
917 struct ice_rule_query_data rule_added;
918 struct ice_hw *hw = &vsi->back->hw;
919 struct ice_adv_lkup_elem *list;
920 u32 flags = fltr->flags;
921 int lkups_cnt;
922 int ret;
923 int i;
924
925 if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT) {
926 NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
927 return -EOPNOTSUPP;
928 }
929
930 if (ice_is_fltr_vf_tx_lldp(fltr))
931 return ice_pass_vf_tx_lldp(vsi, false);
932
933 lkups_cnt = ice_tc_count_lkups(flags, fltr);
934 list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
935 if (!list)
936 return -ENOMEM;
937
938 i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL);
939 if (i != lkups_cnt) {
940 ret = -EINVAL;
941 goto exit;
942 }
943
944 rule_info.sw_act.fltr_act = fltr->action.fltr_act;
945 if (fltr->action.fltr_act != ICE_DROP_PACKET)
946 rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
947 /* For now, make the priority the highest; it also becomes the
948 * priority of the recipe that will be created as a result of the
949 * new extraction sequence based on the input set.
950 * Priority '7' is the max value for a switch recipe; the higher
951 * the number, the higher the precedence in switch rule evaluation.
952 */
953 rule_info.priority = 7;
954 rule_info.flags_info.act_valid = true;
955
956 if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
957 /* Uplink to VF */
958 rule_info.sw_act.flag |= ICE_FLTR_RX;
959 rule_info.sw_act.src = hw->pf_id;
960 rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
961 } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
962 !fltr->dest_vsi && vsi == vsi->back->eswitch.uplink_vsi) {
963 /* PF to Uplink */
964 rule_info.sw_act.flag |= ICE_FLTR_TX;
965 rule_info.sw_act.src = vsi->idx;
966 } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
967 fltr->dest_vsi == vsi->back->eswitch.uplink_vsi) {
968 /* VF to Uplink */
969 rule_info.sw_act.flag |= ICE_FLTR_TX;
970 rule_info.sw_act.src = vsi->idx;
971 rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
972 /* This is a specific case. The destination VSI index is
973 * overwritten by the source VSI index. This type of filter
974 * should allow the packet to go to the LAN, not to the
975 * VSI passed here. It should set LAN_EN bit only. However,
976 * the VSI must be a valid one. Setting source VSI index
977 * here is safe. Even if the result from switch is set LAN_EN
978 * and LB_EN (which normally will pass the packet to this VSI)
979 * packet won't be seen on the VSI, because local loopback is
980 * turned off.
981 */
982 rule_info.sw_act.vsi_handle = vsi->idx;
983 } else {
984 /* VF to VF */
985 rule_info.sw_act.flag |= ICE_FLTR_TX;
986 rule_info.sw_act.src = vsi->idx;
987 rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
988 }
989
990 /* specify the cookie as filter_rule_id */
991 rule_info.fltr_rule_id = fltr->cookie;
992 rule_info.src_vsi = vsi->idx;
993
994 ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
995 if (ret == -EEXIST) {
996 NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exists");
997 ret = -EINVAL;
998 goto exit;
999 } else if (ret == -ENOSPC) {
1000 NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter: insufficient space available.");
1001 goto exit;
1002 } else if (ret) {
1003 NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
1004 goto exit;
1005 }
1006
1007 if (ice_is_fltr_pf_tx_lldp(fltr))
1008 ice_handle_add_pf_lldp_drop_rule(vsi);
1009
1010 /* store the output params, which are needed later for removing
1011 * advanced switch filter
1012 */
1013 fltr->rid = rule_added.rid;
1014 fltr->rule_id = rule_added.rule_id;
1015 fltr->dest_vsi_handle = rule_added.vsi_handle;
1016
1017 exit:
1018 kfree(list);
1019 return ret;
1020 }
1021
1022 /**
1023 * ice_locate_vsi_using_queue - locate VSI using queue (forward to queue action)
1024 * @vsi: Pointer to VSI
1025 * @queue: Queue index
1026 *
1027 * Locate the VSI using specified "queue". When ADQ is not enabled,
1028 * always return input VSI, otherwise locate corresponding
1029 * VSI based on per channel "offset" and "qcount"
1030 */
1031 struct ice_vsi *
1032 ice_locate_vsi_using_queue(struct ice_vsi *vsi, int queue)
1033 {
1034 int num_tc, tc;
1035
1036 /* if ADQ is not active, passed VSI is the candidate VSI */
1037 if (!ice_is_adq_active(vsi->back))
1038 return vsi;
1039
1040 /* Locate the VSI (it could still be main PF VSI or CHNL_VSI depending
1041 * upon queue number)
1042 */
1043 num_tc = vsi->mqprio_qopt.qopt.num_tc;
1044
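/* Example: with qopt.count = {4, 4} and qopt.offset = {0, 4}, queue 5
 * falls inside TC 1 and resolves to vsi->tc_map_vsi[1].
 */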
1045 for (tc = 0; tc < num_tc; tc++) {
1046 int qcount = vsi->mqprio_qopt.qopt.count[tc];
1047 int offset = vsi->mqprio_qopt.qopt.offset[tc];
1048
1049 if (queue >= offset && queue < offset + qcount) {
1050 /* for non-ADQ TCs, passed VSI is the candidate VSI */
1051 if (tc < ICE_CHNL_START_TC)
1052 return vsi;
1053 else
1054 return vsi->tc_map_vsi[tc];
1055 }
1056 }
1057 return NULL;
1058 }
1059
1060 static struct ice_rx_ring *
1061 ice_locate_rx_ring_using_queue(struct ice_vsi *vsi,
1062 struct ice_tc_flower_fltr *tc_fltr)
1063 {
1064 u16 queue = tc_fltr->action.fwd.q.queue;
1065
1066 return queue < vsi->num_rxq ? vsi->rx_rings[queue] : NULL;
1067 }
1068
1069 /**
1070 * ice_tc_forward_action - Determine destination VSI and queue for the action
1071 * @vsi: Pointer to VSI
1072 * @tc_fltr: Pointer to TC flower filter structure
1073 *
1074 * Validates the tc forward action and determines the destination VSI and queue
1075 * for the forward action.
1076 */
1077 static struct ice_vsi *
1078 ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
1079 {
1080 struct ice_rx_ring *ring = NULL;
1081 struct ice_vsi *dest_vsi = NULL;
1082 struct ice_pf *pf = vsi->back;
1083 struct device *dev;
1084 u32 tc_class;
1085 int q;
1086
1087 dev = ice_pf_to_dev(pf);
1088
1089 /* Get the destination VSI and/or destination queue and validate them */
1090 switch (tc_fltr->action.fltr_act) {
1091 case ICE_FWD_TO_VSI:
1092 tc_class = tc_fltr->action.fwd.tc.tc_class;
1093 /* Select the destination VSI */
1094 if (tc_class < ICE_CHNL_START_TC) {
1095 NL_SET_ERR_MSG_MOD(tc_fltr->extack,
1096 "Unable to add filter because of unsupported destination");
1097 return ERR_PTR(-EOPNOTSUPP);
1098 }
1099 /* Locate ADQ VSI depending on hw_tc number */
1100 dest_vsi = vsi->tc_map_vsi[tc_class];
1101 break;
1102 case ICE_FWD_TO_Q:
1103 /* Locate the Rx queue */
1104 ring = ice_locate_rx_ring_using_queue(vsi, tc_fltr);
1105 if (!ring) {
1106 dev_err(dev,
1107 "Unable to locate Rx queue for action fwd_to_queue: %u\n",
1108 tc_fltr->action.fwd.q.queue);
1109 return ERR_PTR(-EINVAL);
1110 }
1111 /* Determine destination VSI even though the action is
1112 * FWD_TO_QUEUE, because QUEUE is associated with VSI
1113 */
1114 q = tc_fltr->action.fwd.q.queue;
1115 dest_vsi = ice_locate_vsi_using_queue(vsi, q);
1116 break;
1117 default:
1118 dev_err(dev,
1119 "Unable to add filter because of unsupported action %u (supported actions: fwd to tc, fwd to queue)\n",
1120 tc_fltr->action.fltr_act);
1121 return ERR_PTR(-EINVAL);
1122 }
1123 /* Must have valid dest_vsi (it could be main VSI or ADQ VSI) */
1124 if (!dest_vsi) {
1125 dev_err(dev,
1126 "Unable to add filter because specified destination VSI doesn't exist\n");
1127 return ERR_PTR(-EINVAL);
1128 }
1129 return dest_vsi;
1130 }
1131
1132 /**
1133 * ice_add_tc_flower_adv_fltr - add appropriate filter rules
1134 * @vsi: Pointer to VSI
1135 * @tc_fltr: Pointer to TC flower filter structure
1136 *
1137 * Adds the appropriate filter rules based on the filter parameters,
1138 * using the advanced recipes supported by the OS package.
1139 */
1140 static int
1141 ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
1142 struct ice_tc_flower_fltr *tc_fltr)
1143 {
1144 struct ice_adv_rule_info rule_info = {0};
1145 struct ice_rule_query_data rule_added;
1146 struct ice_adv_lkup_elem *list;
1147 struct ice_pf *pf = vsi->back;
1148 struct ice_hw *hw = &pf->hw;
1149 u32 flags = tc_fltr->flags;
1150 struct ice_vsi *dest_vsi;
1151 struct device *dev;
1152 u16 lkups_cnt = 0;
1153 u16 l4_proto = 0;
1154 int ret = 0;
1155 u16 i = 0;
1156
1157 dev = ice_pf_to_dev(pf);
1158 if (ice_is_safe_mode(pf)) {
1159 NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode");
1160 return -EOPNOTSUPP;
1161 }
1162
1163 if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
1164 ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
1165 ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
1166 ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
1167 ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
1168 NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unsupported encap field(s)");
1169 return -EOPNOTSUPP;
1170 }
1171
1172 /* validate forwarding action VSI and queue */
1173 if (ice_is_forward_action(tc_fltr->action.fltr_act)) {
1174 dest_vsi = ice_tc_forward_action(vsi, tc_fltr);
1175 if (IS_ERR(dest_vsi))
1176 return PTR_ERR(dest_vsi);
1177 }
1178
1179 lkups_cnt = ice_tc_count_lkups(flags, tc_fltr);
1180 list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
1181 if (!list)
1182 return -ENOMEM;
1183
1184 i = ice_tc_fill_rules(hw, flags, tc_fltr, list, &rule_info, &l4_proto);
1185 if (i != lkups_cnt) {
1186 ret = -EINVAL;
1187 goto exit;
1188 }
1189
1190 rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
1191 /* specify the cookie as filter_rule_id */
1192 rule_info.fltr_rule_id = tc_fltr->cookie;
1193
1194 switch (tc_fltr->action.fltr_act) {
1195 case ICE_FWD_TO_VSI:
1196 rule_info.sw_act.vsi_handle = dest_vsi->idx;
1197 rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
1198 rule_info.sw_act.src = hw->pf_id;
1199 dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
1200 tc_fltr->action.fwd.tc.tc_class,
1201 rule_info.sw_act.vsi_handle, lkups_cnt);
1202 break;
1203 case ICE_FWD_TO_Q:
1204 /* HW queue number in global space */
1205 rule_info.sw_act.fwd_id.q_id = tc_fltr->action.fwd.q.hw_queue;
1206 rule_info.sw_act.vsi_handle = dest_vsi->idx;
1207 rule_info.priority = ICE_SWITCH_FLTR_PRIO_QUEUE;
1208 rule_info.sw_act.src = hw->pf_id;
1209 dev_dbg(dev, "add switch rule action to forward to queue:%u (HW queue %u), lkups_cnt:%u\n",
1210 tc_fltr->action.fwd.q.queue,
1211 tc_fltr->action.fwd.q.hw_queue, lkups_cnt);
1212 break;
1213 case ICE_DROP_PACKET:
1214 if (tc_fltr->direction == ICE_ESWITCH_FLTR_EGRESS) {
1215 rule_info.sw_act.flag |= ICE_FLTR_TX;
1216 rule_info.sw_act.src = vsi->idx;
1217 } else {
1218 rule_info.sw_act.flag |= ICE_FLTR_RX;
1219 rule_info.sw_act.src = hw->pf_id;
1220 }
1221 rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
1222 break;
1223 default:
1224 ret = -EOPNOTSUPP;
1225 goto exit;
1226 }
1227
1228 ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
1229 if (ret == -EEXIST) {
1230 NL_SET_ERR_MSG_MOD(tc_fltr->extack,
1231 "Unable to add filter because it already exist");
1232 ret = -EINVAL;
1233 goto exit;
1234 } else if (ret == -ENOSPC) {
1235 NL_SET_ERR_MSG_MOD(tc_fltr->extack,
1236 "Unable to add filter: insufficient space available.");
1237 goto exit;
1238 } else if (ret) {
1239 NL_SET_ERR_MSG_MOD(tc_fltr->extack,
1240 "Unable to add filter due to error");
1241 goto exit;
1242 }
1243
1244 /* store the output params, which are needed later for removing
1245 * advanced switch filter
1246 */
1247 tc_fltr->rid = rule_added.rid;
1248 tc_fltr->rule_id = rule_added.rule_id;
1249 tc_fltr->dest_vsi_handle = rule_added.vsi_handle;
1250 if (tc_fltr->action.fltr_act == ICE_FWD_TO_VSI ||
1251 tc_fltr->action.fltr_act == ICE_FWD_TO_Q) {
1252 tc_fltr->dest_vsi = dest_vsi;
1253 /* keep track of advanced switch filter for
1254 * destination VSI
1255 */
1256 dest_vsi->num_chnl_fltr++;
1257
1258 /* keeps track of channel filters for PF VSI */
1259 if (vsi->type == ICE_VSI_PF &&
1260 (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
1261 ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
1262 pf->num_dmac_chnl_fltrs++;
1263 }
1264 switch (tc_fltr->action.fltr_act) {
1265 case ICE_FWD_TO_VSI:
1266 dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to TC %u, rid %u, rule_id %u, vsi_idx %u\n",
1267 lkups_cnt, flags,
1268 tc_fltr->action.fwd.tc.tc_class, rule_added.rid,
1269 rule_added.rule_id, rule_added.vsi_handle);
1270 break;
1271 case ICE_FWD_TO_Q:
1272 dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is forward to queue: %u (HW queue %u) , rid %u, rule_id %u\n",
1273 lkups_cnt, flags, tc_fltr->action.fwd.q.queue,
1274 tc_fltr->action.fwd.q.hw_queue, rule_added.rid,
1275 rule_added.rule_id);
1276 break;
1277 case ICE_DROP_PACKET:
1278 dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is drop, rid %u, rule_id %u\n",
1279 lkups_cnt, flags, rule_added.rid, rule_added.rule_id);
1280 break;
1281 default:
1282 break;
1283 }
1284 exit:
1285 kfree(list);
1286 return ret;
1287 }
1288
1289 /**
1290 * ice_tc_set_pppoe - Parse PPPoE fields from TC flower filter
1291 * @match: Pointer to flow match structure
1292 * @fltr: Pointer to filter structure
1293 * @headers: Pointer to outer header fields
1294 * @returns PPP protocol used in filter (ppp_ses or ppp_disc)
1295 */
1296 static u16
1297 ice_tc_set_pppoe(struct flow_match_pppoe *match,
1298 struct ice_tc_flower_fltr *fltr,
1299 struct ice_tc_flower_lyr_2_4_hdrs *headers)
1300 {
1301 if (match->mask->session_id) {
1302 fltr->flags |= ICE_TC_FLWR_FIELD_PPPOE_SESSID;
1303 headers->pppoe_hdr.session_id = match->key->session_id;
1304 }
1305
1306 if (match->mask->ppp_proto) {
1307 fltr->flags |= ICE_TC_FLWR_FIELD_PPP_PROTO;
1308 headers->pppoe_hdr.ppp_proto = match->key->ppp_proto;
1309 }
1310
1311 return be16_to_cpu(match->key->type);
1312 }
1313
1314 /**
1315 * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
1316 * @match: Pointer to flow match structure
1317 * @fltr: Pointer to filter structure
1318 * @headers: inner or outer header fields
1319 * @is_encap: set true for tunnel IPv4 address
1320 */
1321 static int
1322 ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
1323 struct ice_tc_flower_fltr *fltr,
1324 struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
1325 {
1326 if (match->key->dst) {
1327 if (is_encap)
1328 fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV4;
1329 else
1330 fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
1331 headers->l3_key.dst_ipv4 = match->key->dst;
1332 headers->l3_mask.dst_ipv4 = match->mask->dst;
1333 }
1334 if (match->key->src) {
1335 if (is_encap)
1336 fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV4;
1337 else
1338 fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
1339 headers->l3_key.src_ipv4 = match->key->src;
1340 headers->l3_mask.src_ipv4 = match->mask->src;
1341 }
1342 return 0;
1343 }
1344
1345 /**
1346 * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter
1347 * @match: Pointer to flow match structure
1348 * @fltr: Pointer to filter structure
1349 * @headers: inner or outer header fields
1350 * @is_encap: set true for tunnel IPv6 address
1351 */
1352 static int
1353 ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
1354 struct ice_tc_flower_fltr *fltr,
1355 struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
1356 {
1357 struct ice_tc_l3_hdr *l3_key, *l3_mask;
1358
1359 /* src and dest IPV6 address should not be LOOPBACK
1360 * (0:0:0:0:0:0:0:1), which can be represented as ::1
1361 */
1362 if (ipv6_addr_loopback(&match->key->dst) ||
1363 ipv6_addr_loopback(&match->key->src)) {
1364 NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK");
1365 return -EINVAL;
1366 }
1367 /* if src/dest IPv6 address is *,* error */
1368 if (ipv6_addr_any(&match->mask->dst) &&
1369 ipv6_addr_any(&match->mask->src)) {
1370 NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any");
1371 return -EINVAL;
1372 }
1373 if (!ipv6_addr_any(&match->mask->dst)) {
1374 if (is_encap)
1375 fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV6;
1376 else
1377 fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
1378 }
1379 if (!ipv6_addr_any(&match->mask->src)) {
1380 if (is_encap)
1381 fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV6;
1382 else
1383 fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;
1384 }
1385
1386 l3_key = &headers->l3_key;
1387 l3_mask = &headers->l3_mask;
1388
1389 if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
1390 ICE_TC_FLWR_FIELD_SRC_IPV6)) {
1391 memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr,
1392 sizeof(match->key->src.s6_addr));
1393 memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr,
1394 sizeof(match->mask->src.s6_addr));
1395 }
1396 if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
1397 ICE_TC_FLWR_FIELD_DEST_IPV6)) {
1398 memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr,
1399 sizeof(match->key->dst.s6_addr));
1400 memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr,
1401 sizeof(match->mask->dst.s6_addr));
1402 }
1403
1404 return 0;
1405 }
1406
1407 /**
1408 * ice_tc_set_tos_ttl - Parse IP ToS/TTL from TC flower filter
1409 * @match: Pointer to flow match structure
1410 * @fltr: Pointer to filter structure
1411 * @headers: inner or outer header fields
1412 * @is_encap: set true for tunnel
1413 */
1414 static void
1415 ice_tc_set_tos_ttl(struct flow_match_ip *match,
1416 struct ice_tc_flower_fltr *fltr,
1417 struct ice_tc_flower_lyr_2_4_hdrs *headers,
1418 bool is_encap)
1419 {
1420 if (match->mask->tos) {
1421 if (is_encap)
1422 fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TOS;
1423 else
1424 fltr->flags |= ICE_TC_FLWR_FIELD_IP_TOS;
1425
1426 headers->l3_key.tos = match->key->tos;
1427 headers->l3_mask.tos = match->mask->tos;
1428 }
1429
1430 if (match->mask->ttl) {
1431 if (is_encap)
1432 fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TTL;
1433 else
1434 fltr->flags |= ICE_TC_FLWR_FIELD_IP_TTL;
1435
1436 headers->l3_key.ttl = match->key->ttl;
1437 headers->l3_mask.ttl = match->mask->ttl;
1438 }
1439 }
1440
1441 /**
1442 * ice_tc_set_port - Parse ports from TC flower filter
1443 * @match: Flow match structure
1444 * @fltr: Pointer to filter structure
1445 * @headers: inner or outer header fields
1446 * @is_encap: set true for tunnel port
1447 */
1448 static int
1449 ice_tc_set_port(struct flow_match_ports match,
1450 struct ice_tc_flower_fltr *fltr,
1451 struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
1452 {
1453 if (match.key->dst) {
1454 if (is_encap)
1455 fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
1456 else
1457 fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
1458
1459 headers->l4_key.dst_port = match.key->dst;
1460 headers->l4_mask.dst_port = match.mask->dst;
1461 }
1462 if (match.key->src) {
1463 if (is_encap)
1464 fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
1465 else
1466 fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
1467
1468 headers->l4_key.src_port = match.key->src;
1469 headers->l4_mask.src_port = match.mask->src;
1470 }
1471 return 0;
1472 }
1473
1474 static struct net_device *
1475 ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
1476 {
1477 struct flow_action_entry *act;
1478 int i;
1479
1480 if (ice_is_tunnel_supported(dev))
1481 return dev;
1482
1483 flow_action_for_each(i, act, &rule->action) {
1484 if (act->id == FLOW_ACTION_REDIRECT &&
1485 ice_is_tunnel_supported(act->dev))
1486 return act->dev;
1487 }
1488
1489 return NULL;
1490 }
1491
1492 /**
1493 * ice_parse_gtp_type - Sets GTP tunnel type to GTP-U or GTP-C
1494 * @match: Flow match structure
1495 * @fltr: Pointer to filter structure
1496 *
1497 * GTP-C/GTP-U is selected based on destination port number (enc_dst_port).
1498 * Before calling this function, fltr->tunnel_type should be set to TNL_GTPU,
1499 * therefore making GTP-U the default choice (when destination port number is
1500 * not specified).
1501 */
1502 static int
1503 ice_parse_gtp_type(struct flow_match_ports match,
1504 struct ice_tc_flower_fltr *fltr)
1505 {
1506 u16 dst_port;
1507
1508 if (match.key->dst) {
1509 dst_port = be16_to_cpu(match.key->dst);
1510
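/* 2152 is the well-known GTP-U port, 2123 the GTP-C port. */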
1511 switch (dst_port) {
1512 case 2152:
1513 break;
1514 case 2123:
1515 fltr->tunnel_type = TNL_GTPC;
1516 break;
1517 default:
1518 NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported GTP port number");
1519 return -EINVAL;
1520 }
1521 }
1522
1523 return 0;
1524 }
1525
1526 static int
1527 ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
1528 struct ice_tc_flower_fltr *fltr)
1529 {
1530 struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
1531 struct netlink_ext_ack *extack = fltr->extack;
1532 struct flow_match_control enc_control;
1533
1534 fltr->tunnel_type = ice_tc_tun_get_type(dev);
1535 headers->l3_key.ip_proto = IPPROTO_UDP;
1536
1537 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
1538 struct flow_match_enc_keyid enc_keyid;
1539
1540 flow_rule_match_enc_keyid(rule, &enc_keyid);
1541
1542 if (!enc_keyid.mask->keyid ||
1543 enc_keyid.mask->keyid != cpu_to_be32(ICE_TC_FLOWER_MASK_32))
1544 return -EINVAL;
1545
1546 fltr->flags |= ICE_TC_FLWR_FIELD_TENANT_ID;
1547 fltr->tenant_id = enc_keyid.key->keyid;
1548 }
1549
1550 flow_rule_match_enc_control(rule, &enc_control);
1551
1552 if (flow_rule_has_enc_control_flags(enc_control.mask->flags, extack))
1553 return -EOPNOTSUPP;
1554
1555 if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1556 struct flow_match_ipv4_addrs match;
1557
1558 flow_rule_match_enc_ipv4_addrs(rule, &match);
1559 if (ice_tc_set_ipv4(&match, fltr, headers, true))
1560 return -EINVAL;
1561 } else if (enc_control.key->addr_type ==
1562 FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1563 struct flow_match_ipv6_addrs match;
1564
1565 flow_rule_match_enc_ipv6_addrs(rule, &match);
1566 if (ice_tc_set_ipv6(&match, fltr, headers, true))
1567 return -EINVAL;
1568 }
1569
1570 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
1571 struct flow_match_ip match;
1572
1573 flow_rule_match_enc_ip(rule, &match);
1574 ice_tc_set_tos_ttl(&match, fltr, headers, true);
1575 }
1576
1577 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
1578 fltr->tunnel_type != TNL_VXLAN && fltr->tunnel_type != TNL_GENEVE) {
1579 struct flow_match_ports match;
1580
1581 flow_rule_match_enc_ports(rule, &match);
1582
1583 if (fltr->tunnel_type != TNL_GTPU) {
1584 if (ice_tc_set_port(match, fltr, headers, true))
1585 return -EINVAL;
1586 } else {
1587 if (ice_parse_gtp_type(match, fltr))
1588 return -EINVAL;
1589 }
1590 }
1591
1592 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) &&
1593 (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
1594 struct flow_match_enc_opts match;
1595
1596 flow_rule_match_enc_opts(rule, &match);
1597
1598 memcpy(&fltr->gtp_pdu_info_keys, &match.key->data[0],
1599 sizeof(struct gtp_pdu_session_info));
1600
1601 memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
1602 sizeof(struct gtp_pdu_session_info));
1603
1604 fltr->flags |= ICE_TC_FLWR_FIELD_GTP_OPTS;
1605 }
1606
1607 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) &&
1608 fltr->tunnel_type == TNL_PFCP) {
1609 struct flow_match_enc_opts match;
1610
1611 flow_rule_match_enc_opts(rule, &match);
1612
1613 memcpy(&fltr->pfcp_meta_keys, match.key->data,
1614 sizeof(struct pfcp_metadata));
1615 memcpy(&fltr->pfcp_meta_masks, match.mask->data,
1616 sizeof(struct pfcp_metadata));
1617
1618 fltr->flags |= ICE_TC_FLWR_FIELD_PFCP_OPTS;
1619 }
1620
1621 return 0;
1622 }
1623
1624 /**
1625 * ice_parse_cls_flower - Parse TC flower filters provided by kernel
1626 * @vsi: Pointer to the VSI
1627 * @filter_dev: Pointer to device on which filter is being added
1628 * @f: Pointer to struct flow_cls_offload
1629 * @fltr: Pointer to filter structure
1630 * @ingress: if the rule is added to an ingress block
1631 *
1632 * Return: 0 if the flower was parsed successfully, -EINVAL if the flower
1633 * cannot be parsed, -EOPNOTSUPP if such filter cannot be configured
1634 * for the given VSI.
1635 */
1636 static int
1637 ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
1638 struct flow_cls_offload *f,
1639 struct ice_tc_flower_fltr *fltr, bool ingress)
1640 {
1641 struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
1642 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
1643 u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
1644 struct flow_dissector *dissector;
1645 struct net_device *tunnel_dev;
1646
1647 dissector = rule->match.dissector;
1648
1649 if (dissector->used_keys &
1650 ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
1651 BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
1652 BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
1653 BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
1654 BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
1655 BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
1656 BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
1657 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
1658 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
1659 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
1660 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
1661 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
1662 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
1663 BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
1664 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
1665 BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
1666 BIT_ULL(FLOW_DISSECTOR_KEY_PPPOE) |
1667 BIT_ULL(FLOW_DISSECTOR_KEY_L2TPV3))) {
1668 NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
1669 return -EOPNOTSUPP;
1670 }
1671
1672 tunnel_dev = ice_get_tunnel_device(filter_dev, rule);
1673 if (tunnel_dev) {
1674 int err;
1675
1676 filter_dev = tunnel_dev;
1677
1678 err = ice_parse_tunnel_attr(filter_dev, rule, fltr);
1679 if (err) {
1680 NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to parse TC flower tunnel attributes");
1681 return err;
1682 }
1683
1684 /* PFCP is considered non-tunneled - don't swap headers. */
1685 if (fltr->tunnel_type != TNL_PFCP) {
1686 /* Header pointers should point to the inner headers,
1687 * outer header were already set by
1688 * ice_parse_tunnel_attr().
1689 */
1690 headers = &fltr->inner_headers;
1691 }
1692 } else if (dissector->used_keys &
1693 (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
1694 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
1695 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
1696 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
1697 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
1698 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
1699 BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
1700 NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
1701 return -EOPNOTSUPP;
1702 } else {
1703 fltr->tunnel_type = TNL_LAST;
1704 }
1705
1706 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
1707 struct flow_match_basic match;
1708
1709 flow_rule_match_basic(rule, &match);
1710
1711 n_proto_key = ntohs(match.key->n_proto);
1712 n_proto_mask = ntohs(match.mask->n_proto);
1713
1714 if (n_proto_key == ETH_P_ALL || n_proto_key == 0 ||
1715 fltr->tunnel_type == TNL_GTPU ||
1716 fltr->tunnel_type == TNL_GTPC) {
1717 n_proto_key = 0;
1718 n_proto_mask = 0;
1719 } else {
1720 fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
1721 }
1722
1723 if (!ingress) {
1724 bool switchdev =
1725 ice_is_eswitch_mode_switchdev(vsi->back);
1726
1727 if (switchdev != (n_proto_key == ETH_P_LLDP)) {
1728 NL_SET_ERR_MSG_FMT_MOD(fltr->extack,
1729 "%sLLDP filtering is not supported on egress in %s mode",
1730 switchdev ? "Non-" : "",
1731 switchdev ? "switchdev" :
1732 "legacy");
1733 return -EOPNOTSUPP;
1734 }
1735 }
1736
1737 headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
1738 headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
1739 headers->l3_key.ip_proto = match.key->ip_proto;
1740 }
1741
1742 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1743 struct flow_match_eth_addrs match;
1744
1745 flow_rule_match_eth_addrs(rule, &match);
1746
1747 if (!is_zero_ether_addr(match.key->dst)) {
1748 ether_addr_copy(headers->l2_key.dst_mac,
1749 match.key->dst);
1750 ether_addr_copy(headers->l2_mask.dst_mac,
1751 match.mask->dst);
1752 fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
1753 }
1754
1755 if (!is_zero_ether_addr(match.key->src)) {
1756 ether_addr_copy(headers->l2_key.src_mac,
1757 match.key->src);
1758 ether_addr_copy(headers->l2_mask.src_mac,
1759 match.mask->src);
1760 fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC;
1761 }
1762 }
1763
1764 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
1765 is_vlan_dev(filter_dev)) {
1766 struct flow_dissector_key_vlan mask;
1767 struct flow_dissector_key_vlan key;
1768 struct flow_match_vlan match;
1769
1770 if (is_vlan_dev(filter_dev)) {
1771 match.key = &key;
1772 match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
1773 match.key->vlan_priority = 0;
1774 match.mask = &mask;
1775 memset(match.mask, 0xff, sizeof(*match.mask));
1776 match.mask->vlan_priority = 0;
1777 } else {
1778 flow_rule_match_vlan(rule, &match);
1779 }
1780
1781 if (match.mask->vlan_id) {
1782 if (match.mask->vlan_id == VLAN_VID_MASK) {
1783 fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
1784 headers->vlan_hdr.vlan_id =
1785 cpu_to_be16(match.key->vlan_id &
1786 VLAN_VID_MASK);
1787 } else {
1788 NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
1789 return -EINVAL;
1790 }
1791 }
1792
1793 if (match.mask->vlan_priority) {
1794 fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_PRIO;
1795 headers->vlan_hdr.vlan_prio =
1796 be16_encode_bits(match.key->vlan_priority,
1797 VLAN_PRIO_MASK);
1798 }
1799
1800 if (match.mask->vlan_tpid) {
1801 headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid;
1802 fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_TPID;
1803 }
1804 }
1805
1806 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
1807 struct flow_match_vlan match;
1808
1809 if (!ice_is_dvm_ena(&vsi->back->hw)) {
1810 NL_SET_ERR_MSG_MOD(fltr->extack, "Double VLAN mode is not enabled");
1811 return -EINVAL;
1812 }
1813
1814 flow_rule_match_cvlan(rule, &match);
1815
1816 if (match.mask->vlan_id) {
1817 if (match.mask->vlan_id == VLAN_VID_MASK) {
1818 fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN;
1819 headers->cvlan_hdr.vlan_id =
1820 cpu_to_be16(match.key->vlan_id &
1821 VLAN_VID_MASK);
1822 } else {
1823 NL_SET_ERR_MSG_MOD(fltr->extack,
1824 "Bad CVLAN mask");
1825 return -EINVAL;
1826 }
1827 }
1828
1829 if (match.mask->vlan_priority) {
1830 fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN_PRIO;
1831 headers->cvlan_hdr.vlan_prio =
1832 be16_encode_bits(match.key->vlan_priority,
1833 VLAN_PRIO_MASK);
1834 }
1835 }
1836
1837 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PPPOE)) {
1838 struct flow_match_pppoe match;
1839
1840 flow_rule_match_pppoe(rule, &match);
1841 n_proto_key = ice_tc_set_pppoe(&match, fltr, headers);
1842
1843 /* If ethertype equals ETH_P_PPP_SES, n_proto might be
1844 * overwritten by encapsulated protocol (ppp_proto field) or set
1845 * to 0. To correct this, flow_match_pppoe provides the type
1846 * field, which contains the actual ethertype (ETH_P_PPP_SES).
1847 */
1848 headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
1849 headers->l2_mask.n_proto = cpu_to_be16(0xFFFF);
1850 fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
1851 }
1852
1853 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
1854 struct flow_match_control match;
1855
1856 flow_rule_match_control(rule, &match);
1857
1858 addr_type = match.key->addr_type;
1859
1860 if (flow_rule_has_control_flags(match.mask->flags,
1861 fltr->extack))
1862 return -EOPNOTSUPP;
1863 }
1864
1865 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1866 struct flow_match_ipv4_addrs match;
1867
1868 flow_rule_match_ipv4_addrs(rule, &match);
1869 if (ice_tc_set_ipv4(&match, fltr, headers, false))
1870 return -EINVAL;
1871 }
1872
1873 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1874 struct flow_match_ipv6_addrs match;
1875
1876 flow_rule_match_ipv6_addrs(rule, &match);
1877 if (ice_tc_set_ipv6(&match, fltr, headers, false))
1878 return -EINVAL;
1879 }
1880
1881 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
1882 struct flow_match_ip match;
1883
1884 flow_rule_match_ip(rule, &match);
1885 ice_tc_set_tos_ttl(&match, fltr, headers, false);
1886 }
1887
1888 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_L2TPV3)) {
1889 struct flow_match_l2tpv3 match;
1890
1891 flow_rule_match_l2tpv3(rule, &match);
1892
1893 fltr->flags |= ICE_TC_FLWR_FIELD_L2TPV3_SESSID;
1894 headers->l2tpv3_hdr.session_id = match.key->session_id;
1895 }
1896
1897 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
1898 struct flow_match_ports match;
1899
1900 flow_rule_match_ports(rule, &match);
1901 if (ice_tc_set_port(match, fltr, headers, false))
1902 return -EINVAL;
1903 switch (headers->l3_key.ip_proto) {
1904 case IPPROTO_TCP:
1905 case IPPROTO_UDP:
1906 break;
1907 default:
1908 NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport are supported");
1909 return -EINVAL;
1910 }
1911 }
1912
1913 /* Ingress filter on representor results in an egress filter in HW
1914 * and vice versa
1915 */
1916 ingress = ice_is_port_repr_netdev(filter_dev) ? !ingress : ingress;
1917 fltr->direction = ingress ? ICE_ESWITCH_FLTR_INGRESS :
1918 ICE_ESWITCH_FLTR_EGRESS;
1919
1920 return 0;
1921 }
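
/* Illustrative sketch only (not driver code): for a simple flower match on a
 * fully masked VLAN ID plus a TCP destination port on a non-tunnel device,
 * the parser above would leave the filter in roughly this state. Field names
 * follow struct ice_tc_flower_fltr as used in this file; the values are
 * hypothetical.
 *
 *	fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID |
 *		       ICE_TC_FLWR_FIELD_VLAN |
 *		       ICE_TC_FLWR_FIELD_DEST_L4_PORT;
 *	fltr->tunnel_type = TNL_LAST;
 *	fltr->outer_headers.vlan_hdr.vlan_id = cpu_to_be16(100);
 *	fltr->outer_headers.l3_key.ip_proto = IPPROTO_TCP;
 *	fltr->outer_headers.l4_key.dst_port = cpu_to_be16(80);
 */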
1922
1923 /**
1924 * ice_add_switch_fltr - Add TC flower filters
1925 * @vsi: Pointer to VSI
1926 * @fltr: Pointer to struct ice_tc_flower_fltr
1927 *
1928  * Add a filter to the HW switch block
1929 */
1930 static int
1931 ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
1932 {
1933 if (fltr->action.fltr_act == ICE_FWD_TO_QGRP)
1934 return -EOPNOTSUPP;
1935
1936 if (ice_is_eswitch_mode_switchdev(vsi->back))
1937 return ice_eswitch_add_tc_fltr(vsi, fltr);
1938
1939 return ice_add_tc_flower_adv_fltr(vsi, fltr);
1940 }
1941
1942 /**
1943 * ice_prep_adq_filter - Prepare ADQ filter with the required additional headers
1944 * @vsi: Pointer to VSI
1945 * @fltr: Pointer to TC flower filter structure
1946 *
1947 * Prepare ADQ filter with the required additional header fields
1948 */
1949 static int
1950 ice_prep_adq_filter(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
1951 {
1952 if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
1953 (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
1954 ICE_TC_FLWR_FIELD_SRC_MAC))) {
1955 NL_SET_ERR_MSG_MOD(fltr->extack,
1956 "Unable to add filter because filter using tunnel key and inner MAC is unsupported combination");
1957 return -EOPNOTSUPP;
1958 }
1959
1960 	/* For ADQ, the filter must include a destination MAC address; otherwise
1961 	 * unwanted packets with an unrelated MAC address get delivered to ADQ
1962 	 * VSIs as long as the remaining filter criteria (such as destination IP
1963 	 * address and dest/src L4 port) are satisfied. The code below handles
1964 	 * the following cases:
1965 	 * 1. Non-tunnel, user specified MAC addresses: use them.
1966 	 * 2. Non-tunnel, no MAC address specified: implicitly add the lower
1967 	 *    netdev's active unicast MAC address as the destination MAC.
1968 	 * 3. Tunnel: the TC flower classifier currently has no provision for
1969 	 *    specifying an outer DMAC, so implicitly add the lower netdev's
1970 	 *    active unicast MAC address as the outer destination MAC.
1971 	 */
1972 if (fltr->tunnel_type != TNL_LAST &&
1973 !(fltr->flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC))
1974 fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DST_MAC;
1975
1976 if (fltr->tunnel_type == TNL_LAST &&
1977 !(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC))
1978 fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
1979
1980 if (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
1981 ICE_TC_FLWR_FIELD_ENC_DST_MAC)) {
1982 ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
1983 vsi->netdev->dev_addr);
1984 eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
1985 }
1986
1987 /* Make sure VLAN is already added to main VSI, before allowing ADQ to
1988 * add a VLAN based filter such as MAC + VLAN + L4 port.
1989 */
1990 if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
1991 u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);
1992
1993 if (!ice_vlan_fltr_exist(&vsi->back->hw, vlan_id, vsi->idx)) {
1994 NL_SET_ERR_MSG_MOD(fltr->extack,
1995 "Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
1996 return -EINVAL;
1997 }
1998 }
1999 return 0;
2000 }
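
/* Illustrative note: after this prep step a typical non-tunnel ADQ filter
 * (say, dest IP + dest L4 port steered to an ADQ channel, with no MAC match
 * supplied by the user) effectively matches on:
 *
 *	dest MAC  == vsi->netdev->dev_addr	(added implicitly above)
 *	dest IPv4 == <user supplied>
 *	dest port == <user supplied>
 *
 * which is what keeps unrelated unicast traffic out of the ADQ channel VSIs.
 */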
2001
2002 /**
2003 * ice_handle_tclass_action - Support directing to a traffic class
2004 * @vsi: Pointer to VSI
2005 * @cls_flower: Pointer to TC flower offload structure
2006 * @fltr: Pointer to TC flower filter structure
2007 *
2008 * Support directing traffic to a traffic class/queue-set
2009 */
2010 static int
2011 ice_handle_tclass_action(struct ice_vsi *vsi,
2012 struct flow_cls_offload *cls_flower,
2013 struct ice_tc_flower_fltr *fltr)
2014 {
2015 int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
2016
2017 	/* The user specified hw_tc (which must be non-zero for an ADQ TC); the
2018 	 * action is to forward to that hw_tc (i.e. the ADQ channel number).
2019 	 */
2020 if (tc < ICE_CHNL_START_TC) {
2021 NL_SET_ERR_MSG_MOD(fltr->extack,
2022 "Unable to add filter because of unsupported destination");
2023 return -EOPNOTSUPP;
2024 }
2025 if (!(vsi->all_enatc & BIT(tc))) {
2026 NL_SET_ERR_MSG_MOD(fltr->extack,
2027 				   "Unable to add filter because of nonexistent destination");
2028 return -EINVAL;
2029 }
2030 fltr->action.fltr_act = ICE_FWD_TO_VSI;
2031 fltr->action.fwd.tc.tc_class = tc;
2032
2033 return ice_prep_adq_filter(vsi, fltr);
2034 }
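
/* Illustrative usage note (hypothetical values): a flower rule installed with
 * "action hw_tc 1" arrives here with a non-zero cls_flower->classid, which
 * tc_classid_to_hwtc() is expected to map back to TC 1. The resulting action
 * is a VSI forward to that ADQ channel, roughly:
 *
 *	fltr->action.fltr_act = ICE_FWD_TO_VSI;
 *	fltr->action.fwd.tc.tc_class = 1;
 */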
2035
2036 static int
2037 ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
2038 struct flow_action_entry *act)
2039 {
2040 struct ice_vsi *ch_vsi = NULL;
2041 u16 queue = act->rx_queue;
2042
2043 if (queue >= vsi->num_rxq) {
2044 NL_SET_ERR_MSG_MOD(fltr->extack,
2045 "Unable to add filter because specified queue is invalid");
2046 return -EINVAL;
2047 }
2048 fltr->action.fltr_act = ICE_FWD_TO_Q;
2049 fltr->action.fwd.q.queue = queue;
2050 /* determine corresponding HW queue */
2051 fltr->action.fwd.q.hw_queue = vsi->rxq_map[queue];
2052
2053 /* If ADQ is configured, and the queue belongs to ADQ VSI, then prepare
2054 * ADQ switch filter
2055 */
2056 ch_vsi = ice_locate_vsi_using_queue(vsi, fltr->action.fwd.q.queue);
2057 if (!ch_vsi)
2058 return -EINVAL;
2059 fltr->dest_vsi = ch_vsi;
2060 if (!ice_is_chnl_fltr(fltr))
2061 return 0;
2062
2063 return ice_prep_adq_filter(vsi, fltr);
2064 }
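
/* Illustrative sketch only (hypothetical numbers): forwarding to absolute Rx
 * queue 5 on a VSI whose rxq_map places that ring at HW queue 21 would leave
 * the action roughly as:
 *
 *	fltr->action.fltr_act = ICE_FWD_TO_Q;
 *	fltr->action.fwd.q.queue = 5;			// software queue index
 *	fltr->action.fwd.q.hw_queue = vsi->rxq_map[5];	// 21 in this example
 *
 * If that queue belongs to an ADQ channel VSI, the filter is additionally run
 * through ice_prep_adq_filter() before being handed to the switch.
 */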
2065
2066 static int
2067 ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
2068 struct flow_action_entry *act)
2069 {
2070 switch (act->id) {
2071 case FLOW_ACTION_RX_QUEUE_MAPPING:
2072 /* forward to queue */
2073 return ice_tc_forward_to_queue(vsi, fltr, act);
2074 case FLOW_ACTION_DROP:
2075 fltr->action.fltr_act = ICE_DROP_PACKET;
2076 return 0;
2077 default:
2078 NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported TC action");
2079 return -EOPNOTSUPP;
2080 }
2081 }
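
/* Illustrative note: in legacy (non-switchdev) mode a plain drop rule, e.g.
 * "tc filter add ... flower ... action drop", reaches this helper as
 * FLOW_ACTION_DROP and simply sets:
 *
 *	fltr->action.fltr_act = ICE_DROP_PACKET;
 */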
2082
2083 /**
2084 * ice_parse_tc_flower_actions - Parse the actions for a TC filter
2085 * @filter_dev: Pointer to device on which filter is being added
2086 * @vsi: Pointer to VSI
2087 * @cls_flower: Pointer to TC flower offload structure
2088 * @fltr: Pointer to TC flower filter structure
2089 *
2090 * Parse the actions for a TC filter
2091 */
2092 static int ice_parse_tc_flower_actions(struct net_device *filter_dev,
2093 struct ice_vsi *vsi,
2094 struct flow_cls_offload *cls_flower,
2095 struct ice_tc_flower_fltr *fltr)
2096 {
2097 struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
2098 struct flow_action *flow_action = &rule->action;
2099 struct flow_action_entry *act;
2100 int i, err;
2101
2102 if (cls_flower->classid)
2103 return ice_handle_tclass_action(vsi, cls_flower, fltr);
2104
2105 if (!flow_action_has_entries(flow_action))
2106 return -EINVAL;
2107
2108 flow_action_for_each(i, act, flow_action) {
2109 if (ice_is_eswitch_mode_switchdev(vsi->back))
2110 err = ice_eswitch_tc_parse_action(filter_dev, fltr, act);
2111 else
2112 err = ice_tc_parse_action(vsi, fltr, act);
2113 if (err)
2114 return err;
2116 }
2117 return 0;
2118 }
2119
2120 /**
2121 * ice_del_tc_fltr - deletes a filter from HW table
2122 * @vsi: Pointer to VSI
2123 * @fltr: Pointer to struct ice_tc_flower_fltr
2124 *
2125  * This function deletes a filter from the HW table and manages related bookkeeping.
2126 */
2127 static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
2128 {
2129 struct ice_rule_query_data rule_rem;
2130 struct ice_pf *pf = vsi->back;
2131 int err;
2132
2133 if (ice_is_fltr_pf_tx_lldp(fltr))
2134 ice_handle_del_pf_lldp_drop_rule(pf);
2135
2136 if (ice_is_fltr_vf_tx_lldp(fltr))
2137 return ice_drop_vf_tx_lldp(vsi, false);
2138
2139 rule_rem.rid = fltr->rid;
2140 rule_rem.rule_id = fltr->rule_id;
2141 rule_rem.vsi_handle = fltr->dest_vsi_handle;
2142 err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
2143 if (err) {
2144 if (err == -ENOENT) {
2145 NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
2146 return -ENOENT;
2147 }
2148 NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter");
2149 return -EIO;
2150 }
2151
2152 /* update advanced switch filter count for destination
2153 * VSI if filter destination was VSI
2154 */
2155 if (fltr->dest_vsi) {
2156 if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
2157 fltr->dest_vsi->num_chnl_fltr--;
2158
2159 /* keeps track of channel filters for PF VSI */
2160 if (vsi->type == ICE_VSI_PF &&
2161 (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
2162 ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
2163 pf->num_dmac_chnl_fltrs--;
2164 }
2165 }
2166 return 0;
2167 }
2168
2169 /**
2170 * ice_add_tc_fltr - adds a TC flower filter
2171 * @netdev: Pointer to netdev
2172 * @vsi: Pointer to VSI
2173 * @f: Pointer to flower offload structure
2174  * @__fltr: Output pointer, set to the newly created filter on success
2175 * @ingress: if the rule is added to an ingress block
2176 *
2177 * This function parses TC-flower input fields, parses action,
2178 * and adds a filter.
2179 *
2180 * Return: 0 if the filter was successfully added,
2181 * negative error code otherwise.
2182 */
2183 static int
2184 ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
2185 struct flow_cls_offload *f,
2186 struct ice_tc_flower_fltr **__fltr, bool ingress)
2187 {
2188 struct ice_tc_flower_fltr *fltr;
2189 int err;
2190
2191 	/* by default, set the output to NULL */
2192 *__fltr = NULL;
2193
2194 fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
2195 if (!fltr)
2196 return -ENOMEM;
2197
2198 fltr->cookie = f->cookie;
2199 fltr->extack = f->common.extack;
2200 fltr->src_vsi = vsi;
2201 INIT_HLIST_NODE(&fltr->tc_flower_node);
2202
2203 err = ice_parse_cls_flower(netdev, vsi, f, fltr, ingress);
2204 if (err < 0)
2205 goto err;
2206
2207 err = ice_parse_tc_flower_actions(netdev, vsi, f, fltr);
2208 if (err < 0)
2209 goto err;
2210
2211 err = ice_add_switch_fltr(vsi, fltr);
2212 if (err < 0)
2213 goto err;
2214
2215 /* return the newly created filter */
2216 *__fltr = fltr;
2217
2218 return 0;
2219 err:
2220 kfree(fltr);
2221 return err;
2222 }
2223
2224 /**
2225 * ice_find_tc_flower_fltr - Find the TC flower filter in the list
2226 * @pf: Pointer to PF
2227  * @cookie: filter specific cookie
 *
 * Return: pointer to the matching filter, or NULL if none is found.
2228  */
2229 static struct ice_tc_flower_fltr *
2230 ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
2231 {
2232 struct ice_tc_flower_fltr *fltr;
2233
2234 hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
2235 if (cookie == fltr->cookie)
2236 return fltr;
2237
2238 return NULL;
2239 }
2240
2241 /**
2242 * ice_add_cls_flower - add TC flower filters
2243 * @netdev: Pointer to filter device
2244 * @vsi: Pointer to VSI
2245 * @cls_flower: Pointer to flower offload structure
2246 * @ingress: if the rule is added to an ingress block
2247 *
2248 * Return: 0 if the flower was successfully added,
2249 * negative error code otherwise.
2250 */
2251 int ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
2252 struct flow_cls_offload *cls_flower, bool ingress)
2253 {
2254 struct netlink_ext_ack *extack = cls_flower->common.extack;
2255 struct net_device *vsi_netdev = vsi->netdev;
2256 struct ice_tc_flower_fltr *fltr;
2257 struct ice_pf *pf = vsi->back;
2258 int err;
2259
2260 if (ice_is_reset_in_progress(pf->state))
2261 return -EBUSY;
2262 if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
2263 return -EINVAL;
2264
2265 if (ice_is_port_repr_netdev(netdev))
2266 vsi_netdev = netdev;
2267
2268 if (!(vsi_netdev->features & NETIF_F_HW_TC) &&
2269 !test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) {
2270 		/* Based on TC indirect notifications from the kernel, all ice
2271 		 * devices get an instance of the rule from a higher level
2272 		 * device. Avoid reporting an explicit error in this case.
2273 */
2274 if (netdev == vsi_netdev)
2275 NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again");
2276 return -EINVAL;
2277 }
2278
2279 /* avoid duplicate entries, if exists - return error */
2280 fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
2281 if (fltr) {
2282 NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring");
2283 return -EEXIST;
2284 }
2285
2286 /* prep and add TC-flower filter in HW */
2287 err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr, ingress);
2288 if (err)
2289 return err;
2290
2291 /* add filter into an ordered list */
2292 hlist_add_head(&fltr->tc_flower_node, &pf->tc_flower_fltr_list);
2293 return 0;
2294 }
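
/* Usage note (illustrative commands, assuming an interface named eth0): the
 * NETIF_F_HW_TC check above means hw-tc-offload must be enabled before
 * flower rules can be offloaded, e.g.:
 *
 *	ethtool -K eth0 hw-tc-offload on
 *	tc qdisc add dev eth0 clsact
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		dst_ip 192.168.0.10 ip_proto tcp dst_port 80 skip_sw action drop
 *
 * Duplicate cookies are rejected with -EEXIST before any HW programming is
 * attempted.
 */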
2295
2296 /**
2297 * ice_del_cls_flower - delete TC flower filters
2298 * @vsi: Pointer to VSI
2299 * @cls_flower: Pointer to struct flow_cls_offload
2300 */
2301 int
2302 ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower)
2303 {
2304 struct ice_tc_flower_fltr *fltr;
2305 struct ice_pf *pf = vsi->back;
2306 int err;
2307
2308 /* find filter */
2309 fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
2310 if (!fltr) {
2311 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) &&
2312 hlist_empty(&pf->tc_flower_fltr_list))
2313 return 0;
2314
2315 NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter because unable to find it");
2316 return -EINVAL;
2317 }
2318
2319 fltr->extack = cls_flower->common.extack;
2320 /* delete filter from HW */
2321 err = ice_del_tc_fltr(vsi, fltr);
2322 if (err)
2323 return err;
2324
2325 /* delete filter from an ordered list */
2326 hlist_del(&fltr->tc_flower_node);
2327
2328 /* free the filter node */
2329 kfree(fltr);
2330
2331 return 0;
2332 }
2333
2334 /**
2335 * ice_replay_tc_fltrs - replay TC filters
2336 * @pf: pointer to PF struct
2337 */
2338 void ice_replay_tc_fltrs(struct ice_pf *pf)
2339 {
2340 struct ice_tc_flower_fltr *fltr;
2341 struct hlist_node *node;
2342
2343 hlist_for_each_entry_safe(fltr, node,
2344 &pf->tc_flower_fltr_list,
2345 tc_flower_node) {
2346 fltr->extack = NULL;
2347 ice_add_switch_fltr(fltr->src_vsi, fltr);
2348 }
2349 }
2350