/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_vlan.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

#define STATS_CHECK_PERIOD (HZ / 2)

static struct ch_tc_pedit_fields pedits[] = {
	PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
	PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
	PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
	PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
	PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
	PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
	PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
	PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
	PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
	PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
	PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
	PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
	PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
};

static const struct cxgb4_natmode_config cxgb4_natmode_config_array[] = {
	/* Default supported NAT modes */
	{
		.chip = CHELSIO_T5,
		.flags = CXGB4_ACTION_NATMODE_NONE,
		.natmode = NAT_MODE_NONE,
	},
	{
		.chip = CHELSIO_T5,
		.flags = CXGB4_ACTION_NATMODE_DIP,
		.natmode = NAT_MODE_DIP,
	},
	{
		.chip = CHELSIO_T5,
		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT,
		.natmode = NAT_MODE_DIP_DP,
	},
	{
		.chip = CHELSIO_T5,
		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
			 CXGB4_ACTION_NATMODE_SIP,
		.natmode = NAT_MODE_DIP_DP_SIP,
	},
	{
		.chip = CHELSIO_T5,
		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
			 CXGB4_ACTION_NATMODE_SPORT,
		.natmode = NAT_MODE_DIP_DP_SP,
	},
	{
		.chip = CHELSIO_T5,
		.flags = CXGB4_ACTION_NATMODE_SIP | CXGB4_ACTION_NATMODE_SPORT,
		.natmode = NAT_MODE_SIP_SP,
	},
	{
		.chip = CHELSIO_T5,
		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
			 CXGB4_ACTION_NATMODE_SPORT,
		.natmode = NAT_MODE_DIP_SIP_SP,
	},
	{
		.chip = CHELSIO_T5,
		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
			 CXGB4_ACTION_NATMODE_DPORT |
			 CXGB4_ACTION_NATMODE_SPORT,
		.natmode = NAT_MODE_ALL,
	},
	/* T6+ can ignore L4 ports when they're disabled. */
	{
		.chip = CHELSIO_T6,
		.flags = CXGB4_ACTION_NATMODE_SIP,
		.natmode = NAT_MODE_SIP_SP,
	},
	{
		.chip = CHELSIO_T6,
		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SPORT,
		.natmode = NAT_MODE_DIP_DP_SP,
	},
	{
		.chip = CHELSIO_T6,
		.flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP,
		.natmode = NAT_MODE_ALL,
	},
};

static void cxgb4_action_natmode_tweak(struct ch_filter_specification *fs,
				       u8 natmode_flags)
{
	u8 i = 0;

	/* Translate the enabled NAT 4-tuple fields to one of the
	 * hardware supported NAT mode configurations. This ensures
	 * that we pick a valid combination, where the disabled fields
	 * do not get overwritten to 0.
	 */
	for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
		if (cxgb4_natmode_config_array[i].flags == natmode_flags) {
			fs->nat_mode = cxgb4_natmode_config_array[i].natmode;
			return;
		}
	}
}

static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
	struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);

	if (new)
		spin_lock_init(&new->lock);
	return new;
}

/* Must be called with either RTNL or rcu_read_lock */
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
						   unsigned long flower_cookie)
{
	return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
				      adap->flower_ht_params);
}

static void cxgb4_process_flow_match(struct net_device *dev,
				     struct flow_rule *rule,
				     struct ch_filter_specification *fs)
{
	u16 addr_type = 0;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;
		u16 ethtype_key, ethtype_mask;

		flow_rule_match_basic(rule, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);

		if (ethtype_key == ETH_P_ALL) {
			ethtype_key = 0;
			ethtype_mask = 0;
		}

		if (ethtype_key == ETH_P_IPV6)
			fs->type = 1;

		fs->val.ethtype = ethtype_key;
		fs->mask.ethtype = ethtype_mask;
		fs->val.proto = match.key->ip_proto;
		fs->mask.proto = match.mask->ip_proto;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		fs->type = 0;
		memcpy(&fs->val.lip[0], &match.key->dst, sizeof(match.key->dst));
		memcpy(&fs->val.fip[0], &match.key->src, sizeof(match.key->src));
		memcpy(&fs->mask.lip[0], &match.mask->dst, sizeof(match.mask->dst));
		memcpy(&fs->mask.fip[0], &match.mask->src, sizeof(match.mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], &match.key->dst, sizeof(match.key->dst));
		memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		fs->type = 1;
		memcpy(&fs->val.lip[0], match.key->dst.s6_addr,
		       sizeof(match.key->dst));
		memcpy(&fs->val.fip[0], match.key->src.s6_addr,
		       sizeof(match.key->src));
		memcpy(&fs->mask.lip[0], match.mask->dst.s6_addr,
		       sizeof(match.mask->dst));
		memcpy(&fs->mask.fip[0], match.mask->src.s6_addr,
		       sizeof(match.mask->src));

		/* also initialize nat_lip/fip to same values */
		memcpy(&fs->nat_lip[0], match.key->dst.s6_addr,
		       sizeof(match.key->dst));
		memcpy(&fs->nat_fip[0], match.key->src.s6_addr,
		       sizeof(match.key->src));
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		fs->val.lport = be16_to_cpu(match.key->dst);
		fs->mask.lport = be16_to_cpu(match.mask->dst);
		fs->val.fport = be16_to_cpu(match.key->src);
		fs->mask.fport = be16_to_cpu(match.mask->src);

		/* also initialize nat_lport/fport to same values */
		fs->nat_lport = fs->val.lport;
		fs->nat_fport = fs->val.fport;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		fs->val.tos = match.key->tos;
		fs->mask.tos = match.mask->tos;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		fs->val.vni = be32_to_cpu(match.key->keyid);
		fs->mask.vni = be32_to_cpu(match.mask->keyid);
		if (fs->mask.vni) {
			fs->val.encap_vld = 1;
			fs->mask.encap_vld = 1;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;
		u16 vlan_tci, vlan_tci_mask;

		flow_rule_match_vlan(rule, &match);
		vlan_tci = match.key->vlan_id | (match.key->vlan_priority <<
						 VLAN_PRIO_SHIFT);
		vlan_tci_mask = match.mask->vlan_id | (match.mask->vlan_priority <<
						       VLAN_PRIO_SHIFT);
		fs->val.ivlan = vlan_tci;
		fs->mask.ivlan = vlan_tci_mask;

		fs->val.ivlan_vld = 1;
		fs->mask.ivlan_vld = 1;

		/* Chelsio adapters use the ivlan_vld bit to match vlan
		 * packets as 802.1Q. Also, when a vlan tag is present in
		 * a packet, the ethtype match is then used to match on the
		 * ethtype of the inner header, i.e. the header following
		 * the vlan header. So, set ivlan_vld based on the ethtype
		 * info supplied by TC for vlan packets if it's 802.1Q, and
		 * then reset the ethtype value; otherwise, hw will try to
		 * match the supplied ethtype value against the ethtype of
		 * the inner header.
		 */
		if (fs->val.ethtype == ETH_P_8021Q) {
			fs->val.ethtype = 0;
			fs->mask.ethtype = 0;
		}
	}

	/* Match only packets coming from the ingress port where this
	 * filter will be created.
	 */
	fs->val.iport = netdev2pinfo(dev)->port_id;
	fs->mask.iport = ~0;
}

static int cxgb4_validate_flow_match(struct net_device *dev,
				     struct flow_rule *rule)
{
	struct flow_dissector *dissector = rule->match.dissector;
	u16 ethtype_mask = 0;
	u16 ethtype_key = 0;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
		netdev_warn(dev, "Unsupported key used: 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ethtype_key = ntohs(match.key->n_proto);
		ethtype_mask = ntohs(match.mask->n_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		u16 eth_ip_type = ethtype_key & ethtype_mask;
		struct flow_match_ip match;

		if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
			netdev_err(dev, "IP Key supported only with IPv4/v6");
			return -EINVAL;
		}

		flow_rule_match_ip(rule, &match);
		if (match.mask->ttl) {
			netdev_warn(dev, "ttl match unsupported for offload");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

/* Copy the set value (val & ~mask) into the ch_filter_specification
 * member that corresponds to @field, using the offset and size from
 * the pedits lookup table.
 */
static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
			  u8 field)
{
	u32 set_val = val & ~mask;
	u32 offset = 0;
	u8 size = 1;
	int i;

	for (i = 0; i < ARRAY_SIZE(pedits); i++) {
		if (pedits[i].field == field) {
			offset = pedits[i].offset;
			size = pedits[i].size;
			break;
		}
	}
	memcpy((u8 *)fs + offset, &set_val, size);
}

static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
				u32 mask, u32 offset, u8 htype,
				u8 *natmode_flags)
{
	switch (htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
			fs->newdmac = 1;
			offload_pedit(fs, val, mask, ETH_DMAC_31_0);
			break;
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
			if (~mask & PEDIT_ETH_DMAC_MASK)
				offload_pedit(fs, val, mask, ETH_DMAC_47_32);
			else
				offload_pedit(fs, val >> 16, mask >> 16,
					      ETH_SMAC_15_0);
			break;
		case PEDIT_ETH_SMAC_47_16:
			fs->newsmac = 1;
			offload_pedit(fs, val, mask, ETH_SMAC_47_16);
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
			offload_pedit(fs, val, mask, IP4_SRC);
			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
			break;
		case PEDIT_IP4_DST:
			offload_pedit(fs, val, mask, IP4_DST);
			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
			offload_pedit(fs, val, mask, IP6_SRC_31_0);
			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
			break;
		case PEDIT_IP6_SRC_63_32:
			offload_pedit(fs, val, mask, IP6_SRC_63_32);
			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
			break;
		case PEDIT_IP6_SRC_95_64:
			offload_pedit(fs, val, mask, IP6_SRC_95_64);
			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
			break;
		case PEDIT_IP6_SRC_127_96:
			offload_pedit(fs, val, mask, IP6_SRC_127_96);
			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
			break;
		case PEDIT_IP6_DST_31_0:
			offload_pedit(fs, val, mask, IP6_DST_31_0);
			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
			break;
		case PEDIT_IP6_DST_63_32:
			offload_pedit(fs, val, mask, IP6_DST_63_32);
			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
			break;
		case PEDIT_IP6_DST_95_64:
			offload_pedit(fs, val, mask, IP6_DST_95_64);
			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
			break;
		case PEDIT_IP6_DST_127_96:
			offload_pedit(fs, val, mask, IP6_DST_127_96);
			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
				fs->nat_fport = val;
				*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
			} else {
				fs->nat_lport = val >> 16;
				*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
			}
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
				fs->nat_fport = val;
				*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
			} else {
				fs->nat_lport = val >> 16;
				*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
			}
		}
		break;
	}
}

static int cxgb4_action_natmode_validate(struct adapter *adap, u8 natmode_flags,
					 struct netlink_ext_ack *extack)
{
	u8 i = 0;

	/* Extract the NAT mode to enable based on what 4-tuple fields
	 * are enabled to be overwritten. This ensures that the
	 * disabled fields don't get overwritten to 0.
	 */
	for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
		const struct cxgb4_natmode_config *c;

		c = &cxgb4_natmode_config_array[i];
		if (CHELSIO_CHIP_VERSION(adap->params.chip) >= c->chip &&
		    natmode_flags == c->flags)
			return 0;
	}
	NL_SET_ERR_MSG_MOD(extack, "Unsupported NAT mode 4-tuple combination");
	return -EOPNOTSUPP;
}

void cxgb4_process_flow_actions(struct net_device *in,
				struct flow_action *actions,
				struct ch_filter_specification *fs)
{
	struct flow_action_entry *act;
	u8 natmode_flags = 0;
	int i;

	flow_action_for_each(i, act, actions) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			fs->action = FILTER_PASS;
			break;
		case FLOW_ACTION_DROP:
			fs->action = FILTER_DROP;
			break;
		case FLOW_ACTION_MIRRED:
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out = act->dev;
			struct port_info *pi = netdev_priv(out);

			fs->action = FILTER_SWITCH;
			fs->eport = pi->port_id;
			}
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE: {
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;
			u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;

			switch (act->id) {
			case FLOW_ACTION_VLAN_POP:
				fs->newvlan |= VLAN_REMOVE;
				break;
			case FLOW_ACTION_VLAN_PUSH:
				fs->newvlan |= VLAN_INSERT;
				fs->vlan = vlan_tci;
				break;
			case FLOW_ACTION_VLAN_MANGLE:
				fs->newvlan |= VLAN_REWRITE;
				fs->vlan = vlan_tci;
				break;
			default:
				break;
			}
			}
			break;
		case FLOW_ACTION_MANGLE: {
			u32 mask, val, offset;
			u8 htype;

			htype = act->mangle.htype;
			mask = act->mangle.mask;
			val = act->mangle.val;
			offset = act->mangle.offset;

			process_pedit_field(fs, val, mask, offset, htype,
					    &natmode_flags);
			}
			break;
		case FLOW_ACTION_QUEUE:
			fs->action = FILTER_PASS;
			fs->dirsteer = 1;
			fs->iq = act->queue.index;
			break;
		default:
			break;
		}
	}
	if (natmode_flags)
		cxgb4_action_natmode_tweak(fs, natmode_flags);
}

static bool valid_l4_mask(u32 mask)
{
	u16 hi, lo;

	/* Either the upper 16-bits (SPORT) OR the lower
	 * 16-bits (DPORT) can be set, but NOT BOTH.
	 */
	hi = (mask >> 16) & 0xFFFF;
	lo = mask & 0xFFFF;

	return hi && lo ? false : true;
}

static bool valid_pedit_action(struct net_device *dev,
			       const struct flow_action_entry *act,
			       u8 *natmode_flags)
{
	u32 mask, offset;
	u8 htype;

	htype = act->mangle.htype;
	mask = act->mangle.mask;
	offset = act->mangle.offset;

	switch (htype) {
	case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
		switch (offset) {
		case PEDIT_ETH_DMAC_31_0:
		case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
		case PEDIT_ETH_SMAC_47_16:
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
		switch (offset) {
		case PEDIT_IP4_SRC:
			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
			break;
		case PEDIT_IP4_DST:
			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
		switch (offset) {
		case PEDIT_IP6_SRC_31_0:
		case PEDIT_IP6_SRC_63_32:
		case PEDIT_IP6_SRC_95_64:
		case PEDIT_IP6_SRC_127_96:
			*natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
			break;
		case PEDIT_IP6_DST_31_0:
		case PEDIT_IP6_DST_63_32:
		case
 PEDIT_IP6_DST_95_64:
		case PEDIT_IP6_DST_127_96:
			*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		switch (offset) {
		case PEDIT_TCP_SPORT_DPORT:
			if (!valid_l4_mask(~mask)) {
				netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
					   __func__);
				return false;
			}
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
			else
				*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
		switch (offset) {
		case PEDIT_UDP_SPORT_DPORT:
			if (!valid_l4_mask(~mask)) {
				netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
					   __func__);
				return false;
			}
			if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
				*natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
			else
				*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
			break;
		default:
			netdev_err(dev, "%s: Unsupported pedit field\n",
				   __func__);
			return false;
		}
		break;
	default:
		netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
		return false;
	}
	return true;
}

int cxgb4_validate_flow_actions(struct net_device *dev,
				struct flow_action *actions,
				struct netlink_ext_ack *extack,
				u8 matchall_filter)
{
	struct adapter *adap = netdev2adap(dev);
	struct flow_action_entry *act;
	bool act_redir = false;
	bool act_pedit = false;
	bool act_vlan = false;
	u8 natmode_flags = 0;
	int i;

	if (!flow_action_basic_hw_stats_check(actions, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, actions) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
		case FLOW_ACTION_DROP:
			/* Do nothing */
			break;
		case FLOW_ACTION_MIRRED:
		case FLOW_ACTION_REDIRECT: {
			struct net_device *n_dev, *target_dev;
			bool found = false;
			unsigned int i;

			if (act->id == FLOW_ACTION_MIRRED &&
			    !matchall_filter) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Egress mirror action is only supported for tc-matchall");
				return -EOPNOTSUPP;
			}

			target_dev = act->dev;
			for_each_port(adap, i) {
				n_dev = adap->port[i];
				if (target_dev == n_dev) {
					found = true;
					break;
				}
			}

			/* If interface doesn't belong to our hw, then
			 * the provided output port is not valid
			 */
			if (!found) {
				netdev_err(dev, "%s: Out port invalid\n",
					   __func__);
				return -EINVAL;
			}
			act_redir = true;
			}
			break;
		case FLOW_ACTION_VLAN_POP:
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);

			switch (act->id) {
			case FLOW_ACTION_VLAN_POP:
				break;
			case FLOW_ACTION_VLAN_PUSH:
			case FLOW_ACTION_VLAN_MANGLE:
				if (proto != ETH_P_8021Q) {
					netdev_err(dev, "%s: Unsupported vlan proto\n",
						   __func__);
					return -EOPNOTSUPP;
				}
				break;
			default:
				netdev_err(dev, "%s: Unsupported vlan action\n",
					   __func__);
				return -EOPNOTSUPP;
			}
			act_vlan = true;
			}
			break;
		case FLOW_ACTION_MANGLE: {
			bool pedit_valid = valid_pedit_action(dev, act,
							      &natmode_flags);

			if (!pedit_valid)
				return -EOPNOTSUPP;
			act_pedit = true;
			}
			break;
		case FLOW_ACTION_QUEUE:
			/* Do nothing.
 cxgb4_set_filter will validate */
			break;
		default:
			netdev_err(dev, "%s: Unsupported action\n", __func__);
			return -EOPNOTSUPP;
		}
	}

	if ((act_pedit || act_vlan) && !act_redir) {
		netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
			   __func__);
		return -EINVAL;
	}

	if (act_pedit) {
		int ret;

		ret = cxgb4_action_natmode_validate(adap, natmode_flags,
						    extack);
		if (ret)
			return ret;
	}

	return 0;
}

static void cxgb4_tc_flower_hash_prio_add(struct adapter *adap, u32 tc_prio)
{
	spin_lock_bh(&adap->tids.ftid_lock);
	if (adap->tids.tc_hash_tids_max_prio < tc_prio)
		adap->tids.tc_hash_tids_max_prio = tc_prio;
	spin_unlock_bh(&adap->tids.ftid_lock);
}

static void cxgb4_tc_flower_hash_prio_del(struct adapter *adap, u32 tc_prio)
{
	struct tid_info *t = &adap->tids;
	struct ch_tc_flower_entry *fe;
	struct rhashtable_iter iter;
	u32 found = 0;

	spin_lock_bh(&t->ftid_lock);
	/* Bail if the current rule is not the one with the max
	 * prio.
	 */
	if (t->tc_hash_tids_max_prio != tc_prio)
		goto out_unlock;

	/* Search for the next rule having the same or next lower
	 * max prio.
	 */
	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		fe = rhashtable_walk_next(&iter);
		while (!IS_ERR_OR_NULL(fe)) {
			if (fe->fs.hash &&
			    fe->fs.tc_prio <= t->tc_hash_tids_max_prio) {
				t->tc_hash_tids_max_prio = fe->fs.tc_prio;
				found++;

				/* Bail if we found another rule
				 * having the same prio as the
				 * current max one.
				 */
				if (fe->fs.tc_prio == tc_prio)
					break;
			}

			fe = rhashtable_walk_next(&iter);
		}

		rhashtable_walk_stop(&iter);
	} while (fe == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);

	if (!found)
		t->tc_hash_tids_max_prio = 0;

out_unlock:
	spin_unlock_bh(&t->ftid_lock);
}

int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
			    u32 tc_prio, struct netlink_ext_ack *extack,
			    struct ch_filter_specification *fs, u32 *tid)
{
	struct adapter *adap = netdev2adap(dev);
	struct filter_ctx ctx;
	u8 inet_family;
	int fidx, ret;

	if (cxgb4_validate_flow_actions(dev, &rule->action, extack, 0))
		return -EOPNOTSUPP;

	if (cxgb4_validate_flow_match(dev, rule))
		return -EOPNOTSUPP;

	cxgb4_process_flow_match(dev, rule, fs);
	cxgb4_process_flow_actions(dev, &rule->action, fs);

	fs->hash = is_filter_exact_match(adap, fs);
	inet_family = fs->type ? PF_INET6 : PF_INET;

	/* Get a free filter entry TID, where we can insert this new
	 * rule. Only insert rule if its prio doesn't conflict with
	 * existing rules.
	 */
	fidx = cxgb4_get_free_ftid(dev, inet_family, fs->hash,
				   tc_prio);
	if (fidx < 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "No free LETCAM index available");
		return -ENOMEM;
	}

	if (fidx < adap->tids.nhpftids) {
		fs->prio = 1;
		fs->hash = 0;
	}

	/* If the rule can be inserted into HASH region, then ignore
	 * the index to normal FILTER region.
	 */
	if (fs->hash)
		fidx = 0;

	fs->tc_prio = tc_prio;

	init_completion(&ctx.completion);
	ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
	if (ret) {
		netdev_err(dev, "%s: filter creation err %d\n",
			   __func__, ret);
		return ret;
	}

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret)
		return -ETIMEDOUT;

	/* Check if hw returned error for filter creation */
	if (ctx.result)
		return ctx.result;

	*tid = ctx.tid;

	if (fs->hash)
		cxgb4_tc_flower_hash_prio_add(adap, tc_prio);

	return 0;
}

int cxgb4_tc_flower_replace(struct net_device *dev,
			    struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	struct ch_filter_specification *fs;
	int ret;

	ch_flower = allocate_flower_entry();
	if (!ch_flower) {
		netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
		return -ENOMEM;
	}

	fs = &ch_flower->fs;
	fs->hitcnts = 1;
	fs->tc_cookie = cls->cookie;

	ret = cxgb4_flow_rule_replace(dev, rule, cls->common.prio, extack, fs,
				      &ch_flower->filter_id);
	if (ret)
		goto free_entry;

	ch_flower->tc_flower_cookie = cls->cookie;
	ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
				     adap->flower_ht_params);
	if (ret)
		goto del_filter;

	return 0;

del_filter:
	if (fs->hash)
		cxgb4_tc_flower_hash_prio_del(adap, cls->common.prio);

	cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);

free_entry:
	kfree(ch_flower);
	return ret;
}

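/* Tear down an offloaded flow rule: delete the hardware filter and,
 * for hash (exact-match) filters, rebalance the tracked max tc prio.
 */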
int cxgb4_flow_rule_destroy(struct net_device *dev, u32 tc_prio,
			    struct ch_filter_specification *fs, int tid)
{
	struct adapter *adap = netdev2adap(dev);
	u8 hash;
	int ret;

	hash = fs->hash;

	ret = cxgb4_del_filter(dev, tid, fs);
	if (ret)
		return ret;

	if (hash)
		cxgb4_tc_flower_hash_prio_del(adap, tc_prio);

	return ret;
}

int cxgb4_tc_flower_destroy(struct net_device *dev,
			    struct flow_cls_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_entry *ch_flower;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower)
		return -ENOENT;

	rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
			       adap->flower_ht_params);

	ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
				      &ch_flower->fs, ch_flower->filter_id);
	if (ret)
		netdev_err(dev, "Flow rule destroy failed for tid: %u, ret: %d",
			   ch_flower->filter_id, ret);

	kfree_rcu(ch_flower, rcu);
	return ret;
}

static void ch_flower_stats_handler(struct work_struct *work)
{
	struct adapter *adap = container_of(work, struct adapter,
					    flower_stats_work);
	struct ch_tc_flower_entry *flower_entry;
	struct ch_tc_flower_stats *ofld_stats;
	struct rhashtable_iter iter;
	u64 packets;
	u64 bytes;
	int ret;

	rhashtable_walk_enter(&adap->flower_tbl, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((flower_entry = rhashtable_walk_next(&iter)) &&
		       !IS_ERR(flower_entry)) {
			ret = cxgb4_get_filter_counters(adap->port[0],
							flower_entry->filter_id,
							&packets, &bytes,
							flower_entry->fs.hash);
			if (!ret) {
				spin_lock(&flower_entry->lock);
				ofld_stats = &flower_entry->stats;

				if (ofld_stats->prev_packet_count != packets) {
					ofld_stats->prev_packet_count = packets;
					ofld_stats->last_used = jiffies;
				}
				spin_unlock(&flower_entry->lock);
			}
		}

		rhashtable_walk_stop(&iter);

	} while (flower_entry == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

static void ch_flower_stats_cb(struct timer_list *t)
{
	struct adapter *adap = from_timer(adap, t, flower_stats_timer);

	schedule_work(&adap->flower_stats_work);
}

int cxgb4_tc_flower_stats(struct net_device *dev,
			  struct flow_cls_offload *cls)
{
	struct adapter *adap = netdev2adap(dev);
	struct ch_tc_flower_stats *ofld_stats;
	struct ch_tc_flower_entry *ch_flower;
	u64 packets;
	u64 bytes;
	int ret;

	ch_flower = ch_flower_lookup(adap, cls->cookie);
	if (!ch_flower) {
		ret = -ENOENT;
		goto err;
	}

	ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
					&packets, &bytes,
					ch_flower->fs.hash);
	if (ret < 0)
		goto err;

	spin_lock_bh(&ch_flower->lock);
	ofld_stats = &ch_flower->stats;
	if (ofld_stats->packet_count != packets) {
		if (ofld_stats->prev_packet_count != packets)
			ofld_stats->last_used = jiffies;
		flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
				  packets - ofld_stats->packet_count, 0,
				  ofld_stats->last_used,
				  FLOW_ACTION_HW_STATS_IMMEDIATE);

		ofld_stats->packet_count = packets;
		ofld_stats->byte_count = bytes;
		ofld_stats->prev_packet_count = packets;
	}
	spin_unlock_bh(&ch_flower->lock);
	return 0;

err:
	return ret;
}

static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
	.nelem_hint = 384,
	.head_offset = offsetof(struct ch_tc_flower_entry, node),
	.key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
	.key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
	.max_size = 524288,
	.min_size = 512,
	.automatic_shrinking = true
};

int cxgb4_init_tc_flower(struct adapter *adap)
{
	int ret;

	if (adap->tc_flower_initialized)
		return -EEXIST;

	adap->flower_ht_params = cxgb4_tc_flower_ht_params;
	ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
	if (ret)
		return ret;

	INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
	timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
	mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
	adap->tc_flower_initialized = true;
	return 0;
}

void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
	if (!adap->tc_flower_initialized)
		return;

	if (adap->flower_stats_timer.function)
		del_timer_sync(&adap->flower_stats_timer);
	cancel_work_sync(&adap->flower_stats_work);
	rhashtable_destroy(&adap->flower_tbl);
	adap->tc_flower_initialized = false;
}