Lines Matching +full:mac +full:- +full:address

1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2017-2018 Netronome Systems, Inc. */
23 * struct nfp_tun_pre_run_rule - rule matched before decap
25 * @port_idx: index of destination MAC address for the rule
26 * @vlan_tci: VLAN info associated with MAC
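The kernel-doc fragment above lists only two members of the pre-tunnel rule message; the fields used later in the listing by nfp_flower_xmit_pre_tun_flow() (flags, host_ctx_id) suggest a layout roughly like the sketch below. Field widths, ordering and padding here are assumptions, not the driver's wire format.

#include <stdint.h>

/* Hypothetical model of the pre-tunnel rule message; the real struct
 * is defined in the driver and is packed/big-endian on the wire. */
struct pre_run_rule_msg {
	uint32_t flags;        /* rule flags (assumed member) */
	uint16_t port_idx;     /* index of destination MAC for the rule */
	uint16_t vlan_tci;     /* VLAN info associated with the MAC */
	uint32_t host_ctx_id;  /* host context id copied from flow metadata */
};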
37 * struct nfp_tun_active_tuns - periodic message of active tunnels
41 * @tun_info.ipv4: dest IPv4 address of active route
58 * struct nfp_tun_active_tuns_v6 - periodic message of active IPv6 tunnels
62 * @tun_info.ipv6: dest IPv6 address of active route
79 * struct nfp_tun_neigh - neighbour/route entry on the NFP
80 * @dst_ipv4: destination IPv4 address
81 * @src_ipv4: source IPv4 address
82 * @dst_addr: destination MAC address
83 * @src_addr: source MAC address
84 * @port_id: NFP port to output packet on - associated with source IPv4
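A rough model of the IPv4 neighbour/route entry the driver pushes to the NFP, based only on the members documented above; the address and index types are assumptions (big-endian 32-bit values and raw 6-byte MACs), and the real struct is packed for the control channel.

#include <stdint.h>

struct tun_neigh_v4 {
	uint32_t dst_ipv4;     /* destination IPv4 address */
	uint32_t src_ipv4;     /* source IPv4 address */
	uint8_t  dst_addr[6];  /* destination MAC address */
	uint8_t  src_addr[6];  /* source MAC address */
	uint32_t port_id;      /* NFP port to output on, tied to the source IPv4 */
};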
95 * struct nfp_tun_neigh_v6 - neighbour/route entry on the NFP
96 * @dst_ipv6: destination IPv6 address
97 * @src_ipv6: source IPv6 address
98 * @dst_addr: destination MAC address
99 * @src_addr: source MAC address
100 * @port_id: NFP port to output packet on - associated with source IPv6
111 * struct nfp_tun_req_route_ipv4 - NFP requests a route/neighbour lookup
113 * @ipv4_addr: destination ipv4 address for route
123 * struct nfp_tun_req_route_ipv6 - NFP requests an IPv6 route/neighbour lookup
125 * @ipv6_addr: destination ipv6 address for route
133 * struct nfp_offloaded_route - routes that are offloaded to the NFP
135 * @ip_add: destination of route - can be IPv4 or IPv6
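Because @ip_add can hold either an IPv4 or an IPv6 destination, the cache code further down compares entries with memcmp() over an explicit length. A userspace model of such an entry, with the kernel list_head simplified to a next pointer, might look like this (sizes and naming are assumptions):

#include <stddef.h>
#include <stdint.h>

/* Offloaded-route cache entry keyed by a raw address blob; a flexible
 * array lets one entry type carry 4-byte IPv4 or 16-byte IPv6 keys. */
struct offloaded_route {
	struct offloaded_route *next;
	size_t  add_len;   /* 4 for IPv4, 16 for IPv6 (assumed) */
	uint8_t ip_add[];  /* destination of the route */
};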
145 * struct nfp_tun_ipv4_addr - set the IP address list on the NFP
155 * struct nfp_ipv4_addr_entry - cached IPv4 addresses
156 * @ipv4_addr: IP address
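The add/del code later in the listing shows this cached entry carrying a reference count and sitting on a list; a minimal single-threaded model (next pointer instead of list_head, locking omitted) is sketched below.

#include <stdint.h>

struct ipv4_addr_entry_model {
	uint32_t ipv4_addr;  /* IP address, network byte order assumed */
	int      ref_count;  /* number of offloaded flows using it */
	struct ipv4_addr_entry_model *next;
};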
169 * struct nfp_tun_ipv6_addr - set the IP address list on the NFP
181 * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
182 * @flags: MAC address offload options
183 * @count: number of MAC addresses in the message (should be 1)
184 * @index: index of MAC address in the lookup table
185 * @addr: interface MAC address
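A rough model of the MAC offload control message described above. The kernel-doc notes that @count should be 1, i.e. one address per message; the widths and any implicit padding below are guesses rather than the firmware ABI.

#include <stdint.h>

struct tun_mac_addr_msg {
	uint16_t flags;    /* MAC address offload options */
	uint16_t count;    /* number of MAC addresses in the message, normally 1 */
	uint16_t index;    /* index of the MAC address in the lookup table */
	uint8_t  addr[6];  /* interface MAC address */
};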
203 * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
205 * @addr: Offloaded MAC address
206 * @index: Offloaded index for given MAC address
207 * @ref_count: Number of devs using this MAC address
208 * @repr_list: List of reprs sharing this MAC address
209 * @bridge_count: Number of bridge/internal devs with MAC
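In the driver this entry lives in an rhashtable keyed by the 6-byte address; the userspace model below collapses the hash node and the repr list to plain pointers and only keeps the counters the later MAC-sharing logic relies on. Names and types are illustrative.

#include <stdint.h>

struct repr_node;  /* stand-in for the per-repr list linkage */

struct offloaded_mac_model {
	uint8_t  addr[6];       /* offloaded MAC address (hash key) */
	uint16_t index;         /* index handed to the firmware for this MAC */
	int      ref_count;     /* number of netdevs using this MAC */
	int      bridge_count;  /* bridge/internal devices with this MAC */
	struct offloaded_mac_model *hash_next;  /* stand-in for the rhashtable node */
	struct repr_node *repr_list;            /* reprs sharing this MAC */
};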
237 count = be32_to_cpu(payload->count); in nfp_tunnel_keep_alive()
239 nfp_flower_cmsg_warn(app, "Tunnel keep-alive request exceeds max routes.\n"); in nfp_tunnel_keep_alive()
245 nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n"); in nfp_tunnel_keep_alive()
251 ipv4_addr = payload->tun_info[i].ipv4; in nfp_tunnel_keep_alive()
252 port = be32_to_cpu(payload->tun_info[i].egress_port); in nfp_tunnel_keep_alive()
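nfp_tunnel_keep_alive() validates the advertised tunnel count against both a hard cap and the actual message size before walking the per-tunnel records. A standalone sketch of that bounds check, using a hypothetical MAX_TUN_ROUTES in place of the driver's limit and a simplified record layout:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <arpa/inet.h>   /* ntohl() */

#define MAX_TUN_ROUTES 32   /* hypothetical cap, not the driver's constant */

struct tun_record { uint32_t ipv4; uint32_t egress_port; };
struct keep_alive_msg { uint32_t count; struct tun_record tun_info[]; };

/* Reject oversized or truncated messages before touching the records,
 * mirroring the two warnings in the keep-alive handler. */
static int parse_keep_alive(const struct keep_alive_msg *msg, size_t len)
{
	uint32_t count = ntohl(msg->count);

	if (count > MAX_TUN_ROUTES) {
		fprintf(stderr, "keep-alive exceeds max routes\n");
		return -1;
	}
	if (len < sizeof(*msg) + count * sizeof(struct tun_record)) {
		fprintf(stderr, "corrupt keep-alive message\n");
		return -1;
	}
	for (uint32_t i = 0; i < count; i++) {
		uint32_t ipv4 = msg->tun_info[i].ipv4;             /* kept big-endian, used as a key */
		uint32_t port = ntohl(msg->tun_info[i].egress_port);
		/* ...refresh the flows that use (ipv4, port)... */
		(void)ipv4; (void)port;
	}
	return 0;
}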
279 count = be32_to_cpu(payload->count); in nfp_tunnel_keep_alive_v6()
281 nfp_flower_cmsg_warn(app, "IPv6 tunnel keep-alive request exceeds max routes.\n"); in nfp_tunnel_keep_alive_v6()
287 nfp_flower_cmsg_warn(app, "Corruption in tunnel keep-alive message.\n"); in nfp_tunnel_keep_alive_v6()
293 ipv6_add = &payload->tun_info[i].ipv6; in nfp_tunnel_keep_alive_v6()
294 port = be32_to_cpu(payload->tun_info[i].egress_port); in nfp_tunnel_keep_alive_v6()
320 return -ENOMEM; in nfp_flower_xmit_tun_conf()
325 nfp_ctrl_tx(app->ctrl, skb); in nfp_flower_xmit_tun_conf()
337 if (!memcmp(entry->ip_add, add, add_len)) { in __nfp_tun_has_route()
353 if (!memcmp(entry->ip_add, add, add_len)) { in __nfp_tun_add_route_to_cache()
361 return -ENOMEM; in __nfp_tun_add_route_to_cache()
364 memcpy(entry->ip_add, add, add_len); in __nfp_tun_add_route_to_cache()
365 list_add_tail(&entry->list, route_list); in __nfp_tun_add_route_to_cache()
379 if (!memcmp(entry->ip_add, add, add_len)) { in __nfp_tun_del_route_from_cache()
380 list_del(&entry->list); in __nfp_tun_del_route_from_cache()
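The three __nfp_tun_* helpers above implement one pattern shared by the IPv4 and IPv6 wrappers that follow: a lock-protected list of raw address blobs with has/add/del keyed by memcmp(). A simplified single-threaded model (no spinlock, plain singly linked list, fixed 16-byte key storage):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

struct route_entry {
	struct route_entry *next;
	size_t len;                  /* 4 or 16, supplied by the caller */
	unsigned char ip_add[16];
};

static bool route_cached(struct route_entry *head, const void *add, size_t len)
{
	for (struct route_entry *e = head; e; e = e->next)
		if (e->len == len && !memcmp(e->ip_add, add, len))
			return true;
	return false;
}

static int route_cache_add(struct route_entry **head, const void *add, size_t len)
{
	if (route_cached(*head, add, len))
		return 0;                /* already tracked, nothing to do */

	struct route_entry *e = calloc(1, sizeof(*e));
	if (!e)
		return -1;               /* the driver returns -ENOMEM here */
	memcpy(e->ip_add, add, len);
	e->len = len;
	e->next = *head;
	*head = e;
	return 0;
}

static void route_cache_del(struct route_entry **head, const void *add, size_t len)
{
	for (struct route_entry **pp = head; *pp; pp = &(*pp)->next) {
		if ((*pp)->len == len && !memcmp((*pp)->ip_add, add, len)) {
			struct route_entry *dead = *pp;
			*pp = dead->next;
			free(dead);
			return;
		}
	}
}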
389 struct nfp_flower_priv *priv = app->priv; in nfp_tun_has_route_v4()
391 return __nfp_tun_has_route(&priv->tun.neigh_off_list_v4, in nfp_tun_has_route_v4()
392 &priv->tun.neigh_off_lock_v4, ipv4_addr, in nfp_tun_has_route_v4()
399 struct nfp_flower_priv *priv = app->priv; in nfp_tun_has_route_v6()
401 return __nfp_tun_has_route(&priv->tun.neigh_off_list_v6, in nfp_tun_has_route_v6()
402 &priv->tun.neigh_off_lock_v6, ipv6_addr, in nfp_tun_has_route_v6()
409 struct nfp_flower_priv *priv = app->priv; in nfp_tun_add_route_to_cache_v4()
411 __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v4, in nfp_tun_add_route_to_cache_v4()
412 &priv->tun.neigh_off_lock_v4, ipv4_addr, in nfp_tun_add_route_to_cache_v4()
419 struct nfp_flower_priv *priv = app->priv; in nfp_tun_add_route_to_cache_v6()
421 __nfp_tun_add_route_to_cache(&priv->tun.neigh_off_list_v6, in nfp_tun_add_route_to_cache_v6()
422 &priv->tun.neigh_off_lock_v6, ipv6_addr, in nfp_tun_add_route_to_cache_v6()
429 struct nfp_flower_priv *priv = app->priv; in nfp_tun_del_route_from_cache_v4()
431 __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v4, in nfp_tun_del_route_from_cache_v4()
432 &priv->tun.neigh_off_lock_v4, ipv4_addr, in nfp_tun_del_route_from_cache_v4()
439 struct nfp_flower_priv *priv = app->priv; in nfp_tun_del_route_from_cache_v6()
441 __nfp_tun_del_route_from_cache(&priv->tun.neigh_off_list_v6, in nfp_tun_del_route_from_cache_v6()
442 &priv->tun.neigh_off_lock_v6, ipv6_addr, in nfp_tun_del_route_from_cache_v6()
458 payload.dst_ipv4 = flow->daddr; in nfp_tun_write_neigh_v4()
461 if (!(neigh->nud_state & NUD_VALID) || neigh->dead) { in nfp_tun_write_neigh_v4()
469 payload.src_ipv4 = flow->saddr; in nfp_tun_write_neigh_v4()
470 ether_addr_copy(payload.src_addr, netdev->dev_addr); in nfp_tun_write_neigh_v4()
494 payload.dst_ipv6 = flow->daddr; in nfp_tun_write_neigh_v6()
497 if (!(neigh->nud_state & NUD_VALID) || neigh->dead) { in nfp_tun_write_neigh_v6()
505 payload.src_ipv6 = flow->saddr; in nfp_tun_write_neigh_v6()
506 ether_addr_copy(payload.src_addr, netdev->dev_addr); in nfp_tun_write_neigh_v6()
535 n = redir->neigh; in nfp_tun_neigh_event_handler()
544 if (n->tbl->family == AF_INET6) in nfp_tun_neigh_event_handler()
548 flow6.daddr = *(struct in6_addr *)n->primary_key; in nfp_tun_neigh_event_handler()
550 flow4.daddr = *(__be32 *)n->primary_key; in nfp_tun_neigh_event_handler()
553 app = app_priv->app; in nfp_tun_neigh_event_handler()
555 if (!nfp_netdev_is_nfp_repr(n->dev) && in nfp_tun_neigh_event_handler()
556 !nfp_flower_internal_port_can_offload(app, n->dev)) in nfp_tun_neigh_event_handler()
569 dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(n->dev), NULL, in nfp_tun_neigh_event_handler()
576 nfp_tun_write_neigh_v6(n->dev, app, &flow6, n, GFP_ATOMIC); in nfp_tun_neigh_event_handler()
582 rt = ip_route_output_key(dev_net(n->dev), &flow4); in nfp_tun_neigh_event_handler()
590 nfp_tun_write_neigh_v4(n->dev, app, &flow4, n, GFP_ATOMIC); in nfp_tun_neigh_event_handler()
611 netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL); in nfp_tunnel_request_route_v4()
615 flow.daddr = payload->ipv4_addr; in nfp_tunnel_request_route_v4()
629 n = dst_neigh_lookup(&rt->dst, &flow.daddr); in nfp_tunnel_request_route_v4()
633 nfp_tun_write_neigh_v4(n->dev, app, &flow, n, GFP_ATOMIC); in nfp_tunnel_request_route_v4()
654 netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL); in nfp_tunnel_request_route_v6()
658 flow.daddr = payload->ipv6_addr; in nfp_tunnel_request_route_v6()
662 dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(netdev), NULL, &flow, in nfp_tunnel_request_route_v6()
675 nfp_tun_write_neigh_v6(n->dev, app, &flow, n, GFP_ATOMIC); in nfp_tunnel_request_route_v6()
687 struct nfp_flower_priv *priv = app->priv; in nfp_tun_write_ipv4_list()
694 mutex_lock(&priv->tun.ipv4_off_lock); in nfp_tun_write_ipv4_list()
696 list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) { in nfp_tun_write_ipv4_list()
698 mutex_unlock(&priv->tun.ipv4_off_lock); in nfp_tun_write_ipv4_list()
703 payload.ipv4_addr[count++] = entry->ipv4_addr; in nfp_tun_write_ipv4_list()
706 mutex_unlock(&priv->tun.ipv4_off_lock); in nfp_tun_write_ipv4_list()
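nfp_tun_write_ipv4_list() walks the cached endpoint addresses under the mutex and packs them into a fixed-size message before pushing it to the firmware, bailing out with a warning if the list outgrows the message. A standalone sketch of that bounded copy, with a hypothetical MAX_OFFLOADED_IPS standing in for the driver's limit:

#include <stdint.h>
#include <string.h>

#define MAX_OFFLOADED_IPS 32   /* hypothetical cap, not the driver's constant */

struct ip_entry { uint32_t ipv4_addr; struct ip_entry *next; };
struct ip_list_msg { uint32_t count; uint32_t ipv4_addr[MAX_OFFLOADED_IPS]; };

/* Serialize the cached list into one message; stop at the cap rather
 * than overrun the fixed array (endianness conversion omitted). */
static int build_ip_list_msg(const struct ip_entry *head, struct ip_list_msg *msg)
{
	uint32_t count = 0;

	memset(msg, 0, sizeof(*msg));
	for (const struct ip_entry *e = head; e; e = e->next) {
		if (count >= MAX_OFFLOADED_IPS)
			return -1;       /* too many addresses to offload */
		msg->ipv4_addr[count++] = e->ipv4_addr;
	}
	msg->count = count;
	return 0;
}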
715 struct nfp_flower_priv *priv = app->priv; in nfp_tunnel_add_ipv4_off()
719 mutex_lock(&priv->tun.ipv4_off_lock); in nfp_tunnel_add_ipv4_off()
720 list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) { in nfp_tunnel_add_ipv4_off()
722 if (entry->ipv4_addr == ipv4) { in nfp_tunnel_add_ipv4_off()
723 entry->ref_count++; in nfp_tunnel_add_ipv4_off()
724 mutex_unlock(&priv->tun.ipv4_off_lock); in nfp_tunnel_add_ipv4_off()
731 mutex_unlock(&priv->tun.ipv4_off_lock); in nfp_tunnel_add_ipv4_off()
732 nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n"); in nfp_tunnel_add_ipv4_off()
735 entry->ipv4_addr = ipv4; in nfp_tunnel_add_ipv4_off()
736 entry->ref_count = 1; in nfp_tunnel_add_ipv4_off()
737 list_add_tail(&entry->list, &priv->tun.ipv4_off_list); in nfp_tunnel_add_ipv4_off()
738 mutex_unlock(&priv->tun.ipv4_off_lock); in nfp_tunnel_add_ipv4_off()
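The add path above either bumps the reference count of an existing entry or allocates a new one with ref_count = 1 and re-sends the list; the delete path that follows drops the count and frees the entry when it reaches zero. A compact single-threaded model of that get/put pairing (locking and the firmware update omitted):

#include <stdint.h>
#include <stdlib.h>

struct ip_ref { uint32_t ipv4_addr; int ref_count; struct ip_ref *next; };

/* Take a reference on ipv4, creating the entry on first use. Returns
 * -1 on allocation failure (the driver warns and carries on). */
static int ip_ref_get(struct ip_ref **head, uint32_t ipv4)
{
	for (struct ip_ref *e = *head; e; e = e->next) {
		if (e->ipv4_addr == ipv4) {
			e->ref_count++;
			return 0;
		}
	}

	struct ip_ref *e = calloc(1, sizeof(*e));
	if (!e)
		return -1;
	e->ipv4_addr = ipv4;
	e->ref_count = 1;
	e->next = *head;
	*head = e;
	return 0;    /* caller re-sends the full list to the firmware */
}

/* Drop a reference; free the entry once nothing uses the address. */
static void ip_ref_put(struct ip_ref **head, uint32_t ipv4)
{
	for (struct ip_ref **pp = head; *pp; pp = &(*pp)->next) {
		if ((*pp)->ipv4_addr == ipv4) {
			if (--(*pp)->ref_count == 0) {
				struct ip_ref *dead = *pp;
				*pp = dead->next;
				free(dead);
			}
			return;
		}
	}
}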
745 struct nfp_flower_priv *priv = app->priv; in nfp_tunnel_del_ipv4_off()
749 mutex_lock(&priv->tun.ipv4_off_lock); in nfp_tunnel_del_ipv4_off()
750 list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) { in nfp_tunnel_del_ipv4_off()
752 if (entry->ipv4_addr == ipv4) { in nfp_tunnel_del_ipv4_off()
753 entry->ref_count--; in nfp_tunnel_del_ipv4_off()
754 if (!entry->ref_count) { in nfp_tunnel_del_ipv4_off()
755 list_del(&entry->list); in nfp_tunnel_del_ipv4_off()
761 mutex_unlock(&priv->tun.ipv4_off_lock); in nfp_tunnel_del_ipv4_off()
768 struct nfp_flower_priv *priv = app->priv; in nfp_tun_write_ipv6_list()
774 mutex_lock(&priv->tun.ipv6_off_lock); in nfp_tun_write_ipv6_list()
775 list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) { in nfp_tun_write_ipv6_list()
780 payload.ipv6_addr[count++] = entry->ipv6_addr; in nfp_tun_write_ipv6_list()
782 mutex_unlock(&priv->tun.ipv6_off_lock); in nfp_tun_write_ipv6_list()
793 struct nfp_flower_priv *priv = app->priv; in nfp_tunnel_add_ipv6_off()
796 mutex_lock(&priv->tun.ipv6_off_lock); in nfp_tunnel_add_ipv6_off()
797 list_for_each_entry(entry, &priv->tun.ipv6_off_list, list) in nfp_tunnel_add_ipv6_off()
798 if (!memcmp(&entry->ipv6_addr, ipv6, sizeof(*ipv6))) { in nfp_tunnel_add_ipv6_off()
799 entry->ref_count++; in nfp_tunnel_add_ipv6_off()
800 mutex_unlock(&priv->tun.ipv6_off_lock); in nfp_tunnel_add_ipv6_off()
806 mutex_unlock(&priv->tun.ipv6_off_lock); in nfp_tunnel_add_ipv6_off()
807 nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n"); in nfp_tunnel_add_ipv6_off()
810 entry->ipv6_addr = *ipv6; in nfp_tunnel_add_ipv6_off()
811 entry->ref_count = 1; in nfp_tunnel_add_ipv6_off()
812 list_add_tail(&entry->list, &priv->tun.ipv6_off_list); in nfp_tunnel_add_ipv6_off()
813 mutex_unlock(&priv->tun.ipv6_off_lock); in nfp_tunnel_add_ipv6_off()
823 struct nfp_flower_priv *priv = app->priv; in nfp_tunnel_put_ipv6_off()
826 mutex_lock(&priv->tun.ipv6_off_lock); in nfp_tunnel_put_ipv6_off()
827 if (!--entry->ref_count) { in nfp_tunnel_put_ipv6_off()
828 list_del(&entry->list); in nfp_tunnel_put_ipv6_off()
832 mutex_unlock(&priv->tun.ipv6_off_lock); in nfp_tunnel_put_ipv6_off()
839 __nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del) in __nfp_tunnel_offload_mac() argument
851 ether_addr_copy(payload.addr, mac); in __nfp_tunnel_offload_mac()
888 nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac) in nfp_tunnel_lookup_offloaded_macs() argument
890 struct nfp_flower_priv *priv = app->priv; in nfp_tunnel_lookup_offloaded_macs()
892 return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac, in nfp_tunnel_lookup_offloaded_macs()
905 repr_priv = repr->app_priv; in nfp_tunnel_offloaded_macs_inc_ref_and_link()
907 /* If modifying MAC, remove repr from old list first. */ in nfp_tunnel_offloaded_macs_inc_ref_and_link()

909 list_del(&repr_priv->mac_list); in nfp_tunnel_offloaded_macs_inc_ref_and_link()
911 list_add_tail(&repr_priv->mac_list, &entry->repr_list); in nfp_tunnel_offloaded_macs_inc_ref_and_link()
913 entry->bridge_count++; in nfp_tunnel_offloaded_macs_inc_ref_and_link()
916 entry->ref_count++; in nfp_tunnel_offloaded_macs_inc_ref_and_link()
923 struct nfp_flower_priv *priv = app->priv; in nfp_tunnel_add_shared_mac()
928 entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr); in nfp_tunnel_add_shared_mac()
929 if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) { in nfp_tunnel_add_shared_mac()
930 if (entry->bridge_count || in nfp_tunnel_add_shared_mac()
937 /* MAC is global but matches need to go to pre_tun table. */ in nfp_tunnel_add_shared_mac()
938 nfp_mac_idx = entry->index | NFP_TUN_PRE_TUN_IDX_BIT; in nfp_tunnel_add_shared_mac()
942 /* Assign a global index if non-repr or MAC is now shared. */ in nfp_tunnel_add_shared_mac()
944 ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0, in nfp_tunnel_add_shared_mac()
964 err = -ENOMEM; in nfp_tunnel_add_shared_mac()
968 ether_addr_copy(entry->addr, netdev->dev_addr); in nfp_tunnel_add_shared_mac()
969 INIT_LIST_HEAD(&entry->repr_list); in nfp_tunnel_add_shared_mac()
971 if (rhashtable_insert_fast(&priv->tun.offloaded_macs, in nfp_tunnel_add_shared_mac()
972 &entry->ht_node, in nfp_tunnel_add_shared_mac()
974 err = -ENOMEM; in nfp_tunnel_add_shared_mac()
979 err = __nfp_tunnel_offload_mac(app, netdev->dev_addr, in nfp_tunnel_add_shared_mac()
983 if (!entry->ref_count) in nfp_tunnel_add_shared_mac()
988 entry->index = nfp_mac_idx; in nfp_tunnel_add_shared_mac()
994 rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node, in nfp_tunnel_add_shared_mac()
1000 ida_simple_remove(&priv->tun.mac_off_ids, ida_idx); in nfp_tunnel_add_shared_mac()
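nfp_tunnel_add_shared_mac() hands out either a per-port index or a globally allocated one (via the ida), and for bridge/internal devices tags the index with NFP_TUN_PRE_TUN_IDX_BIT so matching traffic is steered to the pre-tunnel rule table; nfp_tunnel_del_shared_mac() clears the bit again when the last bridge user goes away. The bit value and index width below are placeholders; only the set/clear pattern mirrors the driver:

#include <stdint.h>
#include <stdbool.h>

#define PRE_TUN_IDX_BIT 0x8000u   /* placeholder flag bit, not the driver's value */

/* Tag a MAC index so matches go to the pre-tunnel rule table. */
static inline uint16_t mac_idx_set_pre_tun(uint16_t idx)
{
	return idx | PRE_TUN_IDX_BIT;
}

/* Revert to the untagged index once no bridge/internal device uses the MAC. */
static inline uint16_t mac_idx_clear_pre_tun(uint16_t idx)
{
	return idx & (uint16_t)~PRE_TUN_IDX_BIT;
}

static inline bool mac_idx_is_pre_tun(uint16_t idx)
{
	return (idx & PRE_TUN_IDX_BIT) != 0;
}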
1007 u8 *mac, bool mod) in nfp_tunnel_del_shared_mac() argument
1009 struct nfp_flower_priv *priv = app->priv; in nfp_tunnel_del_shared_mac()
1015 entry = nfp_tunnel_lookup_offloaded_macs(app, mac); in nfp_tunnel_del_shared_mac()
1019 entry->ref_count--; in nfp_tunnel_del_shared_mac()
1023 repr_priv = repr->app_priv; in nfp_tunnel_del_shared_mac()
1024 list_del(&repr_priv->mac_list); in nfp_tunnel_del_shared_mac()
1028 entry->bridge_count--; in nfp_tunnel_del_shared_mac()
1030 if (!entry->bridge_count && entry->ref_count) { in nfp_tunnel_del_shared_mac()
1033 nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT; in nfp_tunnel_del_shared_mac()
1034 if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, in nfp_tunnel_del_shared_mac()
1036 nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n", in nfp_tunnel_del_shared_mac()
1041 entry->index = nfp_mac_idx; in nfp_tunnel_del_shared_mac()
1046 /* If MAC is now used by 1 repr set the offloaded MAC index to port. */ in nfp_tunnel_del_shared_mac()
1047 if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) { in nfp_tunnel_del_shared_mac()
1051 repr_priv = list_first_entry(&entry->repr_list, in nfp_tunnel_del_shared_mac()
1054 repr = repr_priv->nfp_repr; in nfp_tunnel_del_shared_mac()
1055 port = nfp_repr_get_port_id(repr->netdev); in nfp_tunnel_del_shared_mac()
1057 err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false); in nfp_tunnel_del_shared_mac()
1059 nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n", in nfp_tunnel_del_shared_mac()
1064 ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index); in nfp_tunnel_del_shared_mac()
1065 ida_simple_remove(&priv->tun.mac_off_ids, ida_idx); in nfp_tunnel_del_shared_mac()
1066 entry->index = nfp_mac_idx; in nfp_tunnel_del_shared_mac()
1070 if (entry->ref_count) in nfp_tunnel_del_shared_mac()
1073 WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs, in nfp_tunnel_del_shared_mac()
1074 &entry->ht_node, in nfp_tunnel_del_shared_mac()
1076 /* If MAC has global ID then extract and free the ida entry. */ in nfp_tunnel_del_shared_mac()
1077 if (nfp_tunnel_is_mac_idx_global(entry->index)) { in nfp_tunnel_del_shared_mac()
1078 ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index); in nfp_tunnel_del_shared_mac()
1079 ida_simple_remove(&priv->tun.mac_off_ids, ida_idx); in nfp_tunnel_del_shared_mac()
1084 return __nfp_tunnel_offload_mac(app, mac, 0, true); in nfp_tunnel_del_shared_mac()
1101 if (repr->app != app) in nfp_tunnel_offload_mac()
1104 repr_priv = repr->app_priv; in nfp_tunnel_offload_mac()
1105 if (repr_priv->on_bridge) in nfp_tunnel_offload_mac()
1108 mac_offloaded = &repr_priv->mac_offloaded; in nfp_tunnel_offload_mac()
1109 off_mac = &repr_priv->offloaded_mac_addr[0]; in nfp_tunnel_offload_mac()
1116 return -ENOMEM; in nfp_tunnel_offload_mac()
1118 mac_offloaded = &nr_priv->mac_offloaded; in nfp_tunnel_offload_mac()
1119 off_mac = &nr_priv->offloaded_mac_addr[0]; in nfp_tunnel_offload_mac()
1125 if (!is_valid_ether_addr(netdev->dev_addr)) { in nfp_tunnel_offload_mac()
1126 err = -EINVAL; in nfp_tunnel_offload_mac()
1143 ether_addr_copy(off_mac, netdev->dev_addr); in nfp_tunnel_offload_mac()
1155 err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr, in nfp_tunnel_offload_mac()
1162 /* Ignore if changing to the same address. */ in nfp_tunnel_offload_mac()
1163 if (ether_addr_equal(netdev->dev_addr, off_mac)) in nfp_tunnel_offload_mac()
1170 /* Delete the previous MAC address. */ in nfp_tunnel_offload_mac()
1173 nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n", in nfp_tunnel_offload_mac()
1176 ether_addr_copy(off_mac, netdev->dev_addr); in nfp_tunnel_offload_mac()
1179 err = -EINVAL; in nfp_tunnel_offload_mac()
1205 nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n", in nfp_tunnel_mac_event_handler()
1211 nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n", in nfp_tunnel_mac_event_handler()
1215 if (!(netdev->flags & IFF_UP)) in nfp_tunnel_mac_event_handler()
1221 nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n", in nfp_tunnel_mac_event_handler()
1231 struct net_device *upper = info->upper_dev; in nfp_tunnel_mac_event_handler()
1240 if (repr->app != app) in nfp_tunnel_mac_event_handler()
1243 repr_priv = repr->app_priv; in nfp_tunnel_mac_event_handler()
1245 if (info->linking) { in nfp_tunnel_mac_event_handler()
1248 nfp_flower_cmsg_warn(app, "Failed to delete offloaded MAC on %s.\n", in nfp_tunnel_mac_event_handler()
1250 repr_priv->on_bridge = true; in nfp_tunnel_mac_event_handler()
1252 repr_priv->on_bridge = false; in nfp_tunnel_mac_event_handler()
1254 if (!(netdev->flags & IFF_UP)) in nfp_tunnel_mac_event_handler()
1259 nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n", in nfp_tunnel_mac_event_handler()
1269 struct nfp_flower_priv *app_priv = app->priv; in nfp_flower_xmit_pre_tun_flow()
1275 if (app_priv->pre_tun_rule_cnt == NFP_TUN_PRE_TUN_RULE_LIMIT) in nfp_flower_xmit_pre_tun_flow()
1276 return -ENOSPC; in nfp_flower_xmit_pre_tun_flow()
1280 internal_dev = flow->pre_tun_rule.dev; in nfp_flower_xmit_pre_tun_flow()
1281 payload.vlan_tci = flow->pre_tun_rule.vlan_tci; in nfp_flower_xmit_pre_tun_flow()
1282 payload.host_ctx_id = flow->meta.host_ctx_id; in nfp_flower_xmit_pre_tun_flow()
1284 /* Lookup MAC index for the pre-tunnel rule egress device. in nfp_flower_xmit_pre_tun_flow()
1289 internal_dev->dev_addr); in nfp_flower_xmit_pre_tun_flow()
1291 return -ENOENT; in nfp_flower_xmit_pre_tun_flow()
1293 payload.port_idx = cpu_to_be16(mac_entry->index); in nfp_flower_xmit_pre_tun_flow()
1295 /* Copy mac id and vlan to flow - dev may not exist at delete time. */ in nfp_flower_xmit_pre_tun_flow()
1296 flow->pre_tun_rule.vlan_tci = payload.vlan_tci; in nfp_flower_xmit_pre_tun_flow()
1297 flow->pre_tun_rule.port_idx = payload.port_idx; in nfp_flower_xmit_pre_tun_flow()
1305 app_priv->pre_tun_rule_cnt++; in nfp_flower_xmit_pre_tun_flow()
1313 struct nfp_flower_priv *app_priv = app->priv; in nfp_flower_xmit_pre_tun_del_flow()
1322 payload.vlan_tci = flow->pre_tun_rule.vlan_tci; in nfp_flower_xmit_pre_tun_del_flow()
1323 payload.port_idx = flow->pre_tun_rule.port_idx; in nfp_flower_xmit_pre_tun_del_flow()
1331 app_priv->pre_tun_rule_cnt--; in nfp_flower_xmit_pre_tun_del_flow()
1338 struct nfp_flower_priv *priv = app->priv; in nfp_tunnel_config_start()
1341 /* Initialise rhash for MAC offload tracking. */ in nfp_tunnel_config_start()
1342 err = rhashtable_init(&priv->tun.offloaded_macs, in nfp_tunnel_config_start()
1347 ida_init(&priv->tun.mac_off_ids); in nfp_tunnel_config_start()
1350 mutex_init(&priv->tun.ipv4_off_lock); in nfp_tunnel_config_start()
1351 INIT_LIST_HEAD(&priv->tun.ipv4_off_list); in nfp_tunnel_config_start()
1352 mutex_init(&priv->tun.ipv6_off_lock); in nfp_tunnel_config_start()
1353 INIT_LIST_HEAD(&priv->tun.ipv6_off_list); in nfp_tunnel_config_start()
1356 spin_lock_init(&priv->tun.neigh_off_lock_v4); in nfp_tunnel_config_start()
1357 INIT_LIST_HEAD(&priv->tun.neigh_off_list_v4); in nfp_tunnel_config_start()
1358 spin_lock_init(&priv->tun.neigh_off_lock_v6); in nfp_tunnel_config_start()
1359 INIT_LIST_HEAD(&priv->tun.neigh_off_list_v6); in nfp_tunnel_config_start()
1360 priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler; in nfp_tunnel_config_start()
1362 err = register_netevent_notifier(&priv->tun.neigh_nb); in nfp_tunnel_config_start()
1364 rhashtable_free_and_destroy(&priv->tun.offloaded_macs, in nfp_tunnel_config_start()
1375 struct nfp_flower_priv *priv = app->priv; in nfp_tunnel_config_stop()
1381 unregister_netevent_notifier(&priv->tun.neigh_nb); in nfp_tunnel_config_stop()
1383 ida_destroy(&priv->tun.mac_off_ids); in nfp_tunnel_config_stop()
1386 list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) { in nfp_tunnel_config_stop()
1388 list_del(&ip_entry->list); in nfp_tunnel_config_stop()
1392 mutex_destroy(&priv->tun.ipv6_off_lock); in nfp_tunnel_config_stop()
1396 &priv->tun.neigh_off_list_v4, list) { in nfp_tunnel_config_stop()
1398 memcpy(&ipv4_route.dst_ipv4, &route_entry->ip_add, in nfp_tunnel_config_stop()
1400 list_del(&route_entry->list); in nfp_tunnel_config_stop()
1410 &priv->tun.neigh_off_list_v6, list) { in nfp_tunnel_config_stop()
1412 memcpy(&ipv6_route.dst_ipv6, &route_entry->ip_add, in nfp_tunnel_config_stop()
1414 list_del(&route_entry->list); in nfp_tunnel_config_stop()
1424 rhashtable_free_and_destroy(&priv->tun.offloaded_macs, in nfp_tunnel_config_stop()