Lines Matching +full:sync +full:- +full:update +full:- +full:mask

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2019 Intel Corporation. */
10 * fm10k_setup_tx_resources - allocate Tx resources (Descriptors)
17 struct device *dev = tx_ring->dev; in fm10k_setup_tx_resources()
20 size = sizeof(struct fm10k_tx_buffer) * tx_ring->count; in fm10k_setup_tx_resources()
22 tx_ring->tx_buffer = vzalloc(size); in fm10k_setup_tx_resources()
23 if (!tx_ring->tx_buffer) in fm10k_setup_tx_resources()
26 u64_stats_init(&tx_ring->syncp); in fm10k_setup_tx_resources()
29 tx_ring->size = tx_ring->count * sizeof(struct fm10k_tx_desc); in fm10k_setup_tx_resources()
30 tx_ring->size = ALIGN(tx_ring->size, 4096); in fm10k_setup_tx_resources()
32 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in fm10k_setup_tx_resources()
33 &tx_ring->dma, GFP_KERNEL); in fm10k_setup_tx_resources()
34 if (!tx_ring->desc) in fm10k_setup_tx_resources()
40 vfree(tx_ring->tx_buffer); in fm10k_setup_tx_resources()
41 tx_ring->tx_buffer = NULL; in fm10k_setup_tx_resources()
42 return -ENOMEM; in fm10k_setup_tx_resources()
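
The excerpt above shows the driver's two-stage ring allocation: the software bookkeeping array comes from vzalloc(), the hardware-visible descriptor ring comes from dma_alloc_coherent() with its size rounded up to a 4 KiB boundary, and the vzalloc() is unwound if the DMA allocation fails. Below is a minimal, generic sketch of that pattern; the demo_* names are invented and are not fm10k code.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>

struct demo_buffer { struct sk_buff *skb; };
struct demo_desc { __le64 addr; __le64 flags; };

struct demo_ring {
	struct device *dev;
	u16 count;			/* number of descriptors */
	unsigned int size;		/* bytes of descriptor memory */
	struct demo_buffer *buffer;	/* per-descriptor software state */
	struct demo_desc *desc;		/* hardware descriptor ring */
	dma_addr_t dma;
};

static int demo_setup_ring(struct demo_ring *ring)
{
	/* software state only needs to be virtually contiguous */
	ring->buffer = vzalloc(ring->count * sizeof(*ring->buffer));
	if (!ring->buffer)
		return -ENOMEM;

	/* hardware descriptors must be DMA-able; round up to 4 KiB */
	ring->size = ALIGN(ring->count * sizeof(struct demo_desc), 4096);
	ring->desc = dma_alloc_coherent(ring->dev, ring->size,
					&ring->dma, GFP_KERNEL);
	if (!ring->desc) {
		/* unwind the first allocation on failure */
		vfree(ring->buffer);
		ring->buffer = NULL;
		return -ENOMEM;
	}

	return 0;
}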
46 * fm10k_setup_all_tx_resources - allocate all queues Tx resources
59 for (i = 0; i < interface->num_tx_queues; i++) { in fm10k_setup_all_tx_resources()
60 err = fm10k_setup_tx_resources(interface->tx_ring[i]); in fm10k_setup_all_tx_resources()
64 netif_err(interface, probe, interface->netdev, in fm10k_setup_all_tx_resources()
72 while (i--) in fm10k_setup_all_tx_resources()
73 fm10k_free_tx_resources(interface->tx_ring[i]); in fm10k_setup_all_tx_resources()
78 * fm10k_setup_rx_resources - allocate Rx resources (Descriptors)
85 struct device *dev = rx_ring->dev; in fm10k_setup_rx_resources()
88 size = sizeof(struct fm10k_rx_buffer) * rx_ring->count; in fm10k_setup_rx_resources()
90 rx_ring->rx_buffer = vzalloc(size); in fm10k_setup_rx_resources()
91 if (!rx_ring->rx_buffer) in fm10k_setup_rx_resources()
94 u64_stats_init(&rx_ring->syncp); in fm10k_setup_rx_resources()
97 rx_ring->size = rx_ring->count * sizeof(union fm10k_rx_desc); in fm10k_setup_rx_resources()
98 rx_ring->size = ALIGN(rx_ring->size, 4096); in fm10k_setup_rx_resources()
100 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in fm10k_setup_rx_resources()
101 &rx_ring->dma, GFP_KERNEL); in fm10k_setup_rx_resources()
102 if (!rx_ring->desc) in fm10k_setup_rx_resources()
107 vfree(rx_ring->rx_buffer); in fm10k_setup_rx_resources()
108 rx_ring->rx_buffer = NULL; in fm10k_setup_rx_resources()
109 return -ENOMEM; in fm10k_setup_rx_resources()
113 * fm10k_setup_all_rx_resources - allocate all queues Rx resources
126 for (i = 0; i < interface->num_rx_queues; i++) { in fm10k_setup_all_rx_resources()
127 err = fm10k_setup_rx_resources(interface->rx_ring[i]); in fm10k_setup_all_rx_resources()
131 netif_err(interface, probe, interface->netdev, in fm10k_setup_all_rx_resources()
139 while (i--) in fm10k_setup_all_rx_resources()
140 fm10k_free_rx_resources(interface->rx_ring[i]); in fm10k_setup_all_rx_resources()
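
fm10k_setup_all_tx_resources() and fm10k_setup_all_rx_resources() above both use the same roll-back idiom: allocate queues 0..n-1 in order and, on the first failure, free the queues that did succeed in reverse with while (i--). A hedged sketch of that idiom, reusing demo_setup_ring() from the previous sketch and assuming a matching demo_free_ring() exists:

static int demo_setup_all_rings(struct demo_ring **rings, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = demo_setup_ring(rings[i]);
		if (err)
			goto err_setup;
	}

	return 0;

err_setup:
	/* i indexes the ring that failed; free rings 0..i-1 in reverse */
	while (i--)
		demo_free_ring(rings[i]);
	return err;
}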
147 if (tx_buffer->skb) { in fm10k_unmap_and_free_tx_resource()
148 dev_kfree_skb_any(tx_buffer->skb); in fm10k_unmap_and_free_tx_resource()
150 dma_unmap_single(ring->dev, in fm10k_unmap_and_free_tx_resource()
155 dma_unmap_page(ring->dev, in fm10k_unmap_and_free_tx_resource()
160 tx_buffer->next_to_watch = NULL; in fm10k_unmap_and_free_tx_resource()
161 tx_buffer->skb = NULL; in fm10k_unmap_and_free_tx_resource()
167 * fm10k_clean_tx_ring - Free Tx Buffers
176 if (!tx_ring->tx_buffer) in fm10k_clean_tx_ring()
180 for (i = 0; i < tx_ring->count; i++) { in fm10k_clean_tx_ring()
181 struct fm10k_tx_buffer *tx_buffer = &tx_ring->tx_buffer[i]; in fm10k_clean_tx_ring()
189 size = sizeof(struct fm10k_tx_buffer) * tx_ring->count; in fm10k_clean_tx_ring()
190 memset(tx_ring->tx_buffer, 0, size); in fm10k_clean_tx_ring()
193 memset(tx_ring->desc, 0, tx_ring->size); in fm10k_clean_tx_ring()
197 * fm10k_free_tx_resources - Free Tx Resources per Queue
206 vfree(tx_ring->tx_buffer); in fm10k_free_tx_resources()
207 tx_ring->tx_buffer = NULL; in fm10k_free_tx_resources()
210 if (!tx_ring->desc) in fm10k_free_tx_resources()
213 dma_free_coherent(tx_ring->dev, tx_ring->size, in fm10k_free_tx_resources()
214 tx_ring->desc, tx_ring->dma); in fm10k_free_tx_resources()
215 tx_ring->desc = NULL; in fm10k_free_tx_resources()
219 * fm10k_clean_all_tx_rings - Free Tx Buffers for all queues
226 for (i = 0; i < interface->num_tx_queues; i++) in fm10k_clean_all_tx_rings()
227 fm10k_clean_tx_ring(interface->tx_ring[i]); in fm10k_clean_all_tx_rings()
231 * fm10k_free_all_tx_resources - Free Tx Resources for All Queues
238 int i = interface->num_tx_queues; in fm10k_free_all_tx_resources()
240 while (i--) in fm10k_free_all_tx_resources()
241 fm10k_free_tx_resources(interface->tx_ring[i]); in fm10k_free_all_tx_resources()
245 * fm10k_clean_rx_ring - Free Rx Buffers per Queue
253 if (!rx_ring->rx_buffer) in fm10k_clean_rx_ring()
256 dev_kfree_skb(rx_ring->skb); in fm10k_clean_rx_ring()
257 rx_ring->skb = NULL; in fm10k_clean_rx_ring()
260 for (i = 0; i < rx_ring->count; i++) { in fm10k_clean_rx_ring()
261 struct fm10k_rx_buffer *buffer = &rx_ring->rx_buffer[i]; in fm10k_clean_rx_ring()
262 /* clean-up will only set page pointer to NULL */ in fm10k_clean_rx_ring()
263 if (!buffer->page) in fm10k_clean_rx_ring()
266 dma_unmap_page(rx_ring->dev, buffer->dma, in fm10k_clean_rx_ring()
268 __free_page(buffer->page); in fm10k_clean_rx_ring()
270 buffer->page = NULL; in fm10k_clean_rx_ring()
273 size = sizeof(struct fm10k_rx_buffer) * rx_ring->count; in fm10k_clean_rx_ring()
274 memset(rx_ring->rx_buffer, 0, size); in fm10k_clean_rx_ring()
277 memset(rx_ring->desc, 0, rx_ring->size); in fm10k_clean_rx_ring()
279 rx_ring->next_to_alloc = 0; in fm10k_clean_rx_ring()
280 rx_ring->next_to_clean = 0; in fm10k_clean_rx_ring()
281 rx_ring->next_to_use = 0; in fm10k_clean_rx_ring()
285 * fm10k_free_rx_resources - Free Rx Resources
294 vfree(rx_ring->rx_buffer); in fm10k_free_rx_resources()
295 rx_ring->rx_buffer = NULL; in fm10k_free_rx_resources()
298 if (!rx_ring->desc) in fm10k_free_rx_resources()
301 dma_free_coherent(rx_ring->dev, rx_ring->size, in fm10k_free_rx_resources()
302 rx_ring->desc, rx_ring->dma); in fm10k_free_rx_resources()
304 rx_ring->desc = NULL; in fm10k_free_rx_resources()
308 * fm10k_clean_all_rx_rings - Free Rx Buffers for all queues
315 for (i = 0; i < interface->num_rx_queues; i++) in fm10k_clean_all_rx_rings()
316 fm10k_clean_rx_ring(interface->rx_ring[i]); in fm10k_clean_all_rx_rings()
320 * fm10k_free_all_rx_resources - Free Rx Resources for All Queues
327 int i = interface->num_rx_queues; in fm10k_free_all_rx_resources()
329 while (i--) in fm10k_free_all_rx_resources()
330 fm10k_free_rx_resources(interface->rx_ring[i]); in fm10k_free_all_rx_resources()
334 * fm10k_request_glort_range - Request GLORTs for use in configuring rules
341 struct fm10k_hw *hw = &interface->hw; in fm10k_request_glort_range()
342 u16 mask = (~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT; in fm10k_request_glort_range() local
345 interface->glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE; in fm10k_request_glort_range()
346 interface->glort_count = 0; in fm10k_request_glort_range()
348 /* nothing we can do until mask is allocated */ in fm10k_request_glort_range()
349 if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE) in fm10k_request_glort_range()
357 if (mask <= hw->iov.total_vfs) { in fm10k_request_glort_range()
358 interface->glort_count = 1; in fm10k_request_glort_range()
359 interface->glort += mask; in fm10k_request_glort_range()
360 } else if (mask < 64) { in fm10k_request_glort_range()
361 interface->glort_count = (mask + 1) / 2; in fm10k_request_glort_range()
362 interface->glort += interface->glort_count; in fm10k_request_glort_range()
364 interface->glort_count = mask - 63; in fm10k_request_glort_range()
365 interface->glort += 64; in fm10k_request_glort_range()
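
fm10k_request_glort_range() derives the size of the port's GLORT block from the inverted dglort_map mask and then splits it between the PF and its VFs in one of three ways, depending on how large the block is relative to the number of VFs. The following is a small, userspace-runnable restatement of that arithmetic; the interpretation comments and the demo values are my reading of the excerpt, not authoritative.

#include <stdint.h>
#include <stdio.h>

static void demo_glort_split(uint16_t mask, uint16_t total_vfs,
			     uint16_t *glort, uint16_t *glort_count)
{
	if (mask <= total_vfs) {
		/* block barely covers the VFs: PF keeps a single GLORT
		 * at the very end, VFs get everything before it
		 */
		*glort_count = 1;
		*glort += mask;
	} else if (mask < 64) {
		/* small block: PF takes roughly the upper half */
		*glort_count = (mask + 1) / 2;
		*glort += *glort_count;
	} else {
		/* large block: first 64 entries left for VFs,
		 * the remaining mask - 63 entries belong to the PF
		 */
		*glort_count = mask - 63;
		*glort += 64;
	}
}

int main(void)
{
	uint16_t glort = 0, count = 0;

	demo_glort_split(255, 64, &glort, &count);
	printf("glort=%u count=%u\n", (unsigned)glort, (unsigned)count);
	/* prints: glort=64 count=192 */
	return 0;
}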
377 struct fm10k_hw *hw = &interface->hw; in fm10k_restore_udp_port_info()
380 if (hw->mac.type != fm10k_mac_pf) in fm10k_restore_udp_port_info()
385 ntohs(interface->vxlan_port) | in fm10k_restore_udp_port_info()
390 ntohs(interface->geneve_port)); in fm10k_restore_udp_port_info()
394 * fm10k_udp_tunnel_sync - Called when UDP tunnel ports change
409 interface->vxlan_port = ti.port; in fm10k_udp_tunnel_sync()
411 interface->geneve_port = ti.port; in fm10k_udp_tunnel_sync()
426 * fm10k_open - Called when a network interface is made active
462 interface->num_tx_queues); in fm10k_open()
467 interface->num_rx_queues); in fm10k_open()
486 * fm10k_close - Disables a network interface
491 * The close entry point is called when an interface is de-activated
513 int num_tx_queues = READ_ONCE(interface->num_tx_queues); in fm10k_xmit_frame()
514 unsigned int r_idx = skb->queue_mapping; in fm10k_xmit_frame()
520 if ((skb->protocol == htons(ETH_P_8021Q)) && in fm10k_xmit_frame()
545 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); in fm10k_xmit_frame()
550 ntohs(vhdr->h_vlan_TCI)); in fm10k_xmit_frame()
551 proto = vhdr->h_vlan_encapsulated_proto; in fm10k_xmit_frame()
552 skb->protocol = (ntohs(proto) >= 1536) ? proto : in fm10k_xmit_frame()
556 memmove(skb->data + VLAN_HLEN, skb->data, 12); in fm10k_xmit_frame()
564 if (unlikely(skb->len < 17)) { in fm10k_xmit_frame()
565 int pad_len = 17 - skb->len; in fm10k_xmit_frame()
575 err = fm10k_xmit_frame_ring(skb, interface->tx_ring[r_idx]); in fm10k_xmit_frame()
581 * fm10k_tx_timeout - Respond to a Tx Hang
591 if (txqueue >= interface->num_tx_queues) { in fm10k_tx_timeout()
596 tx_ring = interface->tx_ring[txqueue]; in fm10k_tx_timeout()
606 netdev->watchdog_timeo / HZ); in fm10k_tx_timeout()
608 /* fake Tx hang - increase the kernel timeout */ in fm10k_tx_timeout()
609 if (netdev->watchdog_timeo < TX_TIMEO_LIMIT) in fm10k_tx_timeout()
610 netdev->watchdog_timeo *= 2; in fm10k_tx_timeout()
615 * fm10k_host_mbx_ready - Check PF interface's mailbox readiness
621 * check since it will block all PF-VF mailbox messages from being sent from
626 struct fm10k_hw *hw = &interface->hw; in fm10k_host_mbx_ready()
628 return (hw->mac.type == fm10k_mac_vf || interface->host_ready); in fm10k_host_mbx_ready()
632 * fm10k_queue_vlan_request - Queue a VLAN update request
638 * This function queues up a VLAN update. For VFs, this must be sent to the
640 * it's similar to the VF. This avoids storming the PF<->VF mailbox with too
654 return -ENOMEM; in fm10k_queue_vlan_request()
656 request->type = FM10K_VLAN_REQUEST; in fm10k_queue_vlan_request()
657 request->vlan.vid = vid; in fm10k_queue_vlan_request()
658 request->vlan.vsi = vsi; in fm10k_queue_vlan_request()
659 request->set = set; in fm10k_queue_vlan_request()
661 spin_lock_irqsave(&interface->macvlan_lock, flags); in fm10k_queue_vlan_request()
662 list_add_tail(&request->list, &interface->macvlan_requests); in fm10k_queue_vlan_request()
663 spin_unlock_irqrestore(&interface->macvlan_lock, flags); in fm10k_queue_vlan_request()
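
fm10k_queue_vlan_request() defers the VLAN update rather than sending it inline: it allocates a small request object, fills it in, and appends it to interface->macvlan_requests under macvlan_lock, leaving a worker (not visible in this excerpt) to drain the list and talk to the mailbox. A hypothetical, self-contained sketch of that queueing pattern; all demo_* names are invented.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_vlan_request {
	struct list_head list;
	u32 vid;
	u8 vsi;
	bool set;
};

static int demo_queue_vlan_request(struct list_head *queue, spinlock_t *lock,
				   u32 vid, u8 vsi, bool set)
{
	struct demo_vlan_request *request;
	unsigned long flags;

	/* callers may be in atomic context, so no sleeping allocation */
	request = kzalloc(sizeof(*request), GFP_ATOMIC);
	if (!request)
		return -ENOMEM;

	request->vid = vid;
	request->vsi = vsi;
	request->set = set;

	spin_lock_irqsave(lock, flags);
	list_add_tail(&request->list, queue);
	spin_unlock_irqrestore(lock, flags);

	return 0;
}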
671 * fm10k_queue_mac_request - Queue a MAC update request
673 * @glort: the target glort for this update
674 * @addr: the address to update
675 * @vid: the vid to update
693 return -ENOMEM; in fm10k_queue_mac_request()
696 request->type = FM10K_MC_MAC_REQUEST; in fm10k_queue_mac_request()
698 request->type = FM10K_UC_MAC_REQUEST; in fm10k_queue_mac_request()
700 ether_addr_copy(request->mac.addr, addr); in fm10k_queue_mac_request()
701 request->mac.glort = glort; in fm10k_queue_mac_request()
702 request->mac.vid = vid; in fm10k_queue_mac_request()
703 request->set = set; in fm10k_queue_mac_request()
705 spin_lock_irqsave(&interface->macvlan_lock, flags); in fm10k_queue_mac_request()
706 list_add_tail(&request->list, &interface->macvlan_requests); in fm10k_queue_mac_request()
707 spin_unlock_irqrestore(&interface->macvlan_lock, flags); in fm10k_queue_mac_request()
715 * fm10k_clear_macvlan_queue - Cancel pending updates for a given glort
730 spin_lock_irqsave(&interface->macvlan_lock, flags); in fm10k_clear_macvlan_queue()
733 list_for_each_entry_safe(r, tmp, &interface->macvlan_requests, list) { in fm10k_clear_macvlan_queue()
734 switch (r->type) { in fm10k_clear_macvlan_queue()
738 if (r->mac.glort != glort) in fm10k_clear_macvlan_queue()
743 list_del(&r->list); in fm10k_clear_macvlan_queue()
750 spin_unlock_irqrestore(&interface->macvlan_lock, flags); in fm10k_clear_macvlan_queue()
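
The cancel path above walks the same request list with list_for_each_entry_safe(), which tolerates deleting the current entry mid-iteration, and it takes the same spinlock as the enqueue side so the two paths never race. A hedged sketch; demo_mac_request and its glort field are invented for illustration.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_mac_request {
	struct list_head list;
	u16 glort;
};

static void demo_clear_requests(struct list_head *queue, spinlock_t *lock,
				u16 glort)
{
	struct demo_mac_request *r, *tmp;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* the _safe variant keeps a lookahead pointer, so list_del() is legal here */
	list_for_each_entry_safe(r, tmp, queue, list) {
		if (r->glort != glort)
			continue;
		list_del(&r->list);
		kfree(r);
	}
	spin_unlock_irqrestore(lock, flags);
}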
757 u16 glort = interface->glort; in fm10k_uc_vlan_unsync()
758 u16 vid = interface->vid; in fm10k_uc_vlan_unsync()
763 vid &= VLAN_N_VID - 1; in fm10k_uc_vlan_unsync()
769 /* return non-zero value as we are only doing a partial sync/unsync */ in fm10k_uc_vlan_unsync()
777 u16 glort = interface->glort; in fm10k_mc_vlan_unsync()
778 u16 vid = interface->vid; in fm10k_mc_vlan_unsync()
783 vid &= VLAN_N_VID - 1; in fm10k_mc_vlan_unsync()
789 /* return non-zero value as we are only doing a partial sync/unsync */ in fm10k_mc_vlan_unsync()
796 struct fm10k_l2_accel *l2_accel = interface->l2_accel; in fm10k_update_vid()
797 struct fm10k_hw *hw = &interface->hw; in fm10k_update_vid()
807 return -EINVAL; in fm10k_update_vid()
814 if (set && hw->mac.vlan_override) in fm10k_update_vid()
815 return -EACCES; in fm10k_update_vid()
817 /* update active_vlans bitmask */ in fm10k_update_vid()
818 set_bit(vid, interface->active_vlans); in fm10k_update_vid()
820 clear_bit(vid, interface->active_vlans); in fm10k_update_vid()
823 for (i = 0; i < interface->num_rx_queues; i++) { in fm10k_update_vid()
824 struct fm10k_ring *rx_ring = interface->rx_ring[i]; in fm10k_update_vid()
825 u16 rx_vid = rx_ring->vid & (VLAN_N_VID - 1); in fm10k_update_vid()
827 if (test_bit(rx_vid, interface->active_vlans)) in fm10k_update_vid()
828 rx_ring->vid |= FM10K_VLAN_CLEAR; in fm10k_update_vid()
830 rx_ring->vid &= ~FM10K_VLAN_CLEAR; in fm10k_update_vid()
836 if (hw->mac.vlan_override) in fm10k_update_vid()
842 if (!set && vid == hw->mac.default_vid) in fm10k_update_vid()
845 /* Do not throw an error if the interface is down. We will sync once in fm10k_update_vid()
848 if (test_bit(__FM10K_DOWN, interface->state)) in fm10k_update_vid()
853 /* only need to update the VLAN if not in promiscuous mode */ in fm10k_update_vid()
854 if (!(netdev->flags & IFF_PROMISC)) { in fm10k_update_vid()
860 /* Update our base MAC address */ in fm10k_update_vid()
861 err = fm10k_queue_mac_request(interface, interface->glort, in fm10k_update_vid()
862 hw->mac.addr, vid, set); in fm10k_update_vid()
866 /* Update L2 accelerated macvlan addresses */ in fm10k_update_vid()
868 for (i = 0; i < l2_accel->size; i++) { in fm10k_update_vid()
869 struct net_device *sdev = l2_accel->macvlan[i]; in fm10k_update_vid()
874 glort = l2_accel->dglort + 1 + i; in fm10k_update_vid()
877 sdev->dev_addr, in fm10k_update_vid()
883 interface->vid = vid + (set ? VLAN_N_VID : 0); in fm10k_update_vid()
885 /* Update the unicast and multicast address list to add/drop VLAN */ in fm10k_update_vid()
898 /* update VLAN and address table based on changes */ in fm10k_vlan_rx_add_vid()
905 /* update VLAN and address table based on changes */ in fm10k_vlan_rx_kill_vid()
911 struct fm10k_hw *hw = &interface->hw; in fm10k_find_next_vlan()
912 u16 default_vid = hw->mac.default_vid; in fm10k_find_next_vlan()
915 vid = find_next_bit(interface->active_vlans, vid_limit, ++vid); in fm10k_find_next_vlan()
932 prev_vid += (vid - prev_vid - 1) << FM10K_VLAN_LENGTH_SHIFT; in fm10k_clear_unused_vlans()
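
Several of the VLAN paths above treat interface->active_vlans as a bitmap: set_bit()/clear_bit() record which VIDs are in use and find_next_bit() walks the populated entries, as fm10k_find_next_vlan() does. A minimal sketch of that walk, assuming a DECLARE_BITMAP(active_vlans, VLAN_N_VID) field and an invented per-VID callback:

#include <linux/bitops.h>
#include <linux/if_vlan.h>

struct demo_interface {
	DECLARE_BITMAP(active_vlans, VLAN_N_VID);
};

static void demo_for_each_active_vlan(struct demo_interface *di,
				      void (*cb)(u16 vid))
{
	u16 vid;

	/* VID 0 is the untagged/default entry, so start scanning at 1 */
	for (vid = find_next_bit(di->active_vlans, VLAN_N_VID, 1);
	     vid < VLAN_N_VID;
	     vid = find_next_bit(di->active_vlans, VLAN_N_VID, vid + 1))
		cb(vid);
}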
938 const unsigned char *addr, bool sync) in __fm10k_uc_sync() argument
941 u16 vid, glort = interface->glort; in __fm10k_uc_sync()
945 return -EADDRNOTAVAIL; in __fm10k_uc_sync()
951 addr, vid, sync); in __fm10k_uc_sync()
974 struct fm10k_hw *hw = &interface->hw; in fm10k_set_mac()
978 if (!is_valid_ether_addr(addr->sa_data)) in fm10k_set_mac()
979 return -EADDRNOTAVAIL; in fm10k_set_mac()
981 if (dev->flags & IFF_UP) { in fm10k_set_mac()
985 err = fm10k_uc_sync(dev, addr->sa_data); in fm10k_set_mac()
987 fm10k_uc_unsync(dev, hw->mac.addr); in fm10k_set_mac()
993 eth_hw_addr_set(dev, addr->sa_data); in fm10k_set_mac()
994 ether_addr_copy(hw->mac.addr, addr->sa_data); in fm10k_set_mac()
995 dev->addr_assign_type &= ~NET_ADDR_RANDOM; in fm10k_set_mac()
999 return err ? -EAGAIN : 0; in fm10k_set_mac()
1003 const unsigned char *addr, bool sync) in __fm10k_mc_sync() argument
1006 u16 vid, glort = interface->glort; in __fm10k_mc_sync()
1010 return -EADDRNOTAVAIL; in __fm10k_mc_sync()
1016 addr, vid, sync); in __fm10k_mc_sync()
1039 struct fm10k_hw *hw = &interface->hw; in fm10k_set_rx_mode()
1042 /* no need to update the hardware if we are not running */ in fm10k_set_rx_mode()
1043 if (!(dev->flags & IFF_UP)) in fm10k_set_rx_mode()
1047 xcast_mode = (dev->flags & IFF_PROMISC) ? FM10K_XCAST_MODE_PROMISC : in fm10k_set_rx_mode()
1048 (dev->flags & IFF_ALLMULTI) ? FM10K_XCAST_MODE_ALLMULTI : in fm10k_set_rx_mode()
1049 (dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) ? in fm10k_set_rx_mode()
1054 /* update xcast mode first, but only if it changed */ in fm10k_set_rx_mode()
1055 if (interface->xcast_mode != xcast_mode) { in fm10k_set_rx_mode()
1056 /* update VLAN table when entering promiscuous mode */ in fm10k_set_rx_mode()
1062 if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC) in fm10k_set_rx_mode()
1065 /* update xcast mode if host's mailbox is ready */ in fm10k_set_rx_mode()
1067 hw->mac.ops.update_xcast_mode(hw, interface->glort, in fm10k_set_rx_mode()
1071 interface->xcast_mode = xcast_mode; in fm10k_set_rx_mode()
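
The ternary in fm10k_set_rx_mode() maps the netdev flags to the matching xcast mode and only pushes the new mode to the switch when it actually changed and the host mailbox is ready. Below is a readability-oriented restatement of that mapping as a helper; FM10K_XCAST_MODE_PROMISC and FM10K_XCAST_MODE_NONE appear elsewhere in these excerpts, while FM10K_XCAST_MODE_ALLMULTI and FM10K_XCAST_MODE_MULTI are assumed from context because the lines naming them are truncated above, and demo_flags_to_xcast() itself is invented.

#include <linux/netdevice.h>
#include "fm10k.h"	/* driver header providing FM10K_XCAST_MODE_* */

static u8 demo_flags_to_xcast(unsigned int flags)
{
	if (flags & IFF_PROMISC)
		return FM10K_XCAST_MODE_PROMISC;
	if (flags & IFF_ALLMULTI)
		return FM10K_XCAST_MODE_ALLMULTI;
	if (flags & (IFF_BROADCAST | IFF_MULTICAST))
		return FM10K_XCAST_MODE_MULTI;
	return FM10K_XCAST_MODE_NONE;
}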
1083 struct fm10k_l2_accel *l2_accel = interface->l2_accel; in fm10k_restore_rx_state()
1084 struct net_device *netdev = interface->netdev; in fm10k_restore_rx_state()
1085 struct fm10k_hw *hw = &interface->hw; in fm10k_restore_rx_state()
1090 glort = interface->glort; in fm10k_restore_rx_state()
1093 if (netdev->flags & IFF_PROMISC) in fm10k_restore_rx_state()
1095 else if (netdev->flags & IFF_ALLMULTI) in fm10k_restore_rx_state()
1097 else if (netdev->flags & (IFF_BROADCAST | IFF_MULTICAST)) in fm10k_restore_rx_state()
1106 hw->mac.ops.update_lport_state(hw, glort, in fm10k_restore_rx_state()
1107 interface->glort_count, true); in fm10k_restore_rx_state()
1109 /* update VLAN table */ in fm10k_restore_rx_state()
1113 /* update table with current entries */ in fm10k_restore_rx_state()
1120 hw->mac.addr, vid, true); in fm10k_restore_rx_state()
1124 for (i = 0; i < l2_accel->size; i++) { in fm10k_restore_rx_state()
1125 struct net_device *sdev = l2_accel->macvlan[i]; in fm10k_restore_rx_state()
1130 glort = l2_accel->dglort + 1 + i; in fm10k_restore_rx_state()
1133 sdev->dev_addr, in fm10k_restore_rx_state()
1139 /* update xcast mode before synchronizing addresses if host's mailbox in fm10k_restore_rx_state()
1143 hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode); in fm10k_restore_rx_state()
1151 for (i = 0; i < l2_accel->size; i++) { in fm10k_restore_rx_state()
1152 struct net_device *sdev = l2_accel->macvlan[i]; in fm10k_restore_rx_state()
1157 glort = l2_accel->dglort + 1 + i; in fm10k_restore_rx_state()
1159 hw->mac.ops.update_xcast_mode(hw, glort, in fm10k_restore_rx_state()
1162 sdev->dev_addr, in fm10k_restore_rx_state()
1163 hw->mac.default_vid, true); in fm10k_restore_rx_state()
1170 interface->xcast_mode = xcast_mode; in fm10k_restore_rx_state()
1178 struct net_device *netdev = interface->netdev; in fm10k_reset_rx_state()
1179 struct fm10k_hw *hw = &interface->hw; in fm10k_reset_rx_state()
1182 while (test_bit(__FM10K_MACVLAN_SCHED, interface->state)) in fm10k_reset_rx_state()
1186 fm10k_clear_macvlan_queue(interface, interface->glort, true); in fm10k_reset_rx_state()
1194 hw->mac.ops.update_lport_state(hw, interface->glort, in fm10k_reset_rx_state()
1195 interface->glort_count, false); in fm10k_reset_rx_state()
1200 interface->xcast_mode = FM10K_XCAST_MODE_NONE; in fm10k_reset_rx_state()
1202 /* clear the sync flag since the lport has been dropped */ in fm10k_reset_rx_state()
1208 * fm10k_get_stats64 - Get System Network Statistics
1225 for (i = 0; i < interface->num_rx_queues; i++) { in fm10k_get_stats64()
1226 ring = READ_ONCE(interface->rx_ring[i]); in fm10k_get_stats64()
1232 start = u64_stats_fetch_begin(&ring->syncp); in fm10k_get_stats64()
1233 packets = ring->stats.packets; in fm10k_get_stats64()
1234 bytes = ring->stats.bytes; in fm10k_get_stats64()
1235 } while (u64_stats_fetch_retry(&ring->syncp, start)); in fm10k_get_stats64()
1237 stats->rx_packets += packets; in fm10k_get_stats64()
1238 stats->rx_bytes += bytes; in fm10k_get_stats64()
1241 for (i = 0; i < interface->num_tx_queues; i++) { in fm10k_get_stats64()
1242 ring = READ_ONCE(interface->tx_ring[i]); in fm10k_get_stats64()
1248 start = u64_stats_fetch_begin(&ring->syncp); in fm10k_get_stats64()
1249 packets = ring->stats.packets; in fm10k_get_stats64()
1250 bytes = ring->stats.bytes; in fm10k_get_stats64()
1251 } while (u64_stats_fetch_retry(&ring->syncp, start)); in fm10k_get_stats64()
1253 stats->tx_packets += packets; in fm10k_get_stats64()
1254 stats->tx_bytes += bytes; in fm10k_get_stats64()
1260 stats->rx_missed_errors = netdev->stats.rx_missed_errors; in fm10k_get_stats64()
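
fm10k_get_stats64() samples each ring's packet and byte counters inside a u64_stats_fetch_begin()/u64_stats_fetch_retry() loop, so the 64-bit counters read consistently even on 32-bit machines where the writer cannot update them atomically; the u64_stats_init() calls in the setup paths earlier initialize the same syncp. A sketch of the reader side with an illustrative demo_stats_ring type:

#include <linux/u64_stats_sync.h>

struct demo_stats_ring {
	struct u64_stats_sync syncp;
	struct {
		u64 packets;
		u64 bytes;
	} stats;
};

static void demo_read_ring_stats(struct demo_stats_ring *ring,
				 u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		/* take a snapshot; retry if the writer touched the counters */
		start = u64_stats_fetch_begin(&ring->syncp);
		*packets = ring->stats.packets;
		*bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry(&ring->syncp, start));
}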
1269 if (tc && (interface->hw.mac.type != fm10k_mac_pf)) in fm10k_setup_tc()
1270 return -EINVAL; in fm10k_setup_tc()
1274 return -EINVAL; in fm10k_setup_tc()
1304 set_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags); in fm10k_setup_tc()
1323 return -EOPNOTSUPP; in __fm10k_setup_tc()
1325 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; in __fm10k_setup_tc()
1327 return fm10k_setup_tc(dev, mqprio->num_tc); in __fm10k_setup_tc()
1335 for (i = 0; i < interface->num_rx_queues; i++) { in fm10k_assign_l2_accel()
1336 struct fm10k_ring *ring = interface->rx_ring[i]; in fm10k_assign_l2_accel()
1338 rcu_assign_pointer(ring->l2_accel, l2_accel); in fm10k_assign_l2_accel()
1341 interface->l2_accel = l2_accel; in fm10k_assign_l2_accel()
1348 struct fm10k_l2_accel *l2_accel = interface->l2_accel; in fm10k_dfwd_add_station()
1351 struct fm10k_hw *hw = &interface->hw; in fm10k_dfwd_add_station()
1360 return ERR_PTR(-EMEDIUMTYPE); in fm10k_dfwd_add_station()
1365 if (interface->glort_count < 7) in fm10k_dfwd_add_station()
1366 return ERR_PTR(-EBUSY); in fm10k_dfwd_add_station()
1371 return ERR_PTR(-ENOMEM); in fm10k_dfwd_add_station()
1373 l2_accel->size = 7; in fm10k_dfwd_add_station()
1374 l2_accel->dglort = interface->glort; in fm10k_dfwd_add_station()
1376 /* update pointers */ in fm10k_dfwd_add_station()
1379 } else if ((l2_accel->count == FM10K_MAX_STATIONS) || in fm10k_dfwd_add_station()
1380 (l2_accel->count == (interface->glort_count - 1))) { in fm10k_dfwd_add_station()
1381 return ERR_PTR(-EBUSY); in fm10k_dfwd_add_station()
1383 } else if (l2_accel->count == l2_accel->size) { in fm10k_dfwd_add_station()
1386 macvlan[(l2_accel->size * 2) + 1]); in fm10k_dfwd_add_station()
1389 return ERR_PTR(-ENOMEM); in fm10k_dfwd_add_station()
1393 macvlan[old_l2_accel->size])); in fm10k_dfwd_add_station()
1395 l2_accel->size = (old_l2_accel->size * 2) + 1; in fm10k_dfwd_add_station()
1397 /* update pointers */ in fm10k_dfwd_add_station()
1403 for (i = 0; i < l2_accel->size; i++) { in fm10k_dfwd_add_station()
1404 if (!l2_accel->macvlan[i]) in fm10k_dfwd_add_station()
1409 l2_accel->macvlan[i] = sdev; in fm10k_dfwd_add_station()
1410 l2_accel->count++; in fm10k_dfwd_add_station()
1415 dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask); in fm10k_dfwd_add_station()
1416 dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask); in fm10k_dfwd_add_station()
1417 dglort.glort = interface->glort; in fm10k_dfwd_add_station()
1418 dglort.shared_l = fls(l2_accel->size); in fm10k_dfwd_add_station()
1419 hw->mac.ops.configure_dglort_map(hw, &dglort); in fm10k_dfwd_add_station()
1424 glort = l2_accel->dglort + 1 + i; in fm10k_dfwd_add_station()
1427 hw->mac.ops.update_xcast_mode(hw, glort, in fm10k_dfwd_add_station()
1430 fm10k_queue_mac_request(interface, glort, sdev->dev_addr, in fm10k_dfwd_add_station()
1431 hw->mac.default_vid, true); in fm10k_dfwd_add_station()
1436 fm10k_queue_mac_request(interface, glort, sdev->dev_addr, in fm10k_dfwd_add_station()
1447 struct fm10k_l2_accel *l2_accel = READ_ONCE(interface->l2_accel); in fm10k_dfwd_del_station()
1449 struct fm10k_hw *hw = &interface->hw; in fm10k_dfwd_del_station()
1458 for (i = 0; i < l2_accel->size; i++) { in fm10k_dfwd_del_station()
1459 if (l2_accel->macvlan[i] == sdev) in fm10k_dfwd_del_station()
1464 if (i == l2_accel->size) in fm10k_dfwd_del_station()
1470 glort = l2_accel->dglort + 1 + i; in fm10k_dfwd_del_station()
1473 hw->mac.ops.update_xcast_mode(hw, glort, in fm10k_dfwd_del_station()
1476 fm10k_queue_mac_request(interface, glort, sdev->dev_addr, in fm10k_dfwd_del_station()
1477 hw->mac.default_vid, false); in fm10k_dfwd_del_station()
1482 fm10k_queue_mac_request(interface, glort, sdev->dev_addr, in fm10k_dfwd_del_station()
1488 l2_accel->macvlan[i] = NULL; in fm10k_dfwd_del_station()
1489 l2_accel->count--; in fm10k_dfwd_del_station()
1494 dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask); in fm10k_dfwd_del_station()
1495 dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask); in fm10k_dfwd_del_station()
1496 dglort.glort = interface->glort; in fm10k_dfwd_del_station()
1497 dglort.shared_l = fls(l2_accel->size); in fm10k_dfwd_del_station()
1498 hw->mac.ops.configure_dglort_map(hw, &dglort); in fm10k_dfwd_del_station()
1501 if (l2_accel->count == 0) { in fm10k_dfwd_del_station()
1511 if (!skb->encapsulation || fm10k_tx_encap_offload(skb)) in fm10k_features_check()
1552 dev->netdev_ops = &fm10k_netdev_ops; in fm10k_alloc_netdev()
1557 interface->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; in fm10k_alloc_netdev()
1560 dev->features |= NETIF_F_IP_CSUM | in fm10k_alloc_netdev()
1570 if (info->mac == fm10k_mac_pf) { in fm10k_alloc_netdev()
1571 dev->hw_enc_features = NETIF_F_IP_CSUM | in fm10k_alloc_netdev()
1579 dev->features |= NETIF_F_GSO_UDP_TUNNEL; in fm10k_alloc_netdev()
1581 dev->udp_tunnel_nic_info = &fm10k_udp_tunnels; in fm10k_alloc_netdev()
1585 hw_features = dev->features; in fm10k_alloc_netdev()
1591 dev->vlan_features |= dev->features; in fm10k_alloc_netdev()
1597 dev->features |= NETIF_F_HW_VLAN_CTAG_TX | in fm10k_alloc_netdev()
1601 dev->priv_flags |= IFF_UNICAST_FLT; in fm10k_alloc_netdev()
1603 dev->hw_features |= hw_features; in fm10k_alloc_netdev()
1605 /* MTU range: 68 - 15342 */ in fm10k_alloc_netdev()
1606 dev->min_mtu = ETH_MIN_MTU; in fm10k_alloc_netdev()
1607 dev->max_mtu = FM10K_MAX_JUMBO_FRAME_SIZE; in fm10k_alloc_netdev()