// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2021 Intel Corporation. */

static const char i40e_copyright[] = "Copyright (c) 2013 - 2019 Intel Corporation.";
/* i40e_pci_tbl - PCI Device ID Table
 * ...
 */

static int debug = -1;

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
	if (is_unicast_ether_addr(f->macaddr) || is_link_local_ether_addr(f->macaddr))
		ha_list = &netdev->uc;
	else
		ha_list = &netdev->mc;

		if (ether_addr_equal(ha->addr, f->macaddr)) {
			ha->refcount += delta;
			if (ha->refcount <= 0)
				ha->refcount = 1;
		}
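/*
 * Illustrative sketch (not part of the driver; the example_* name is
 * hypothetical): adjusting the refcount of a matching entry in a netdev
 * address list, as the fragment above does. netdev_hw_addr_list_for_each()
 * is the standard iterator over netdev->uc / netdev->mc; clamping the
 * refcount at 1 keeps the entry from being freed out from under the filter.
 */
static void example_addr_refcnt(struct netdev_hw_addr_list *ha_list,
				const u8 *macaddr, int delta)
{
	struct netdev_hw_addr *ha;

	netdev_hw_addr_list_for_each(ha, ha_list) {
		if (!ether_addr_equal(ha->addr, macaddr))
			continue;
		ha->refcount += delta;
		if (ha->refcount <= 0)
			ha->refcount = 1;	/* never drop the last reference here */
		break;
	}
}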
/**
 * i40e_hw_to_dev - get device pointer from the hardware structure
 */
	return &pf->pdev->dev;
/**
 * i40e_allocate_dma_mem - OS specific memory alloc for shared code
 */
	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&pf->pdev->dev, mem->size, &mem->pa,
				     GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
/**
 * i40e_free_dma_mem - OS specific memory free for shared code
 */
	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;
/**
 * i40e_allocate_virt_mem - OS specific memory alloc for shared code
 */
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;
/**
 * i40e_free_virt_mem - OS specific memory free for shared code
 */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;
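/*
 * Illustrative sketch (not part of the driver; struct and function names
 * are hypothetical): the usual pairing for a coherent DMA buffer like the
 * one i40e_allocate_dma_mem() hands to the shared code. The size is
 * aligned up front and remembered so the free side can pass the same size
 * back to dma_free_coherent().
 */
struct example_dma_buf {
	void *va;
	dma_addr_t pa;
	u64 size;
};

static int example_dma_buf_alloc(struct device *dev,
				 struct example_dma_buf *buf,
				 u64 size, u32 alignment)
{
	buf->size = ALIGN(size, alignment);
	buf->va = dma_alloc_coherent(dev, buf->size, &buf->pa, GFP_KERNEL);
	return buf->va ? 0 : -ENOMEM;
}

static void example_dma_buf_free(struct device *dev,
				 struct example_dma_buf *buf)
{
	dma_free_coherent(dev, buf->size, buf->va, buf->pa);
	buf->va = NULL;
	buf->pa = 0;
	buf->size = 0;
}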
/**
 * i40e_get_lump - find a lump of free generic resource
 */
	int ret = -ENOMEM;

	if (...) {
		dev_info(&pf->pdev->dev,
			 ...);
		return -EINVAL;
	}

	if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
		if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
			dev_err(&pf->pdev->dev,
				...,
				pile->num_entries - 1);
			return -ENOMEM;
		}
		pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
		return pile->num_entries - 1;
	}

	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			...
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		...
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
/**
 * i40e_put_lump - return a lump of generic resource
 */
	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
	}
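/*
 * Illustrative sketch (not part of the driver; names prefixed example_/EX_
 * are hypothetical): the first-fit scan that i40e_get_lump() performs over
 * the pile. Each entry holds the owner id plus a VALID bit; a free run of
 * @needed entries is claimed by stamping every entry with the caller's id,
 * and i40e_put_lump() later clears the run to return it.
 */
#define EX_PILE_VALID_BIT 0x8000

static int example_get_lump(u16 *list, u16 num_entries, u16 needed, u16 id)
{
	u16 i = 0, j;

	while (i < num_entries) {
		if (list[i] & EX_PILE_VALID_BIT) {
			i++;			/* skip entries already owned */
			continue;
		}

		/* is the free run starting at i long enough? */
		for (j = 0; j < needed && (i + j) < num_entries; j++)
			if (list[i + j] & EX_PILE_VALID_BIT)
				break;

		if (j == needed) {		/* claim the whole run */
			for (j = 0; j < needed; j++)
				list[i + j] = id | EX_PILE_VALID_BIT;
			return i;
		}
		i += j + 1;			/* jump past the blocking entry */
	}
	return -ENOMEM;
}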
/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 */
	if ((!test_bit(__I40E_DOWN, pf->state) &&
	     !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) ||
	    test_bit(__I40E_RECOVERY_MODE, pf->state))
		queue_work(i40e_wq, &pf->service_task);
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 */
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	pf->tx_timeout_count++;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (txqueue ==
			    vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
			     (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;	/* don't do any new action before the next timeout */

	if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
		return;

	if (tx_ring) {
		if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
			val = rd32(&pf->hw,
				   I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						       tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, ...,
			    vsi->seid, txqueue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, ...,
		    pf->tx_timeout_recovery_level, txqueue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful, device is in non-recoverable state.\n");
		set_bit(__I40E_DOWN_REQUESTED, pf->state);
		set_bit(__I40E_VSI_DOWN_REQUESTED, vsi->state);
		break;
	}

	pf->tx_timeout_recovery_level++;
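/*
 * Illustrative sketch (not part of the driver; example_should_recover() is
 * hypothetical): the jiffies-based holdoff used above. A hang arriving
 * long after the last recovery restarts the escalation ladder at level 1;
 * one arriving within a watchdog period of the last attempt is ignored so
 * back-to-back timeouts don't stack resets.
 */
static bool example_should_recover(unsigned long last_recovery,
				   unsigned long watchdog_timeo,
				   u8 *level)
{
	if (time_after(jiffies, last_recovery + 20 * HZ))
		*level = 1;		/* quiet for a while: start over */
	else if (time_before(jiffies, last_recovery + watchdog_timeo))
		return false;		/* too soon: let prior recovery run */
	return true;
}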
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 */
	return &vsi->net_stats;
/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 */
	do {
		start = u64_stats_fetch_begin(&ring->syncp);
		packets = ring->stats.packets;
		bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes += bytes;
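/*
 * Illustrative sketch (not part of the driver; example_* names are
 * hypothetical): the u64_stats seqcount pattern used above. A writer
 * brackets updates with u64_stats_update_begin()/end(); the reader retries
 * until it sees a consistent snapshot, so 64-bit counters stay tear-free
 * even on 32-bit CPUs.
 */
struct example_ring_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void example_read_stats(struct example_ring_stats *s,
			       u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}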
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 */
	struct i40e_vsi *vsi = np->vsi;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		ring = READ_ONCE(vsi->tx_rings[i]);
		...
			ring = READ_ONCE(vsi->xdp_rings[i]);
		...
		ring = READ_ONCE(vsi->rx_rings[i]);
		...
		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}

	stats->multicast = vsi_stats->multicast;
	stats->tx_errors = vsi_stats->tx_errors;
	stats->tx_dropped = vsi_stats->tx_dropped;
	stats->rx_errors = vsi_stats->rx_errors;
	stats->rx_dropped = vsi_stats->rx_dropped;
	stats->rx_missed_errors = vsi_stats->rx_missed_errors;
	stats->rx_crc_errors = vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;
/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 */
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 */
	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			memset(&pf->veb[i]->tc_stats, 0,
			       sizeof(pf->veb[i]->tc_stats));
			memset(&pf->veb[i]->tc_stats_offsets, 0,
			       sizeof(pf->veb[i]->tc_stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
/**
 * i40e_compute_pci_to_hw_id - compute index from PCI function.
 */
	if (vsi->type == I40E_VSI_SRIOV)
		return (hw->port * BIT(7)) / pf_count + vsi->vf_id;

	return hw->port + BIT(7);
/**
 * i40e_stat_update64 - read and update a 64 bit stat from the chip.
 */
	*stat = new_data - *offset;
/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 *
 * ...
 * the potential roll-over.
 */
	if (hw->device_id == I40E_DEV_ID_QEMU) {
		...
	}
	...
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
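/*
 * Illustrative sketch (not part of the driver; example_delta48() is
 * hypothetical): delta computation for a free-running 48-bit hardware
 * counter as done above. When the raw value has wrapped below the saved
 * offset, adding BIT_ULL(48) before subtracting yields the true distance
 * travelled; masking keeps the result within 48 bits.
 */
static u64 example_delta48(u64 new_data, u64 offset)
{
	u64 delta;

	if (new_data >= offset)
		delta = new_data - offset;
	else
		delta = (new_data + BIT_ULL(48)) - offset;	/* wrapped */

	return delta & (BIT_ULL(48) - 1);
}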
/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 */
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 */
/**
 * i40e_stats_update_rx_discards - update rx_discards.
 */
	i40e_stat_update32(hw, ..., offset_loaded,
			   &stat_offset->rx_discards, &stat->rx_discards);
	i40e_stat_update64(hw, ...,
			   offset_loaded, &stat_offset->rx_discards_other,
			   &stat->rx_discards_other);
/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 */
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	i40e_stat_update32(hw, ...,
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, ...,
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, ...,
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, ...,
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, ...,
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, ...,
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, ...,
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, ...,
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, ...,
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, ...,
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);

	i40e_stats_update_rx_discards(vsi, hw, stat_idx,
				      vsi->stat_offsets_loaded, oes, es);

	vsi->stat_offsets_loaded = true;
/**
 * i40e_update_veb_stats - Update Switch component statistics
 */
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	i40e_stat_update32(hw, ...,
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, ...,
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, ...,
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, ...,
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, ...,
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, ...,
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, ...,
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, ...,
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, ...,
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, ...,
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, ...,
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, ...,
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, ...,
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, ...,
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 */
	struct i40e_pf *pf = vsi->back;

	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = READ_ONCE(vsi->tx_rings[q]);
		...
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;
		tx_stopped += p->tx_stats.tx_stopped;

		/* locate Rx ring */
		p = READ_ONCE(vsi->rx_rings[q]);
		...
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
		rx_reuse += p->rx_stats.page_reuse_count;
		rx_alloc += p->rx_stats.page_alloc_count;
		rx_waive += p->rx_stats.page_waive_count;
		rx_busy += p->rx_stats.page_busy_count;

		/* locate XDP ring */
		p = READ_ONCE(vsi->xdp_rings[q]);
		...
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;
	}
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->tx_stopped = tx_stopped;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;
	vsi->rx_page_reuse = rx_reuse;
	vsi->rx_page_alloc = rx_alloc;
	vsi->rx_page_waive = rx_waive;
	vsi->rx_page_busy = rx_busy;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards_other;
	ns->rx_dropped = es->rx_discards_other;
	ons->rx_missed_errors = oes->rx_discards;
	ns->rx_missed_errors = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 */
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
			&nsd->fd_sb_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_tunnel_match);

	nsd->tx_lpi_status =
		...;
	nsd->rx_lpi_status =
		...;
	i40e_stat_update32(hw, ...,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, ...,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
	    !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
	    !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
/**
 * i40e_update_stats - Update the various statistics counters.
 */
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);
/**
 * i40e_count_filters - counts VSI mac filters
 */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		...
/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 */
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan))
			return f;
	}

	return NULL;
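/*
 * Illustrative sketch (not part of the driver; the example_* names are
 * hypothetical): a <linux/hashtable.h> lookup keyed on the MAC address,
 * as i40e_find_filter() does. The key only selects the bucket; every
 * candidate in the bucket is still compared field by field before a
 * match is declared.
 */
struct example_filter {
	struct hlist_node hlist;
	u8 macaddr[ETH_ALEN];
	s16 vlan;
};

static DEFINE_HASHTABLE(example_filter_hash, 8);

static struct example_filter *example_find(const u8 *macaddr, s16 vlan)
{
	struct example_filter *f;
	u64 key = ether_addr_to_u64(macaddr);	/* bucket selector only */

	hash_for_each_possible(example_filter_hash, f, hlist, key) {
		if (ether_addr_equal(macaddr, f->macaddr) && vlan == f->vlan)
			return f;
	}
	return NULL;
}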
/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 */
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)))
			return f;
	}

	return NULL;
/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 */
	if (vsi->info.pvid)
		return true;

	return vsi->has_vlan_filter;
/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * ... to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * ... active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * ... operate in non-VLAN mode and receive all traffic, tagged or untagged.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 */
	s16 pvid = le16_to_cpu(vsi->info.pvid);

	/* ... which are marked as VLAN=-1 must be replaced with
	 * ... marked as VLAN=-1
	 */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			...
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;
			...
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			hlist_add_head(&new->hlist, tmp_add_list);

			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;
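/*
 * Illustrative sketch (not part of the driver; names prefixed example_/EX_
 * are hypothetical): the three-way VLAN normalization applied above. A
 * port VLAN (PVID) wins outright; otherwise the filter flips between
 * VLAN=0 ("untagged only") and VLAN=-1 ("any") depending on whether any
 * active VLAN filters exist on the VSI.
 */
#define EX_VLAN_ANY -1

static s16 example_correct_vlan(s16 cur_vlan, s16 pvid, bool vlan_filters)
{
	if (pvid && cur_vlan != pvid)
		return pvid;		/* port VLAN overrides everything */
	if (vlan_filters && cur_vlan == EX_VLAN_ANY)
		return 0;		/* VLANs active: match untagged only */
	if (!vlan_filters && cur_vlan == 0)
		return EX_VLAN_ANY;	/* no VLANs: receive tagged + untagged */
	return cur_vlan;
}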
/**
 * i40e_get_vf_new_vlan - Get new vlan id on a vf
 * @f: existing mac filter, replaced with new_mac->f if new_mac is not NULL
 *
 * ... and vf-vlan-prune-disable flag.
 */
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_pf *pf = vsi->back;

	if (new_mac)
		f = new_mac->f;

	if (pvid && f->vlan != pvid)
		return pvid;

	is_any = (trusted ||
		  !test_bit(I40E_FLAG_VF_VLAN_PRUNING_ENA, pf->flags));

	if ((vlan_filters && f->vlan == I40E_VLAN_ANY) ||
	    (!is_any && !vlan_filters && f->vlan == I40E_VLAN_ANY) ||
	    (is_any && !vlan_filters && f->vlan == 0)) {
		...
	}

	return f->vlan;
/**
 * i40e_correct_vf_mac_vlan_filters - Correct non-VLAN VF filters if necessary
 *
 * ... and vf-vlan-prune-disable flag.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 */
	hlist_for_each_entry(new_mac, tmp_add_list, hlist) {
		new_mac->f->vlan = i40e_get_vf_new_vlan(vsi, new_mac, NULL,
							vlan_filters, trusted);
	}

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		...
		if (new_vlan != f->vlan) {
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;
			...
				return -ENOMEM;

			new_mac->f = add_head;
			new_mac->state = add_head->state;

			hlist_add_head(&new_mac->hlist, tmp_add_list);

			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;
/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 */
	struct i40e_pf *pf = vsi->back;

	if (vsi->type != I40E_VSI_MAIN)
		return;
	...
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
	...
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 */
		vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
	}

	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;
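/*
 * Illustrative sketch (not part of the driver; example_replace_filter() is
 * hypothetical): filter list mutations are always made under the VSI's
 * mac_filter_hash_lock, and the hardware update is deferred - the
 * MACVLAN_SYNC_PENDING bit tells the service task to push the accumulated
 * changes later via i40e_sync_vsi_filters().
 */
static int example_replace_filter(struct i40e_vsi *vsi,
				  const u8 *old_mac, const u8 *new_mac)
{
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, old_mac);
	if (!i40e_add_mac_filter(vsi, new_mac)) {
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		return -ENOMEM;
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	return 0;
}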
/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 */
	if ((f->state == I40E_FILTER_FAILED) ||
	    (f->state == I40E_FILTER_NEW)) {
		hash_del(&f->hlist);
		kfree(f);
	} else {
		f->state = I40E_FILTER_REMOVE;
	}

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 */
/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 */
	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add = i40e_add_filter(vsi, macaddr, f->vlan);
		if (!add)
			return NULL;
	}
/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 */
	lockdep_assert_held(&vsi->mac_filter_hash_lock);
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr)) {
			__i40e_del_filter(vsi, f);
			found = true;
		}
	}

	if (found)
		return 0;
	else
		return -ENOENT;
/**
 * i40e_set_mac - NDO callback to set mac address
 */
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data)) {
		netdev_info(netdev, ...,
			    hw->mac.addr);
		return 0;
	}

	netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	/* ...
	 * - Remove old address from MAC filter
	 * - Copy new address
	 * - Add new address to MAC filter
	 */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	eth_hw_addr_set(netdev, addr->sa_data);
	i40e_add_mac_filter(vsi, netdev->dev_addr);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (vsi->type == I40E_VSI_MAIN) {
		ret = i40e_aq_mac_address_write(hw, ...,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, ...,
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}
/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 */
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;

	if (seed) {
		...
		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 ...,
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN;

		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 ...,
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 */
	struct i40e_pf *pf = vsi->back;

	if (!test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps))
		return 0;
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		...
	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);
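/*
 * Illustrative sketch (not part of the driver; example_fill_rss_lut() is
 * hypothetical): the round-robin fill applied when the user has not
 * supplied a lookup table - entry i simply points at queue i modulo the
 * active RSS width, spreading hash buckets evenly across queues.
 */
static void example_fill_rss_lut(u8 *lut, u16 lut_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < lut_size; i++)
		lut[i] = i % rss_size;	/* spread buckets across queues */
}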
/**
 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
 */
	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	num_qps = vsi->mqprio_qopt.qopt.count[0];

	/* find the next higher power-of-2 of num queue pairs */
	...

	/* Setup queue offset/count for all TCs for given VSI */
	max_qcount = vsi->mqprio_qopt.qopt.count[0];
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount = vsi->mqprio_qopt.qopt.count[i];
			...
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;
			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
		} else {
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;
		}
	}

	vsi->num_queue_pairs = offset + qcount;

	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);

	vsi->rss_size = max_qcount;
	ret = i40e_vsi_config_rss(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 ...);
		return ret;
	}
	vsi->reconfig_rss = true;
	dev_dbg(&vsi->back->pdev->dev,
		...);

	override_q = vsi->mqprio_qopt.qopt.count[0];
	if (override_q && override_q < vsi->num_queue_pairs) {
		vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
		vsi->next_base_queue = override_q;
	}
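/*
 * Illustrative sketch (not part of the driver; example_qcount_pow() is
 * hypothetical): the "next higher power-of-2" computation the queue-map
 * code performs before encoding a TC's queue count into the qmap field,
 * since the hardware expects log2-sized queue groups.
 */
static u8 example_qcount_pow(u16 num_qps)
{
	u8 pow = ilog2(num_qps);

	if (!is_power_of_2(num_qps))
		pow++;			/* round up to the next power of 2 */
	return pow;
}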
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @enabled_tc: Enabled TCs bitmap
 */
	struct i40e_pf *pf = vsi->back;

	memset(ctxt->info.queue_mapping, 0, sizeof(ctxt->info.queue_mapping));

	if (vsi->type == I40E_VSI_MAIN) {
		/* ...
		 * non-zero req_queue_pairs says that user requested a new
		 * queue count ...
		 */
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
			vsi->num_queue_pairs = pf->num_lan_msix;
		else
			vsi->num_queue_pairs = 1;
	}

	if (vsi->type == I40E_VSI_MAIN ||
	    (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs != 0))
		num_tc_qps = vsi->num_queue_pairs;
	else
		num_tc_qps = vsi->alloc_queue_pairs;

	if (enabled_tc && test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) {
		...
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;

	/* Do not allow use more TC queue pairs than MSI-X vectors exist */
	if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
		num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			switch (vsi->type) {
			case I40E_VSI_MAIN:
				if ((!test_bit(I40E_FLAG_FD_SB_ENA,
					       pf->flags) &&
				     !test_bit(I40E_FLAG_FD_ATR_ENA,
					       pf->flags)) ||
				    vsi->tc_config.enabled_tc != 1) {
					qcount = min_t(int, pf->alloc_rss_size,
						       num_tc_qps);
					break;
				}
				...
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			...
			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
		} else {
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;
		}

		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	if ((vsi->type == I40E_VSI_MAIN && numtc != 1) ||
	    (vsi->type == I40E_VSI_SRIOV && vsi->num_queue_pairs == 0) ||
	    (vsi->type != I40E_VSI_MAIN && vsi->type != I40E_VSI_SRIOV))
		vsi->num_queue_pairs = offset;

	ctxt->info.up_enable_bits = enabled_tc;

	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
				cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
/**
 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
 */
	struct i40e_vsi *vsi = np->vsi;

	if (i40e_add_mac_filter(vsi, addr))
		return 0;
	else
		return -ENOMEM;
/**
 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 */
	struct i40e_vsi *vsi = np->vsi;

	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;
/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 */
	struct i40e_vsi *vsi = np->vsi;

	spin_lock_bh(&vsi->mac_filter_hash_lock);

	__dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
	__dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
	}
/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        ...
 */
	hlist_for_each_entry_safe(f, h, from, hlist) {
		u64 key = i40e_addr_to_hkey(f->macaddr);

		hlist_del(&f->hlist);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);
	}
/**
 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        ...
 */
	hlist_for_each_entry_safe(new, h, from, hlist) {
		hlist_del(&new->hlist);
		netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
		kfree(new);
	}
/**
 * i40e_next_filter - Get the next non-broadcast filter from a list
 *
 * Returns the next non-broadcast filter in the list. Required so that we
 * ...
 */
	hlist_for_each_entry_continue(next, hlist) {
		if (!is_broadcast_ether_addr(next->f->macaddr))
			return next;
	}

	return NULL;
/**
 * i40e_update_filter_state - Update filter state based on return data
 */
		/* ...
		 * the firmware return status because we pre-set the filter
		 * ...
		 */
		if (...) {
			add_head->state = I40E_FILTER_FAILED;
		} else {
			add_head->state = I40E_FILTER_ACTIVE;
		}
/**
 * i40e_aqc_del_filters - Request firmware to delete a set of filters
 * @retval: Set to -EIO on failure to delete
 */
	struct i40e_hw *hw = &vsi->back->hw;

	aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
					   &aq_status);

	if (aq_ret && ...) {
		*retval = -EIO;
		dev_info(&vsi->back->pdev->dev,
			 ...);
	}
/**
 * i40e_aqc_add_filters - Request firmware to add a set of filters
 *
 * ... sets the
 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
 * ...
 */
	struct i40e_hw *hw = &vsi->back->hw;

	i40e_aq_add_macvlan_v2(hw, vsi->seid, list, num_add, NULL, &aq_status);
	fcnt = i40e_update_filter_state(num_add, list, add_head);

	if (fcnt != num_add) {
		if (vsi->type == I40E_VSI_MAIN) {
			set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
			dev_warn(&vsi->back->pdev->dev,
				 ...);
		} else if (vsi->type == I40E_VSI_SRIOV ||
			   vsi->type == I40E_VSI_VMDQ1 ||
			   vsi->type == I40E_VSI_VMDQ2) {
			dev_warn(&vsi->back->pdev->dev,
				 ...);
		} else {
			dev_warn(&vsi->back->pdev->dev,
				 ...,
				 vsi->type);
		}
	}
/**
 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
 */
	bool enable = f->state == I40E_FILTER_NEW;
	struct i40e_hw *hw = &vsi->back->hw;

	if (f->vlan == I40E_VLAN_ANY) {
		aq_ret = i40e_aq_set_vsi_broadcast(hw,
						   vsi->seid,
						   enable,
						   NULL);
	} else {
		aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
							    vsi->seid,
							    enable,
							    f->vlan,
							    NULL);
	}

	if (aq_ret) {
		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		dev_warn(&vsi->back->pdev->dev,
			 ...,
			 i40e_aq_str(hw, hw->aq.asq_last_status),
			 ...);
	}
/**
 * i40e_set_promiscuous - set promiscuous mode
 */
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_hw *hw = &pf->hw;

	if (vsi->type == I40E_VSI_MAIN &&
	    pf->lan_veb != I40E_NO_VEB &&
	    !test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
		if (promisc)
			aq_ret = i40e_aq_set_default_vsi(hw,
							 vsi->seid,
							 NULL);
		else
			aq_ret = i40e_aq_clear_default_vsi(hw,
							   vsi->seid,
							   NULL);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 ...,
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return aq_ret;
		}
	} else {
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw,
							     vsi->seid,
							     promisc, NULL,
							     true);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 ...,
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw,
							       vsi->seid,
							       promisc, NULL);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 ...,
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}

	if (!aq_ret)
		pf->cur_promisc = promisc;
/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 */
	struct i40e_hw *hw = &vsi->back->hw;

	while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	if (vsi->type == I40E_VSI_SRIOV)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
	else if (vsi->type != I40E_VSI_MAIN)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
			if (f->state == I40E_FILTER_REMOVE) {
				hash_del(&f->hlist);
				hlist_add_head(&f->hlist, &tmp_del_list);
			}
			if (f->state == I40E_FILTER_NEW) {
				...
				new->f = f;
				new->state = f->state;

				hlist_add_head(&new->hlist, &tmp_add_list);
			}

			if (f->vlan > 0)
				vlan_filters++;
		}

		if (vsi->type != I40E_VSI_SRIOV)
			retval = i40e_correct_mac_vlan_filters(vsi,
							       &tmp_add_list,
							       &tmp_del_list,
							       vlan_filters);
		else if (pf->vf)
			retval = i40e_correct_vf_mac_vlan_filters(vsi,
					&tmp_add_list, &tmp_del_list,
					vlan_filters, pf->vf[vsi->vf_id].trusted);

		hlist_for_each_entry(new, &tmp_add_list, hlist)
			netdev_hw_addr_refcnt(new->f, vsi->netdev, 1);

		...
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}

	/* Now process 'del_list' outside the lock */
	if (!hlist_empty(&tmp_del_list)) {
		filter_list_len = hw->aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		...

		hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
			if (is_broadcast_ether_addr(f->macaddr)) {
				...
				hlist_del(&f->hlist);
				kfree(f);
				continue;
			}

			/* add to delete list */
			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
			if (f->vlan == I40E_VLAN_ANY) {
				...
			} else {
				del_list[num_del].vlan_tag =
					cpu_to_le16((u16)(f->vlan));
			}
			...
			hlist_del(&f->hlist);
			kfree(f);
		}
	}

	if (!hlist_empty(&tmp_add_list)) {
		filter_list_len = hw->aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		...

		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
			if (is_broadcast_ether_addr(new->f->macaddr)) {
				if (i40e_aqc_broadcast_filter(vsi, vsi_name,
							      new->f))
					new->state = I40E_FILTER_FAILED;
				else
					new->state = I40E_FILTER_ACTIVE;
				continue;
			}

			/* add to add array */
			ether_addr_copy(add_list[num_add].mac_addr,
					new->f->macaddr);
			if (new->f->vlan == I40E_VLAN_ANY) {
				...
			} else {
				add_list[num_add].vlan_tag =
					cpu_to_le16((u16)(new->f->vlan));
			}
			...
		}

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
			/* Only update the state if we're still NEW */
			if (new->f->state == I40E_FILTER_NEW)
				new->f->state = new->state;
			hlist_del(&new->hlist);
			netdev_hw_addr_refcnt(new->f, vsi->netdev, -1);
			kfree(new);
		}
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}

	/* Determine the number of active and failed filters. */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	vsi->active_filters = 0;
	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->state == I40E_FILTER_ACTIVE)
			vsi->active_filters++;
		else if (f->state == I40E_FILTER_FAILED)
			failed_filters++;
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (old_overflow && !failed_filters &&
	    vsi->active_filters < vsi->promisc_threshold) {
		dev_info(&pf->pdev->dev,
			 ...);
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		vsi->promisc_threshold = 0;
	}

	/* if the VF is not trusted do not do promisc */
	if (vsi->type == I40E_VSI_SRIOV && pf->vf &&
	    !pf->vf[vsi->vf_id].trusted) {
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		goto out;
	}

	new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);

	/* If we are entering overflow promiscuous, we need to calculate a new
	 * threshold for when we are safe to exit
	 */
	if (!old_overflow && new_overflow)
		vsi->promisc_threshold = (vsi->active_filters * 3) / 4;

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;

		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     hw->aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 ...,
				 i40e_aq_str(hw, hw->aq.asq_last_status));
2833 dev_info(&pf->pdev->dev, "%s allmulti mode.\n", in i40e_sync_vsi_filters()
2841 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || in i40e_sync_vsi_filters()
2846 hw->aq.asq_last_status); in i40e_sync_vsi_filters()
2847 dev_info(&pf->pdev->dev, in i40e_sync_vsi_filters()
2852 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_sync_vsi_filters()
2858 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_sync_vsi_filters()
2860 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state); in i40e_sync_vsi_filters()
2865 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2869 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_sync_vsi_filters()
2871 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_sync_vsi_filters()
2872 clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state); in i40e_sync_vsi_filters()
2873 return -ENOMEM; in i40e_sync_vsi_filters()
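
Aside: i40e_sync_vsi_filters() above detaches pending deletions and additions onto tmp_del_list/tmp_add_list while holding mac_filter_hash_lock, and only then issues the slow admin-queue commands with the lock dropped. A self-contained sketch of that two-phase shape, with a pthread mutex standing in for the spinlock and hypothetical names throughout:

#include <pthread.h>
#include <stdio.h>

#define MAX_PENDING 8

static pthread_mutex_t filter_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending[MAX_PENDING];
static int npending;

static void slow_firmware_add(int id)      /* must not hold filter_lock */
{
        printf("fw: add filter %d\n", id);
}

static void sync_filters(void)
{
        int batch[MAX_PENDING], n, i;

        pthread_mutex_lock(&filter_lock);  /* phase 1: detach under lock */
        n = npending;
        for (i = 0; i < n; i++)
                batch[i] = pending[i];
        npending = 0;
        pthread_mutex_unlock(&filter_lock);

        for (i = 0; i < n; i++)            /* phase 2: slow work, unlocked */
                slow_firmware_add(batch[i]);
}

int main(void)
{
        pending[npending++] = 42;
        sync_filters();
        return 0;
}
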
2877 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2886 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state)) in i40e_sync_filters_subtask()
2888 if (test_bit(__I40E_VF_DISABLE, pf->state)) { in i40e_sync_filters_subtask()
2889 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); in i40e_sync_filters_subtask()
2893 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_sync_filters_subtask()
2894 if (pf->vsi[v] && in i40e_sync_filters_subtask()
2895 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED) && in i40e_sync_filters_subtask()
2896 !test_bit(__I40E_VSI_RELEASING, pf->vsi[v]->state)) { in i40e_sync_filters_subtask()
2897 int ret = i40e_sync_vsi_filters(pf->vsi[v]); in i40e_sync_filters_subtask()
2902 pf->state); in i40e_sync_filters_subtask()
2910 * i40e_calculate_vsi_rx_buf_len - Calculates buffer length
2916 if (!vsi->netdev || test_bit(I40E_FLAG_LEGACY_RX_ENA, vsi->back->flags)) in i40e_calculate_vsi_rx_buf_len()
2923 * i40e_max_vsi_frame_size - returns the maximum allowed frame size for VSI
2933 if (xdp_prog && !xdp_prog->aux->xdp_has_frags) in i40e_max_vsi_frame_size()
2942 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2951 struct i40e_vsi *vsi = np->vsi; in i40e_change_mtu()
2952 struct i40e_pf *pf = vsi->back; in i40e_change_mtu()
2955 frame_size = i40e_max_vsi_frame_size(vsi, vsi->xdp_prog); in i40e_change_mtu()
2956 if (new_mtu > frame_size - I40E_PACKET_HDR_PAD) { in i40e_change_mtu()
2958 new_mtu, frame_size - I40E_PACKET_HDR_PAD); in i40e_change_mtu()
2959 return -EINVAL; in i40e_change_mtu()
2963 netdev->mtu, new_mtu); in i40e_change_mtu()
2964 netdev->mtu = new_mtu; in i40e_change_mtu()
2967 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_change_mtu()
2968 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); in i40e_change_mtu()
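
Aside: the MTU check above refuses any value that would overflow the maximum frame once link-layer overhead (I40E_PACKET_HDR_PAD) is added. A sketch of the bound check; both constants are assumed here, not taken from the driver:

#include <stdio.h>

#define MAX_FRAME_SIZE 9728        /* assumed hardware frame limit */
#define PACKET_HDR_PAD   26        /* assumed Ethernet + VLAN overhead */

static int change_mtu(int new_mtu)
{
        if (new_mtu > MAX_FRAME_SIZE - PACKET_HDR_PAD) {
                fprintf(stderr, "mtu %d too large, max %d\n",
                        new_mtu, MAX_FRAME_SIZE - PACKET_HDR_PAD);
                return -1;
        }
        printf("mtu set to %d\n", new_mtu);
        return 0;
}

int main(void)
{
        return change_mtu(9000) || change_mtu(9710);
}
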
2973 * i40e_ioctl - Access the hwtstamp interface
2981 struct i40e_pf *pf = np->vsi->back; in i40e_ioctl()
2989 return -EOPNOTSUPP; in i40e_ioctl()
2994 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
3003 if (vsi->info.pvid) in i40e_vlan_stripping_enable()
3006 if ((vsi->info.valid_sections & in i40e_vlan_stripping_enable()
3008 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0)) in i40e_vlan_stripping_enable()
3011 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); in i40e_vlan_stripping_enable()
3012 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | in i40e_vlan_stripping_enable()
3015 ctxt.seid = vsi->seid; in i40e_vlan_stripping_enable()
3016 ctxt.info = vsi->info; in i40e_vlan_stripping_enable()
3017 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_vlan_stripping_enable()
3019 dev_info(&vsi->back->pdev->dev, in i40e_vlan_stripping_enable()
3022 i40e_aq_str(&vsi->back->hw, in i40e_vlan_stripping_enable()
3023 vsi->back->hw.aq.asq_last_status)); in i40e_vlan_stripping_enable()
3028 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
3037 if (vsi->info.pvid) in i40e_vlan_stripping_disable()
3040 if ((vsi->info.valid_sections & in i40e_vlan_stripping_disable()
3042 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) == in i40e_vlan_stripping_disable()
3046 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); in i40e_vlan_stripping_disable()
3047 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | in i40e_vlan_stripping_disable()
3050 ctxt.seid = vsi->seid; in i40e_vlan_stripping_disable()
3051 ctxt.info = vsi->info; in i40e_vlan_stripping_disable()
3052 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_vlan_stripping_disable()
3054 dev_info(&vsi->back->pdev->dev, in i40e_vlan_stripping_disable()
3057 i40e_aq_str(&vsi->back->hw, in i40e_vlan_stripping_disable()
3058 vsi->back->hw.aq.asq_last_status)); in i40e_vlan_stripping_disable()
3063 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
3065 * @vid: vlan id to be added (0 = untagged only, -1 = any)
3081 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_add_vlan_all_mac()
3091 if (f->state == I40E_FILTER_REMOVE && f->vlan == vid) { in i40e_add_vlan_all_mac()
3092 f->state = I40E_FILTER_ACTIVE; in i40e_add_vlan_all_mac()
3094 } else if (f->state == I40E_FILTER_REMOVE) { in i40e_add_vlan_all_mac()
3097 add_f = i40e_add_filter(vsi, f->macaddr, vid); in i40e_add_vlan_all_mac()
3099 dev_info(&vsi->back->pdev->dev, in i40e_add_vlan_all_mac()
3101 vid, f->macaddr); in i40e_add_vlan_all_mac()
3102 return -ENOMEM; in i40e_add_vlan_all_mac()
3110 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
3118 if (vsi->info.pvid) in i40e_vsi_add_vlan()
3119 return -EINVAL; in i40e_vsi_add_vlan()
3133 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_add_vlan()
3135 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_add_vlan()
3142 i40e_service_event_schedule(vsi->back); in i40e_vsi_add_vlan()
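
Aside: per the @vid conventions noted above, a filter VLAN of 0 matches untagged traffic only while -1 (I40E_VLAN_ANY) matches any VLAN. A tiny hypothetical helper expressing that rule:

#define VLAN_ANY (-1)

/* Would traffic on pkt_vid (0 meaning untagged) be accepted by a
 * filter programmed with filter_vid under the convention above? */
static int filter_matches(int filter_vid, int pkt_vid)
{
        return filter_vid == VLAN_ANY || filter_vid == pkt_vid;
}
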
3147 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
3149 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
3165 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_rm_vlan_all_mac()
3166 if (f->vlan == vid) in i40e_rm_vlan_all_mac()
3172 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
3178 if (!vid || vsi->info.pvid) in i40e_vsi_kill_vlan()
3181 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_kill_vlan()
3183 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_kill_vlan()
3188 i40e_service_event_schedule(vsi->back); in i40e_vsi_kill_vlan()
3192 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
3203 struct i40e_vsi *vsi = np->vsi; in i40e_vlan_rx_add_vid()
3207 return -EINVAL; in i40e_vlan_rx_add_vid()
3211 set_bit(vid, vsi->active_vlans); in i40e_vlan_rx_add_vid()
3217 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
3226 struct i40e_vsi *vsi = np->vsi; in i40e_vlan_rx_add_vid_up()
3230 set_bit(vid, vsi->active_vlans); in i40e_vlan_rx_add_vid_up()
3234 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
3245 struct i40e_vsi *vsi = np->vsi; in i40e_vlan_rx_kill_vid()
3253 clear_bit(vid, vsi->active_vlans); in i40e_vlan_rx_kill_vid()
3259 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
3266 if (!vsi->netdev) in i40e_restore_vlan()
3269 if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) in i40e_restore_vlan()
3274 for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID) in i40e_restore_vlan()
3275 i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q), in i40e_restore_vlan()
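
Aside: i40e_restore_vlan() above walks the active_vlans bitmap with for_each_set_bit() and re-adds a filter for every set bit. The user-space equivalent of that bitmap walk, with hypothetical names:

#include <stdio.h>

#define VLAN_N_VID 4096
#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

static unsigned long active_vlans[VLAN_N_VID / BITS_PER_LONG];

static void restore_vlans(void)
{
        int vid;

        for (vid = 0; vid < VLAN_N_VID; vid++)
                if (active_vlans[vid / BITS_PER_LONG] &
                    (1UL << (vid % BITS_PER_LONG)))
                        printf("re-add vlan %d\n", vid);
}

int main(void)
{
        active_vlans[100 / BITS_PER_LONG] |= 1UL << (100 % BITS_PER_LONG);
        restore_vlans();
        return 0;
}
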
3280 * i40e_vsi_add_pvid - Add pvid for the VSI
3289 vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID); in i40e_vsi_add_pvid()
3290 vsi->info.pvid = cpu_to_le16(vid); in i40e_vsi_add_pvid()
3291 vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED | in i40e_vsi_add_pvid()
3295 ctxt.seid = vsi->seid; in i40e_vsi_add_pvid()
3296 ctxt.info = vsi->info; in i40e_vsi_add_pvid()
3297 ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL); in i40e_vsi_add_pvid()
3299 dev_info(&vsi->back->pdev->dev, in i40e_vsi_add_pvid()
3302 i40e_aq_str(&vsi->back->hw, in i40e_vsi_add_pvid()
3303 vsi->back->hw.aq.asq_last_status)); in i40e_vsi_add_pvid()
3304 return -ENOENT; in i40e_vsi_add_pvid()
3311 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
3318 vsi->info.pvid = 0; in i40e_vsi_remove_pvid()
3324 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
3337 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_setup_tx_resources()
3338 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]); in i40e_vsi_setup_tx_resources()
3343 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_setup_tx_resources()
3344 err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]); in i40e_vsi_setup_tx_resources()
3350 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
3359 if (vsi->tx_rings) { in i40e_vsi_free_tx_resources()
3360 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_free_tx_resources()
3361 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) in i40e_vsi_free_tx_resources()
3362 i40e_free_tx_resources(vsi->tx_rings[i]); in i40e_vsi_free_tx_resources()
3365 if (vsi->xdp_rings) { in i40e_vsi_free_tx_resources()
3366 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_free_tx_resources()
3367 if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) in i40e_vsi_free_tx_resources()
3368 i40e_free_tx_resources(vsi->xdp_rings[i]); in i40e_vsi_free_tx_resources()
3373 * i40e_vsi_setup_rx_resources - Allocate VSI Rx queue resources
3386 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_setup_rx_resources()
3387 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]); in i40e_vsi_setup_rx_resources()
3392 * i40e_vsi_free_rx_resources - Free Rx resources for VSI queues
3401 if (!vsi->rx_rings) in i40e_vsi_free_rx_resources()
3404 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_vsi_free_rx_resources()
3405 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc) in i40e_vsi_free_rx_resources()
3406 i40e_free_rx_resources(vsi->rx_rings[i]); in i40e_vsi_free_rx_resources()
3410 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
3414 * based on the TCs enabled for the VSI that ring belongs to.
3420 if (!ring->q_vector || !ring->netdev || ring->ch) in i40e_config_xps_tx_ring()
3424 if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state)) in i40e_config_xps_tx_ring()
3427 cpu = cpumask_local_spread(ring->q_vector->v_idx, -1); in i40e_config_xps_tx_ring()
3428 netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu), in i40e_config_xps_tx_ring()
3429 ring->queue_index); in i40e_config_xps_tx_ring()
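
Aside: the test_and_set_bit() on __I40E_TX_XPS_INIT_DONE above makes the XPS programming one-shot per ring; repeated calls become no-ops. The same guard in C11 atomics (hypothetical sketch):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag xps_init_done = ATOMIC_FLAG_INIT;

static void config_xps(int queue, int cpu)
{
        if (atomic_flag_test_and_set(&xps_init_done))
                return;                    /* already programmed once */
        printf("map tx queue %d to cpu %d\n", queue, cpu);
}

int main(void)
{
        config_xps(0, 2);
        config_xps(0, 5);                  /* no-op: one-shot guard hit */
        return 0;
}
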
3433 * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC is enabled
3440 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi); in i40e_xsk_pool()
3441 int qid = ring->queue_index; in i40e_xsk_pool()
3444 qid -= ring->vsi->alloc_queue_pairs; in i40e_xsk_pool()
3446 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps)) in i40e_xsk_pool()
3449 return xsk_get_pool_from_qid(ring->vsi->netdev, qid); in i40e_xsk_pool()
3453 * i40e_configure_tx_ring - Configure a transmit ring context and rest
3460 struct i40e_vsi *vsi = ring->vsi; in i40e_configure_tx_ring()
3461 u16 pf_q = vsi->base_queue + ring->queue_index; in i40e_configure_tx_ring()
3462 struct i40e_hw *hw = &vsi->back->hw; in i40e_configure_tx_ring()
3468 ring->xsk_pool = i40e_xsk_pool(ring); in i40e_configure_tx_ring()
3471 if (test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags)) { in i40e_configure_tx_ring()
3472 ring->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; in i40e_configure_tx_ring()
3473 ring->atr_count = 0; in i40e_configure_tx_ring()
3475 ring->atr_sample_rate = 0; in i40e_configure_tx_ring()
3485 tx_ctx.base = (ring->dma / 128); in i40e_configure_tx_ring()
3486 tx_ctx.qlen = ring->count; in i40e_configure_tx_ring()
3487 if (test_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags) || in i40e_configure_tx_ring()
3488 test_bit(I40E_FLAG_FD_ATR_ENA, vsi->back->flags)) in i40e_configure_tx_ring()
3490 if (test_bit(I40E_FLAG_PTP_ENA, vsi->back->flags)) in i40e_configure_tx_ring()
3493 if (vsi->type != I40E_VSI_FDIR) in i40e_configure_tx_ring()
3495 tx_ctx.head_wb_addr = ring->dma + in i40e_configure_tx_ring()
3496 (ring->count * sizeof(struct i40e_tx_desc)); in i40e_configure_tx_ring()
3509 if (ring->ch) in i40e_configure_tx_ring()
3511 le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]); in i40e_configure_tx_ring()
3514 tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]); in i40e_configure_tx_ring()
3521 dev_info(&vsi->back->pdev->dev, in i40e_configure_tx_ring()
3523 ring->queue_index, pf_q, err); in i40e_configure_tx_ring()
3524 return -ENOMEM; in i40e_configure_tx_ring()
3530 dev_info(&vsi->back->pdev->dev, in i40e_configure_tx_ring()
3532 ring->queue_index, pf_q, err); in i40e_configure_tx_ring()
3533 return -ENOMEM; in i40e_configure_tx_ring()
3537 if (ring->ch) { in i40e_configure_tx_ring()
3538 if (ring->ch->type == I40E_VSI_VMDQ2) in i40e_configure_tx_ring()
3541 return -EINVAL; in i40e_configure_tx_ring()
3544 ring->ch->vsi_number); in i40e_configure_tx_ring()
3546 if (vsi->type == I40E_VSI_VMDQ2) { in i40e_configure_tx_ring()
3549 vsi->id); in i40e_configure_tx_ring()
3555 qtx_ctl |= FIELD_PREP(I40E_QTX_CTL_PF_INDX_MASK, hw->pf_id); in i40e_configure_tx_ring()
3560 ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q); in i40e_configure_tx_ring()
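
Aside: the head_wb_addr computed above points at the first byte past the descriptor array, giving the hardware somewhere to write back its consumed head. A sketch of the arithmetic; the 16-byte descriptor layout is an assumption:

#include <stdint.h>
#include <stdio.h>

struct tx_desc {
        uint64_t buffer_addr;              /* assumed 16-byte descriptor */
        uint64_t cmd_type_offset_bsz;
};

static uint64_t head_wb_addr(uint64_t ring_dma, uint16_t count)
{
        /* first byte past the descriptor array */
        return ring_dma + (uint64_t)count * sizeof(struct tx_desc);
}

int main(void)
{
        printf("head wb at 0x%llx\n",
               (unsigned long long)head_wb_addr(0x100000, 512));
        return 0;
}
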
3566 * i40e_rx_offset - Return expected offset into page to access data
3577 * i40e_configure_rx_ring - Configure a receive ring context
3584 struct i40e_vsi *vsi = ring->vsi; in i40e_configure_rx_ring()
3585 u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len; in i40e_configure_rx_ring()
3586 u16 pf_q = vsi->base_queue + ring->queue_index; in i40e_configure_rx_ring()
3587 struct i40e_hw *hw = &vsi->back->hw; in i40e_configure_rx_ring()
3592 bitmap_zero(ring->state, __I40E_RING_STATE_NBITS); in i40e_configure_rx_ring()
3597 ring->rx_buf_len = vsi->rx_buf_len; in i40e_configure_rx_ring()
3599 /* XDP RX-queue info only needed for RX rings exposed to XDP */ in i40e_configure_rx_ring()
3600 if (ring->vsi->type != I40E_VSI_MAIN) in i40e_configure_rx_ring()
3603 if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) { in i40e_configure_rx_ring()
3604 err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, in i40e_configure_rx_ring()
3605 ring->queue_index, in i40e_configure_rx_ring()
3606 ring->q_vector->napi.napi_id, in i40e_configure_rx_ring()
3607 ring->rx_buf_len); in i40e_configure_rx_ring()
3612 ring->xsk_pool = i40e_xsk_pool(ring); in i40e_configure_rx_ring()
3613 if (ring->xsk_pool) { in i40e_configure_rx_ring()
3614 xdp_rxq_info_unreg(&ring->xdp_rxq); in i40e_configure_rx_ring()
3615 ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); in i40e_configure_rx_ring()
3616 err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, in i40e_configure_rx_ring()
3617 ring->queue_index, in i40e_configure_rx_ring()
3618 ring->q_vector->napi.napi_id, in i40e_configure_rx_ring()
3619 ring->rx_buf_len); in i40e_configure_rx_ring()
3622 err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in i40e_configure_rx_ring()
3627 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3629 ring->queue_index); in i40e_configure_rx_ring()
3632 err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in i40e_configure_rx_ring()
3640 xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq); in i40e_configure_rx_ring()
3642 rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len, in i40e_configure_rx_ring()
3645 rx_ctx.base = (ring->dma / 128); in i40e_configure_rx_ring()
3646 rx_ctx.qlen = ring->count; in i40e_configure_rx_ring()
3656 rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len); in i40e_configure_rx_ring()
3657 if (hw->revision_id == 0) in i40e_configure_rx_ring()
3671 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3673 ring->queue_index, pf_q, err); in i40e_configure_rx_ring()
3674 return -ENOMEM; in i40e_configure_rx_ring()
3680 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3682 ring->queue_index, pf_q, err); in i40e_configure_rx_ring()
3683 return -ENOMEM; in i40e_configure_rx_ring()
3687 if (!vsi->netdev || test_bit(I40E_FLAG_LEGACY_RX_ENA, vsi->back->flags)) { in i40e_configure_rx_ring()
3689 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3691 return -EOPNOTSUPP; in i40e_configure_rx_ring()
3698 ring->rx_offset = i40e_rx_offset(ring); in i40e_configure_rx_ring()
3701 ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q); in i40e_configure_rx_ring()
3702 writel(0, ring->tail); in i40e_configure_rx_ring()
3704 if (ring->xsk_pool) { in i40e_configure_rx_ring()
3705 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); in i40e_configure_rx_ring()
3714 dev_info(&vsi->back->pdev->dev, in i40e_configure_rx_ring()
3716 ring->xsk_pool ? "AF_XDP ZC enabled " : "", in i40e_configure_rx_ring()
3717 ring->queue_index, pf_q); in i40e_configure_rx_ring()
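
Aside: the rxmax value above caps a received frame at the smaller of the VSI maximum frame and what the buffer chain can hold (chain length times per-buffer size). In isolation:

#include <stdio.h>

static unsigned int rx_max_frame(unsigned int max_frame,
                                 unsigned int chain_len,
                                 unsigned int rx_buf_len)
{
        unsigned int chain_cap = chain_len * rx_buf_len;

        return max_frame < chain_cap ? max_frame : chain_cap;
}

int main(void)
{
        /* 5 chained 2 KiB buffers can hold more than a 9728-byte frame */
        printf("rxmax %u\n", rx_max_frame(9728, 5, 2048));
        return 0;
}
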
3724 * i40e_vsi_configure_tx - Configure the VSI for Tx
3734 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) in i40e_vsi_configure_tx()
3735 err = i40e_configure_tx_ring(vsi->tx_rings[i]); in i40e_vsi_configure_tx()
3740 for (i = 0; (i < vsi->num_queue_pairs) && !err; i++) in i40e_vsi_configure_tx()
3741 err = i40e_configure_tx_ring(vsi->xdp_rings[i]); in i40e_vsi_configure_tx()
3747 * i40e_vsi_configure_rx - Configure the VSI for Rx
3757 vsi->max_frame = i40e_max_vsi_frame_size(vsi, vsi->xdp_prog); in i40e_vsi_configure_rx()
3758 vsi->rx_buf_len = i40e_calculate_vsi_rx_buf_len(vsi); in i40e_vsi_configure_rx()
3761 if (vsi->netdev && !I40E_2K_TOO_SMALL_WITH_PADDING && in i40e_vsi_configure_rx()
3762 vsi->netdev->mtu <= ETH_DATA_LEN) { in i40e_vsi_configure_rx()
3763 vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN; in i40e_vsi_configure_rx()
3764 vsi->max_frame = vsi->rx_buf_len; in i40e_vsi_configure_rx()
3769 for (i = 0; i < vsi->num_queue_pairs && !err; i++) in i40e_vsi_configure_rx()
3770 err = i40e_configure_rx_ring(vsi->rx_rings[i]); in i40e_vsi_configure_rx()
3776 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3785 if (!test_bit(I40E_FLAG_DCB_ENA, vsi->back->flags)) { in i40e_vsi_config_dcb_rings()
3787 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_vsi_config_dcb_rings()
3788 rx_ring = vsi->rx_rings[i]; in i40e_vsi_config_dcb_rings()
3789 tx_ring = vsi->tx_rings[i]; in i40e_vsi_config_dcb_rings()
3790 rx_ring->dcb_tc = 0; in i40e_vsi_config_dcb_rings()
3791 tx_ring->dcb_tc = 0; in i40e_vsi_config_dcb_rings()
3797 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n))) in i40e_vsi_config_dcb_rings()
3800 qoffset = vsi->tc_config.tc_info[n].qoffset; in i40e_vsi_config_dcb_rings()
3801 qcount = vsi->tc_config.tc_info[n].qcount; in i40e_vsi_config_dcb_rings()
3803 rx_ring = vsi->rx_rings[i]; in i40e_vsi_config_dcb_rings()
3804 tx_ring = vsi->tx_rings[i]; in i40e_vsi_config_dcb_rings()
3805 rx_ring->dcb_tc = n; in i40e_vsi_config_dcb_rings()
3806 tx_ring->dcb_tc = n; in i40e_vsi_config_dcb_rings()
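
Aside: i40e_vsi_config_dcb_rings() above gives each enabled TC a contiguous [qoffset, qoffset + qcount) slice of the queue pairs and tags every ring in the slice with its TC. A sketch with assumed sizes:

#include <stdio.h>

#define NUM_TC    4
#define NUM_RINGS 16

static const int qoffset[NUM_TC] = { 0, 4, 8, 12 };   /* assumed layout */
static const int qcount[NUM_TC]  = { 4, 4, 4, 4 };
static int ring_tc[NUM_RINGS];

static void config_dcb_rings(unsigned int enabled_tc)
{
        int n, i;

        for (n = 0; n < NUM_TC; n++) {
                if (!(enabled_tc & (1u << n)))
                        continue;
                for (i = qoffset[n]; i < qoffset[n] + qcount[n]; i++)
                        ring_tc[i] = n;
        }
}

int main(void)
{
        int i;

        config_dcb_rings(0x3);                         /* TC0 and TC1 */
        for (i = 0; i < 8; i++)
                printf("ring %d -> tc %d\n", i, ring_tc[i]);
        return 0;
}
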
3812 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3817 if (vsi->netdev) in i40e_set_vsi_rx_mode()
3818 i40e_set_rx_mode(vsi->netdev); in i40e_set_vsi_rx_mode()
3822 * i40e_reset_fdir_filter_cnt - Reset flow director filter counters
3829 pf->fd_tcp4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3830 pf->fd_udp4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3831 pf->fd_sctp4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3832 pf->fd_ip4_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3833 pf->fd_tcp6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3834 pf->fd_udp6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3835 pf->fd_sctp6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3836 pf->fd_ip6_filter_cnt = 0; in i40e_reset_fdir_filter_cnt()
3840 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3849 struct i40e_pf *pf = vsi->back; in i40e_fdir_filter_restore()
3852 if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) in i40e_fdir_filter_restore()
3859 &pf->fdir_filter_list, fdir_node) { in i40e_fdir_filter_restore()
3865 * i40e_vsi_configure - Set up the VSI for action
3883 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
3889 struct i40e_pf *pf = vsi->back; in i40e_vsi_configure_msix()
3890 struct i40e_hw *hw = &pf->hw; in i40e_vsi_configure_msix()
3897 * PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts) in i40e_vsi_configure_msix()
3899 qp = vsi->base_queue; in i40e_vsi_configure_msix()
3900 vector = vsi->base_vector; in i40e_vsi_configure_msix()
3901 for (i = 0; i < vsi->num_q_vectors; i++, vector++) { in i40e_vsi_configure_msix()
3902 struct i40e_q_vector *q_vector = vsi->q_vectors[i]; in i40e_vsi_configure_msix()
3904 q_vector->rx.next_update = jiffies + 1; in i40e_vsi_configure_msix()
3905 q_vector->rx.target_itr = in i40e_vsi_configure_msix()
3906 ITR_TO_REG(vsi->rx_rings[i]->itr_setting); in i40e_vsi_configure_msix()
3907 wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), in i40e_vsi_configure_msix()
3908 q_vector->rx.target_itr >> 1); in i40e_vsi_configure_msix()
3909 q_vector->rx.current_itr = q_vector->rx.target_itr; in i40e_vsi_configure_msix()
3911 q_vector->tx.next_update = jiffies + 1; in i40e_vsi_configure_msix()
3912 q_vector->tx.target_itr = in i40e_vsi_configure_msix()
3913 ITR_TO_REG(vsi->tx_rings[i]->itr_setting); in i40e_vsi_configure_msix()
3914 wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), in i40e_vsi_configure_msix()
3915 q_vector->tx.target_itr >> 1); in i40e_vsi_configure_msix()
3916 q_vector->tx.current_itr = q_vector->tx.target_itr; in i40e_vsi_configure_msix()
3918 wr32(hw, I40E_PFINT_RATEN(vector - 1), in i40e_vsi_configure_msix()
3919 i40e_intrl_usec_to_reg(vsi->int_rate_limit)); in i40e_vsi_configure_msix()
3922 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp); in i40e_vsi_configure_msix()
3923 for (q = 0; q < q_vector->num_ringpairs; q++) { in i40e_vsi_configure_msix()
3924 u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp; in i40e_vsi_configure_msix()
3956 if (q == (q_vector->num_ringpairs - 1)) in i40e_vsi_configure_msix()
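
Aside: the "target_itr >> 1" register writes above presumably convert an interval tracked in 1 usec units into the ITR register's 2 usec granularity; that encoding is an assumption here, not confirmed by the listing:

static unsigned int itr_to_reg(unsigned int itr_usecs)
{
        /* e.g. a 20 usec target becomes a register value of 10 */
        return itr_usecs >> 1;
}
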
3969 * i40e_enable_misc_int_causes - enable the non-queue interrupts
3974 struct i40e_hw *hw = &pf->hw; in i40e_enable_misc_int_causes()
3990 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) in i40e_enable_misc_int_causes()
3993 if (test_bit(I40E_FLAG_PTP_ENA, pf->flags)) in i40e_enable_misc_int_causes()
4007 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
4012 u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0; in i40e_configure_msi_and_legacy()
4013 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; in i40e_configure_msi_and_legacy()
4014 struct i40e_pf *pf = vsi->back; in i40e_configure_msi_and_legacy()
4015 struct i40e_hw *hw = &pf->hw; in i40e_configure_msi_and_legacy()
4018 q_vector->rx.next_update = jiffies + 1; in i40e_configure_msi_and_legacy()
4019 q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting); in i40e_configure_msi_and_legacy()
4020 wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr >> 1); in i40e_configure_msi_and_legacy()
4021 q_vector->rx.current_itr = q_vector->rx.target_itr; in i40e_configure_msi_and_legacy()
4022 q_vector->tx.next_update = jiffies + 1; in i40e_configure_msi_and_legacy()
4023 q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting); in i40e_configure_msi_and_legacy()
4024 wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr >> 1); in i40e_configure_msi_and_legacy()
4025 q_vector->tx.current_itr = q_vector->tx.target_itr; in i40e_configure_msi_and_legacy()
4050 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
4055 struct i40e_hw *hw = &pf->hw; in i40e_irq_dynamic_disable_icr0()
4063 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
4068 struct i40e_hw *hw = &pf->hw; in i40e_irq_dynamic_enable_icr0()
4080 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
4088 if (!q_vector->tx.ring && !q_vector->rx.ring) in i40e_msix_clean_rings()
4091 napi_schedule_irqoff(&q_vector->napi); in i40e_msix_clean_rings()
4097 * i40e_irq_affinity_notify - Callback for affinity changes
4110 cpumask_copy(&q_vector->affinity_mask, mask); in i40e_irq_affinity_notify()
4114 * i40e_irq_affinity_release - Callback for affinity notifier release
4124 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
4128 * Allocates MSI-X vectors and requests interrupts from the kernel.
4132 int q_vectors = vsi->num_q_vectors; in i40e_vsi_request_irq_msix()
4133 struct i40e_pf *pf = vsi->back; in i40e_vsi_request_irq_msix()
4134 int base = vsi->base_vector; in i40e_vsi_request_irq_msix()
4142 struct i40e_q_vector *q_vector = vsi->q_vectors[vector]; in i40e_vsi_request_irq_msix()
4144 irq_num = pf->msix_entries[base + vector].vector; in i40e_vsi_request_irq_msix()
4146 if (q_vector->tx.ring && q_vector->rx.ring) { in i40e_vsi_request_irq_msix()
4147 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in i40e_vsi_request_irq_msix()
4148 "%s-%s-%d", basename, "TxRx", rx_int_idx++); in i40e_vsi_request_irq_msix()
4150 } else if (q_vector->rx.ring) { in i40e_vsi_request_irq_msix()
4151 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in i40e_vsi_request_irq_msix()
4152 "%s-%s-%d", basename, "rx", rx_int_idx++); in i40e_vsi_request_irq_msix()
4153 } else if (q_vector->tx.ring) { in i40e_vsi_request_irq_msix()
4154 snprintf(q_vector->name, sizeof(q_vector->name) - 1, in i40e_vsi_request_irq_msix()
4155 "%s-%s-%d", basename, "tx", tx_int_idx++); in i40e_vsi_request_irq_msix()
4161 vsi->irq_handler, in i40e_vsi_request_irq_msix()
4163 q_vector->name, in i40e_vsi_request_irq_msix()
4166 dev_info(&pf->pdev->dev, in i40e_vsi_request_irq_msix()
4172 q_vector->irq_num = irq_num; in i40e_vsi_request_irq_msix()
4173 q_vector->affinity_notify.notify = i40e_irq_affinity_notify; in i40e_vsi_request_irq_msix()
4174 q_vector->affinity_notify.release = i40e_irq_affinity_release; in i40e_vsi_request_irq_msix()
4175 irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify); in i40e_vsi_request_irq_msix()
4182 cpu = cpumask_local_spread(q_vector->v_idx, -1); in i40e_vsi_request_irq_msix()
4186 vsi->irqs_ready = true; in i40e_vsi_request_irq_msix()
4191 vector--; in i40e_vsi_request_irq_msix()
4192 irq_num = pf->msix_entries[base + vector].vector; in i40e_vsi_request_irq_msix()
4195 free_irq(irq_num, &vsi->q_vectors[vector]); in i40e_vsi_request_irq_msix()
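
Aside: the error path above steps vector back and frees each IRQ that was successfully requested before the failure. The classic unwind shape, self-contained:

#include <stdio.h>

static int request_one(int i)   { return i == 3 ? -1 : 0; } /* fails at 3 */
static void free_one(int i)     { printf("free irq %d\n", i); }

static int request_all(int n)
{
        int i;

        for (i = 0; i < n; i++)
                if (request_one(i))
                        goto unwind;
        return 0;
unwind:
        while (--i >= 0)                 /* release only what we took */
                free_one(i);
        return -1;
}

int main(void)
{
        return request_all(5) ? 1 : 0;
}
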
4201 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
4202 * @vsi: the VSI being un-configured
4206 struct i40e_pf *pf = vsi->back; in i40e_vsi_disable_irq()
4207 struct i40e_hw *hw = &pf->hw; in i40e_vsi_disable_irq()
4208 int base = vsi->base_vector; in i40e_vsi_disable_irq()
4212 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_vsi_disable_irq()
4215 val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx)); in i40e_vsi_disable_irq()
4217 wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val); in i40e_vsi_disable_irq()
4219 val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx)); in i40e_vsi_disable_irq()
4221 wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val); in i40e_vsi_disable_irq()
4225 wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0); in i40e_vsi_disable_irq()
4229 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_vsi_disable_irq()
4230 for (i = vsi->base_vector; in i40e_vsi_disable_irq()
4231 i < (vsi->num_q_vectors + vsi->base_vector); i++) in i40e_vsi_disable_irq()
4232 wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0); in i40e_vsi_disable_irq()
4235 for (i = 0; i < vsi->num_q_vectors; i++) in i40e_vsi_disable_irq()
4236 synchronize_irq(pf->msix_entries[i + base].vector); in i40e_vsi_disable_irq()
4238 /* Legacy and MSI mode - this stops all interrupt handling */ in i40e_vsi_disable_irq()
4242 synchronize_irq(pf->pdev->irq); in i40e_vsi_disable_irq()
4247 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
4252 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_irq()
4255 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_vsi_enable_irq()
4256 for (i = 0; i < vsi->num_q_vectors; i++) in i40e_vsi_enable_irq()
4262 i40e_flush(&pf->hw); in i40e_vsi_enable_irq()
4267 * i40e_free_misc_vector - Free the vector that handles non-queue events
4273 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); in i40e_free_misc_vector()
4274 i40e_flush(&pf->hw); in i40e_free_misc_vector()
4276 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && pf->msix_entries) { in i40e_free_misc_vector()
4277 free_irq(pf->msix_entries[0].vector, pf); in i40e_free_misc_vector()
4278 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); in i40e_free_misc_vector()
4283 * i40e_intr - MSI/Legacy and non-queue interrupt handler
4288 * with both queue and non-queue interrupts. This is also used in
4289 * MSIX mode to handle the non-queue interrupts.
4294 struct i40e_hw *hw = &pf->hw; in i40e_intr()
4309 pf->sw_int_count++; in i40e_intr()
4311 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags) && in i40e_intr()
4314 dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n"); in i40e_intr()
4315 set_bit(__I40E_CORE_RESET_REQUESTED, pf->state); in i40e_intr()
4320 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_intr()
4321 struct i40e_q_vector *q_vector = vsi->q_vectors[0]; in i40e_intr()
4329 if (!test_bit(__I40E_DOWN, pf->state)) in i40e_intr()
4330 napi_schedule_irqoff(&q_vector->napi); in i40e_intr()
4335 set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state); in i40e_intr()
4336 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n"); in i40e_intr()
4341 set_bit(__I40E_MDD_EVENT_PENDING, pf->state); in i40e_intr()
4346 if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) { in i40e_intr()
4353 set_bit(__I40E_VFLR_EVENT_PENDING, pf->state); in i40e_intr()
4358 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_intr()
4359 set_bit(__I40E_RESET_INTR_RECEIVED, pf->state); in i40e_intr()
4364 pf->corer_count++; in i40e_intr()
4366 pf->globr_count++; in i40e_intr()
4368 pf->empr_count++; in i40e_intr()
4369 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state); in i40e_intr()
4375 dev_info(&pf->pdev->dev, "HMC error interrupt\n"); in i40e_intr()
4376 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", in i40e_intr()
4385 schedule_work(&pf->ptp_extts0_work); in i40e_intr()
4399 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", in i40e_intr()
4404 dev_info(&pf->pdev->dev, "device will be reset\n"); in i40e_intr()
4405 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_intr()
4413 /* re-enable interrupt causes */ in i40e_intr()
4415 if (!test_bit(__I40E_DOWN, pf->state) || in i40e_intr()
4416 test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_intr()
4425 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
4433 struct i40e_vsi *vsi = tx_ring->vsi; in i40e_clean_fdir_tx_irq()
4434 u16 i = tx_ring->next_to_clean; in i40e_clean_fdir_tx_irq()
4438 tx_buf = &tx_ring->tx_bi[i]; in i40e_clean_fdir_tx_irq()
4440 i -= tx_ring->count; in i40e_clean_fdir_tx_irq()
4443 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; in i40e_clean_fdir_tx_irq()
4453 if (!(eop_desc->cmd_type_offset_bsz & in i40e_clean_fdir_tx_irq()
4458 tx_buf->next_to_watch = NULL; in i40e_clean_fdir_tx_irq()
4460 tx_desc->buffer_addr = 0; in i40e_clean_fdir_tx_irq()
4461 tx_desc->cmd_type_offset_bsz = 0; in i40e_clean_fdir_tx_irq()
4467 i -= tx_ring->count; in i40e_clean_fdir_tx_irq()
4468 tx_buf = tx_ring->tx_bi; in i40e_clean_fdir_tx_irq()
4472 dma_unmap_single(tx_ring->dev, in i40e_clean_fdir_tx_irq()
4476 if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB) in i40e_clean_fdir_tx_irq()
4477 kfree(tx_buf->raw_buf); in i40e_clean_fdir_tx_irq()
4479 tx_buf->raw_buf = NULL; in i40e_clean_fdir_tx_irq()
4480 tx_buf->tx_flags = 0; in i40e_clean_fdir_tx_irq()
4481 tx_buf->next_to_watch = NULL; in i40e_clean_fdir_tx_irq()
4483 tx_desc->buffer_addr = 0; in i40e_clean_fdir_tx_irq()
4484 tx_desc->cmd_type_offset_bsz = 0; in i40e_clean_fdir_tx_irq()
4491 i -= tx_ring->count; in i40e_clean_fdir_tx_irq()
4492 tx_buf = tx_ring->tx_bi; in i40e_clean_fdir_tx_irq()
4497 budget--; in i40e_clean_fdir_tx_irq()
4500 i += tx_ring->count; in i40e_clean_fdir_tx_irq()
4501 tx_ring->next_to_clean = i; in i40e_clean_fdir_tx_irq()
4503 if (test_bit(I40E_FLAG_MSIX_ENA, vsi->back->flags)) in i40e_clean_fdir_tx_irq()
4504 i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx); in i40e_clean_fdir_tx_irq()
4510 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
4519 if (!q_vector->tx.ring) in i40e_fdir_clean_ring()
4522 vsi = q_vector->tx.ring->vsi; in i40e_fdir_clean_ring()
4523 i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit); in i40e_fdir_clean_ring()
4529 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
4536 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; in i40e_map_vector_to_qp()
4537 struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx]; in i40e_map_vector_to_qp()
4538 struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx]; in i40e_map_vector_to_qp()
4540 tx_ring->q_vector = q_vector; in i40e_map_vector_to_qp()
4541 tx_ring->next = q_vector->tx.ring; in i40e_map_vector_to_qp()
4542 q_vector->tx.ring = tx_ring; in i40e_map_vector_to_qp()
4543 q_vector->tx.count++; in i40e_map_vector_to_qp()
4547 struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx]; in i40e_map_vector_to_qp()
4549 xdp_ring->q_vector = q_vector; in i40e_map_vector_to_qp()
4550 xdp_ring->next = q_vector->tx.ring; in i40e_map_vector_to_qp()
4551 q_vector->tx.ring = xdp_ring; in i40e_map_vector_to_qp()
4552 q_vector->tx.count++; in i40e_map_vector_to_qp()
4555 rx_ring->q_vector = q_vector; in i40e_map_vector_to_qp()
4556 rx_ring->next = q_vector->rx.ring; in i40e_map_vector_to_qp()
4557 q_vector->rx.ring = rx_ring; in i40e_map_vector_to_qp()
4558 q_vector->rx.count++; in i40e_map_vector_to_qp()
4562 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
4565 * This function maps descriptor rings to the queue-specific vectors
4566 * we were allotted through the MSI-X enabling code. Ideally, we'd have
4572 int qp_remaining = vsi->num_queue_pairs; in i40e_vsi_map_rings_to_vectors()
4573 int q_vectors = vsi->num_q_vectors; in i40e_vsi_map_rings_to_vectors()
4578 /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to in i40e_vsi_map_rings_to_vectors()
4586 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start]; in i40e_vsi_map_rings_to_vectors()
4588 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start); in i40e_vsi_map_rings_to_vectors()
4590 q_vector->num_ringpairs = num_ringpairs; in i40e_vsi_map_rings_to_vectors()
4591 q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1; in i40e_vsi_map_rings_to_vectors()
4593 q_vector->rx.count = 0; in i40e_vsi_map_rings_to_vectors()
4594 q_vector->tx.count = 0; in i40e_vsi_map_rings_to_vectors()
4595 q_vector->rx.ring = NULL; in i40e_vsi_map_rings_to_vectors()
4596 q_vector->tx.ring = NULL; in i40e_vsi_map_rings_to_vectors()
4598 while (num_ringpairs--) { in i40e_vsi_map_rings_to_vectors()
4601 qp_remaining--; in i40e_vsi_map_rings_to_vectors()
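
Aside: the mapping loop above hands each vector DIV_ROUND_UP(qp_remaining, vectors_left) queue pairs, which spreads any remainder across the first vectors. Demonstrated in isolation:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static void map_rings(int num_qp, int num_vectors)
{
        int qp_remaining = num_qp, v;

        for (v = 0; v < num_vectors; v++) {
                int n = DIV_ROUND_UP(qp_remaining, num_vectors - v);

                printf("vector %d gets %d queue pairs\n", v, n);
                qp_remaining -= n;
        }
}

int main(void)
{
        map_rings(10, 4);     /* 3, 3, 2, 2: remainder lands up front */
        return 0;
}
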
4607 * i40e_vsi_request_irq - Request IRQ from the OS
4613 struct i40e_pf *pf = vsi->back; in i40e_vsi_request_irq()
4616 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_vsi_request_irq()
4618 else if (test_bit(I40E_FLAG_MSI_ENA, pf->flags)) in i40e_vsi_request_irq()
4619 err = request_irq(pf->pdev->irq, i40e_intr, 0, in i40e_vsi_request_irq()
4620 pf->int_name, pf); in i40e_vsi_request_irq()
4622 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, in i40e_vsi_request_irq()
4623 pf->int_name, pf); in i40e_vsi_request_irq()
4626 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); in i40e_vsi_request_irq()
4633 * i40e_netpoll - A Polling 'interrupt' handler
4636 * This is used by netconsole to send skbs without having to re-enable
4642 struct i40e_vsi *vsi = np->vsi; in i40e_netpoll()
4643 struct i40e_pf *pf = vsi->back; in i40e_netpoll()
4647 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_netpoll()
4650 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_netpoll()
4651 for (i = 0; i < vsi->num_q_vectors; i++) in i40e_netpoll()
4652 i40e_msix_clean_rings(0, vsi->q_vectors[i]); in i40e_netpoll()
4654 i40e_intr(pf->pdev->irq, netdev); in i40e_netpoll()
4662 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
4667 * This routine will wait for the given Tx queue of the PF to reach the
4669 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4678 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); in i40e_pf_txq_wait()
4685 return -ETIMEDOUT; in i40e_pf_txq_wait()
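
Aside: i40e_pf_txq_wait() above polls the queue-enable register until it reflects the requested state, returning -ETIMEDOUT after a bounded number of attempts. The same poll-with-timeout loop, self-contained (the interval and the register read are stand-ins):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool queue_enabled(void)          /* stand-in for the QTX_ENA read */
{
        return true;
}

static int txq_wait(bool enable, int retries)
{
        int i;

        for (i = 0; i < retries; i++) {
                if (queue_enabled() == enable)
                        return 0;
                usleep(10);              /* assumed poll interval */
        }
        return -ETIMEDOUT;
}

int main(void)
{
        printf("wait returned %d\n", txq_wait(true, 10));
        return 0;
}
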
4691 * i40e_control_tx_q - Start or stop a particular Tx queue
4702 struct i40e_hw *hw = &pf->hw; in i40e_control_tx_q()
4707 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); in i40e_control_tx_q()
4735 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
4749 /* wait for the change to finish */ in i40e_control_wait_tx_q()
4752 dev_info(&pf->pdev->dev, in i40e_control_wait_tx_q()
4762 * i40e_vsi_enable_tx - Start a VSI's Tx rings
4767 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_tx()
4770 pf_q = vsi->base_queue; in i40e_vsi_enable_tx()
4771 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_enable_tx()
4772 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_vsi_enable_tx()
4781 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_vsi_enable_tx()
4782 pf_q + vsi->alloc_queue_pairs, in i40e_vsi_enable_tx()
4791 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
4796 * This routine will wait for the given Rx queue of the PF to reach the
4798 * Returns -ETIMEDOUT in case of failing to reach the requested state after
4807 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q)); in i40e_pf_rxq_wait()
4814 return -ETIMEDOUT; in i40e_pf_rxq_wait()
4820 * i40e_control_rx_q - Start or stop a particular Rx queue
4831 struct i40e_hw *hw = &pf->hw; in i40e_control_rx_q()
4872 /* wait for the change to finish */ in i40e_control_wait_rx_q()
4881 * i40e_vsi_enable_rx - Start a VSI's Rx rings
4886 struct i40e_pf *pf = vsi->back; in i40e_vsi_enable_rx()
4889 pf_q = vsi->base_queue; in i40e_vsi_enable_rx()
4890 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_enable_rx()
4893 dev_info(&pf->pdev->dev, in i40e_vsi_enable_rx()
4895 vsi->seid, pf_q); in i40e_vsi_enable_rx()
4904 * i40e_vsi_start_rings - Start a VSI's rings
4923 * i40e_vsi_stop_rings - Stop a VSI's rings
4928 struct i40e_pf *pf = vsi->back; in i40e_vsi_stop_rings()
4931 /* When port TX is suspended, don't wait */ in i40e_vsi_stop_rings()
4932 if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state)) in i40e_vsi_stop_rings()
4935 tx_q_end = vsi->base_queue + in i40e_vsi_stop_rings()
4936 vsi->alloc_queue_pairs * (i40e_enabled_xdp_vsi(vsi) ? 2 : 1); in i40e_vsi_stop_rings()
4937 for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++) in i40e_vsi_stop_rings()
4938 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, false); in i40e_vsi_stop_rings()
4940 rx_q_end = vsi->base_queue + vsi->num_queue_pairs; in i40e_vsi_stop_rings()
4941 for (pf_q = vsi->base_queue; pf_q < rx_q_end; pf_q++) in i40e_vsi_stop_rings()
4945 for (pf_q = vsi->base_queue; pf_q < tx_q_end; pf_q++) in i40e_vsi_stop_rings()
4946 wr32(&pf->hw, I40E_QTX_ENA(pf_q), 0); in i40e_vsi_stop_rings()
4952 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
4964 struct i40e_pf *pf = vsi->back; in i40e_vsi_stop_rings_no_wait()
4967 pf_q = vsi->base_queue; in i40e_vsi_stop_rings_no_wait()
4968 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_stop_rings_no_wait()
4975 * i40e_vsi_free_irq - Free the irq association with the OS
4980 struct i40e_pf *pf = vsi->back; in i40e_vsi_free_irq()
4981 struct i40e_hw *hw = &pf->hw; in i40e_vsi_free_irq()
4982 int base = vsi->base_vector; in i40e_vsi_free_irq()
4986 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_vsi_free_irq()
4987 if (!vsi->q_vectors) in i40e_vsi_free_irq()
4990 if (!vsi->irqs_ready) in i40e_vsi_free_irq()
4993 vsi->irqs_ready = false; in i40e_vsi_free_irq()
4994 for (i = 0; i < vsi->num_q_vectors; i++) { in i40e_vsi_free_irq()
4999 irq_num = pf->msix_entries[vector].vector; in i40e_vsi_free_irq()
5002 if (!vsi->q_vectors[i] || in i40e_vsi_free_irq()
5003 !vsi->q_vectors[i]->num_ringpairs) in i40e_vsi_free_irq()
5010 free_irq(irq_num, vsi->q_vectors[i]); in i40e_vsi_free_irq()
5019 val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1)); in i40e_vsi_free_irq()
5024 wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val); in i40e_vsi_free_irq()
5059 free_irq(pf->pdev->irq, pf); in i40e_vsi_free_irq()
5093 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
5103 struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx]; in i40e_free_q_vector()
5110 i40e_for_each_ring(ring, q_vector->tx) in i40e_free_q_vector()
5111 ring->q_vector = NULL; in i40e_free_q_vector()
5113 i40e_for_each_ring(ring, q_vector->rx) in i40e_free_q_vector()
5114 ring->q_vector = NULL; in i40e_free_q_vector()
5117 if (vsi->netdev) in i40e_free_q_vector()
5118 netif_napi_del(&q_vector->napi); in i40e_free_q_vector()
5120 vsi->q_vectors[v_idx] = NULL; in i40e_free_q_vector()
5126 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
5127 * @vsi: the VSI being un-configured
5136 for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) in i40e_vsi_free_q_vectors()
5141 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
5147 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_reset_interrupt_capability()
5148 pci_disable_msix(pf->pdev); in i40e_reset_interrupt_capability()
5149 kfree(pf->msix_entries); in i40e_reset_interrupt_capability()
5150 pf->msix_entries = NULL; in i40e_reset_interrupt_capability()
5151 kfree(pf->irq_pile); in i40e_reset_interrupt_capability()
5152 pf->irq_pile = NULL; in i40e_reset_interrupt_capability()
5153 } else if (test_bit(I40E_FLAG_MSI_ENA, pf->flags)) { in i40e_reset_interrupt_capability()
5154 pci_disable_msi(pf->pdev); in i40e_reset_interrupt_capability()
5156 clear_bit(I40E_FLAG_MSI_ENA, pf->flags); in i40e_reset_interrupt_capability()
5157 clear_bit(I40E_FLAG_MSIX_ENA, pf->flags); in i40e_reset_interrupt_capability()
5161 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
5165 * to pre-load conditions
5171 if (test_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) in i40e_clear_interrupt_scheme()
5174 i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector, in i40e_clear_interrupt_scheme()
5177 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); in i40e_clear_interrupt_scheme()
5178 for (i = 0; i < pf->num_alloc_vsi; i++) in i40e_clear_interrupt_scheme()
5179 if (pf->vsi[i]) in i40e_clear_interrupt_scheme()
5180 i40e_vsi_free_q_vectors(pf->vsi[i]); in i40e_clear_interrupt_scheme()
5185 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
5192 if (!vsi->netdev) in i40e_napi_enable_all()
5195 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { in i40e_napi_enable_all()
5196 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; in i40e_napi_enable_all()
5198 if (q_vector->rx.ring || q_vector->tx.ring) in i40e_napi_enable_all()
5199 napi_enable(&q_vector->napi); in i40e_napi_enable_all()
5204 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
5211 if (!vsi->netdev) in i40e_napi_disable_all()
5214 for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { in i40e_napi_disable_all()
5215 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx]; in i40e_napi_disable_all()
5217 if (q_vector->rx.ring || q_vector->tx.ring) in i40e_napi_disable_all()
5218 napi_disable(&q_vector->napi); in i40e_napi_disable_all()
5223 * i40e_vsi_close - Shut down a VSI
5228 struct i40e_pf *pf = vsi->back; in i40e_vsi_close()
5229 if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_vsi_close()
5234 vsi->current_netdev_flags = 0; in i40e_vsi_close()
5235 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_vsi_close()
5236 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_vsi_close()
5237 set_bit(__I40E_CLIENT_RESET, pf->state); in i40e_vsi_close()
5241 * i40e_quiesce_vsi - Pause a given VSI
5246 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_quiesce_vsi()
5249 set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state); in i40e_quiesce_vsi()
5250 if (vsi->netdev && netif_running(vsi->netdev)) in i40e_quiesce_vsi()
5251 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); in i40e_quiesce_vsi()
5257 * i40e_unquiesce_vsi - Resume a given VSI
5262 if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state)) in i40e_unquiesce_vsi()
5265 if (vsi->netdev && netif_running(vsi->netdev)) in i40e_unquiesce_vsi()
5266 vsi->netdev->netdev_ops->ndo_open(vsi->netdev); in i40e_unquiesce_vsi()
5272 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
5279 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_pf_quiesce_all_vsi()
5280 if (pf->vsi[v]) in i40e_pf_quiesce_all_vsi()
5281 i40e_quiesce_vsi(pf->vsi[v]); in i40e_pf_quiesce_all_vsi()
5286 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
5293 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_pf_unquiesce_all_vsi()
5294 if (pf->vsi[v]) in i40e_pf_unquiesce_all_vsi()
5295 i40e_unquiesce_vsi(pf->vsi[v]); in i40e_pf_unquiesce_all_vsi()
5300 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
5303 * Wait until all queues on a given VSI have been disabled.
5307 struct i40e_pf *pf = vsi->back; in i40e_vsi_wait_queues_disabled()
5310 pf_q = vsi->base_queue; in i40e_vsi_wait_queues_disabled()
5311 for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { in i40e_vsi_wait_queues_disabled()
5312 /* Check and wait for the Tx queue */ in i40e_vsi_wait_queues_disabled()
5315 dev_info(&pf->pdev->dev, in i40e_vsi_wait_queues_disabled()
5317 vsi->seid, pf_q); in i40e_vsi_wait_queues_disabled()
5324 /* Check and wait for the XDP Tx queue */ in i40e_vsi_wait_queues_disabled()
5325 ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs, in i40e_vsi_wait_queues_disabled()
5328 dev_info(&pf->pdev->dev, in i40e_vsi_wait_queues_disabled()
5330 vsi->seid, pf_q); in i40e_vsi_wait_queues_disabled()
5334 /* Check and wait for the Rx queue */ in i40e_vsi_wait_queues_disabled()
5337 dev_info(&pf->pdev->dev, in i40e_vsi_wait_queues_disabled()
5339 vsi->seid, pf_q); in i40e_vsi_wait_queues_disabled()
5349 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
5359 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_pf_wait_queues_disabled()
5360 if (pf->vsi[v]) { in i40e_pf_wait_queues_disabled()
5361 ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); in i40e_pf_wait_queues_disabled()
5373 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
5382 struct i40e_hw *hw = &pf->hw; in i40e_get_iscsi_tc_map()
5386 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; in i40e_get_iscsi_tc_map()
5388 for (i = 0; i < dcbcfg->numapps; i++) { in i40e_get_iscsi_tc_map()
5389 app = dcbcfg->app[i]; in i40e_get_iscsi_tc_map()
5392 tc = dcbcfg->etscfg.prioritytable[app.priority]; in i40e_get_iscsi_tc_map()
5402 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
5405 * Return the number of TCs from given DCBx configuration
5415 * and create a bitmask of enabled TCs in i40e_dcb_get_num_tc()
5418 num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]); in i40e_dcb_get_num_tc()
5421 * contiguous TCs starting with TC0 in i40e_dcb_get_num_tc()
5428 pr_err("Non-contiguous TC - Disabling DCB\n"); in i40e_dcb_get_num_tc()
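
Aside: the contiguity check above requires the enabled-TC bitmap to be a solid run starting at TC0 (0x1, 0x3, 0x7, ...); anything else disables DCB. The test reduces to comparing the map against the mask of its lowest num_tc bits:

#include <stdbool.h>
#include <stdio.h>

/* A TC bitmap with num_tc TCs is contiguous from TC0 exactly when it
 * equals the mask of the num_tc lowest bits. */
static bool tc_bitmap_contiguous(unsigned int tc_map, int num_tc)
{
        return tc_map == (1u << num_tc) - 1u;
}

int main(void)
{
        printf("%d %d\n", tc_bitmap_contiguous(0x7, 3),   /* 1: TC0-2 */
               tc_bitmap_contiguous(0x5, 2));             /* 0: gap   */
        return 0;
}
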
5444 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
5463 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
5471 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_mqprio_get_enabled_tc()
5472 u8 num_tc = vsi->mqprio_qopt.qopt.num_tc; in i40e_mqprio_get_enabled_tc()
5481 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
5488 struct i40e_hw *hw = &pf->hw; in i40e_pf_get_num_tc()
5491 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; in i40e_pf_get_num_tc()
5494 return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc; in i40e_pf_get_num_tc()
5497 if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) in i40e_pf_get_num_tc()
5500 /* SFP mode will be enabled for all TCs on port */ in i40e_pf_get_num_tc()
5501 if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags)) in i40e_pf_get_num_tc()
5504 /* MFP mode return count of enabled TCs for this PF */ in i40e_pf_get_num_tc()
5505 if (pf->hw.func_caps.iscsi) in i40e_pf_get_num_tc()
5518 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
5531 if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) in i40e_pf_get_tc_map()
5534 /* SFP mode we want PF to be enabled for all TCs */ in i40e_pf_get_tc_map()
5535 if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags)) in i40e_pf_get_tc_map()
5536 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); in i40e_pf_get_tc_map()
5539 if (pf->hw.func_caps.iscsi) in i40e_pf_get_tc_map()
5546 * i40e_vsi_get_bw_info - Query VSI BW Information
5555 struct i40e_pf *pf = vsi->back; in i40e_vsi_get_bw_info()
5556 struct i40e_hw *hw = &pf->hw; in i40e_vsi_get_bw_info()
5562 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL); in i40e_vsi_get_bw_info()
5564 dev_info(&pf->pdev->dev, in i40e_vsi_get_bw_info()
5567 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_vsi_get_bw_info()
5568 return -EINVAL; in i40e_vsi_get_bw_info()
5572 ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config, in i40e_vsi_get_bw_info()
5575 dev_info(&pf->pdev->dev, in i40e_vsi_get_bw_info()
5578 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_vsi_get_bw_info()
5579 return -EINVAL; in i40e_vsi_get_bw_info()
5583 dev_info(&pf->pdev->dev, in i40e_vsi_get_bw_info()
5584 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n", in i40e_vsi_get_bw_info()
5590 vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit); in i40e_vsi_get_bw_info()
5591 vsi->bw_max_quanta = bw_config.max_bw; in i40e_vsi_get_bw_info()
5595 vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i]; in i40e_vsi_get_bw_info()
5596 vsi->bw_ets_limit_credits[i] = in i40e_vsi_get_bw_info()
5599 vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7); in i40e_vsi_get_bw_info()
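
Aside: the bw_ets_max_quanta unpacking above extracts a small per-TC field from a packed word, 4 bits apart with the low 3 bits used. In isolation:

#include <stdint.h>
#include <stdio.h>

static uint8_t tc_max_quanta(uint32_t tc_bw_max, int tc)
{
        return (uint8_t)((tc_bw_max >> (tc * 4)) & 0x7);
}

int main(void)
{
        /* field for TC1 lives in bits 4..6: 0x320 >> 4 = 0x32, & 7 = 2 */
        printf("tc1 max quanta %u\n", tc_max_quanta(0x320, 1));
        return 0;
}
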
5606 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
5617 struct i40e_pf *pf = vsi->back; in i40e_vsi_configure_bw_alloc()
5624 if (!vsi->mqprio_qopt.qopt.hw && !test_bit(I40E_FLAG_DCB_ENA, pf->flags)) { in i40e_vsi_configure_bw_alloc()
5625 ret = i40e_set_bw_limit(vsi, vsi->seid, 0); in i40e_vsi_configure_bw_alloc()
5627 dev_info(&pf->pdev->dev, in i40e_vsi_configure_bw_alloc()
5628 "Failed to reset tx rate for vsi->seid %u\n", in i40e_vsi_configure_bw_alloc()
5629 vsi->seid); in i40e_vsi_configure_bw_alloc()
5637 ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL); in i40e_vsi_configure_bw_alloc()
5639 dev_info(&pf->pdev->dev, in i40e_vsi_configure_bw_alloc()
5641 pf->hw.aq.asq_last_status); in i40e_vsi_configure_bw_alloc()
5642 return -EINVAL; in i40e_vsi_configure_bw_alloc()
5646 vsi->info.qs_handle[i] = bw_data.qs_handles[i]; in i40e_vsi_configure_bw_alloc()
5652 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
5659 struct net_device *netdev = vsi->netdev; in i40e_vsi_config_netdev_tc()
5660 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_netdev_tc()
5661 struct i40e_hw *hw = &pf->hw; in i40e_vsi_config_netdev_tc()
5664 struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; in i40e_vsi_config_netdev_tc()
5674 /* Set up actual enabled TCs on the VSI */ in i40e_vsi_config_netdev_tc()
5675 if (netdev_set_num_tc(netdev, vsi->tc_config.numtc)) in i40e_vsi_config_netdev_tc()
5680 /* Only set TC queues for enabled tcs in i40e_vsi_config_netdev_tc()
5687 if (vsi->tc_config.enabled_tc & BIT(i)) in i40e_vsi_config_netdev_tc()
5689 vsi->tc_config.tc_info[i].netdev_tc, in i40e_vsi_config_netdev_tc()
5690 vsi->tc_config.tc_info[i].qcount, in i40e_vsi_config_netdev_tc()
5691 vsi->tc_config.tc_info[i].qoffset); in i40e_vsi_config_netdev_tc()
5700 u8 ets_tc = dcbcfg->etscfg.prioritytable[i]; in i40e_vsi_config_netdev_tc()
5702 netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc; in i40e_vsi_config_netdev_tc()
5708 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
5719 vsi->info.mapping_flags = ctxt->info.mapping_flags; in i40e_vsi_update_queue_map()
5720 memcpy(&vsi->info.queue_mapping, in i40e_vsi_update_queue_map()
5721 &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping)); in i40e_vsi_update_queue_map()
5722 memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping, in i40e_vsi_update_queue_map()
5723 sizeof(vsi->info.tc_mapping)); in i40e_vsi_update_queue_map()
5727 * i40e_update_adq_vsi_queues - update queue mapping for ADq VSI
5739 return -EINVAL; in i40e_update_adq_vsi_queues()
5740 pf = vsi->back; in i40e_update_adq_vsi_queues()
5741 hw = &pf->hw; in i40e_update_adq_vsi_queues()
5743 ctxt.seid = vsi->seid; in i40e_update_adq_vsi_queues()
5744 ctxt.pf_num = hw->pf_id; in i40e_update_adq_vsi_queues()
5745 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id + vsi_offset; in i40e_update_adq_vsi_queues()
5746 ctxt.uplink_seid = vsi->uplink_seid; in i40e_update_adq_vsi_queues()
5749 ctxt.info = vsi->info; in i40e_update_adq_vsi_queues()
5751 i40e_vsi_setup_queue_map(vsi, &ctxt, vsi->tc_config.enabled_tc, in i40e_update_adq_vsi_queues()
5753 if (vsi->reconfig_rss) { in i40e_update_adq_vsi_queues()
5754 vsi->rss_size = min_t(int, pf->alloc_rss_size, in i40e_update_adq_vsi_queues()
5755 vsi->num_queue_pairs); in i40e_update_adq_vsi_queues()
5758 dev_info(&pf->pdev->dev, "Failed to reconfig rss for num_queues\n"); in i40e_update_adq_vsi_queues()
5761 vsi->reconfig_rss = false; in i40e_update_adq_vsi_queues()
5766 dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n", in i40e_update_adq_vsi_queues()
5768 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_update_adq_vsi_queues()
5773 vsi->info.valid_sections = 0; in i40e_update_adq_vsi_queues()
5779 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
5783 * This configures a particular VSI for TCs that are mapped to the
5784 * given TC bitmap. It uses default bandwidth share for TCs across
5794 struct i40e_pf *pf = vsi->back; in i40e_vsi_config_tc()
5795 struct i40e_hw *hw = &pf->hw; in i40e_vsi_config_tc()
5800 /* Check if enabled_tc is same as existing or new TCs */ in i40e_vsi_config_tc()
5801 if (vsi->tc_config.enabled_tc == enabled_tc && in i40e_vsi_config_tc()
5802 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL) in i40e_vsi_config_tc()
5805 /* Enable ETS TCs with equal BW Share for now across all VSIs */ in i40e_vsi_config_tc()
5815 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5817 enabled_tc, vsi->seid); in i40e_vsi_config_tc()
5818 ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, in i40e_vsi_config_tc()
5821 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5824 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_vsi_config_tc()
5834 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5842 dev_err(&pf->pdev->dev, in i40e_vsi_config_tc()
5844 enabled_tc, vsi->seid); in i40e_vsi_config_tc()
5850 ctxt.seid = vsi->seid; in i40e_vsi_config_tc()
5851 ctxt.pf_num = vsi->back->hw.pf_id; in i40e_vsi_config_tc()
5853 ctxt.uplink_seid = vsi->uplink_seid; in i40e_vsi_config_tc()
5854 ctxt.info = vsi->info; in i40e_vsi_config_tc()
5863 /* On destroying the qdisc, reset vsi->rss_size, as number of enabled in i40e_vsi_config_tc()
5866 if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) { in i40e_vsi_config_tc()
5867 vsi->rss_size = min_t(int, vsi->back->alloc_rss_size, in i40e_vsi_config_tc()
5868 vsi->num_queue_pairs); in i40e_vsi_config_tc()
5871 dev_info(&vsi->back->pdev->dev, in i40e_vsi_config_tc()
5875 vsi->reconfig_rss = false; in i40e_vsi_config_tc()
5877 if (test_bit(I40E_FLAG_IWARP_ENA, vsi->back->flags)) { in i40e_vsi_config_tc()
5883 /* Update the VSI after updating the VSI queue-mapping in i40e_vsi_config_tc()
5888 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5891 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_vsi_config_tc()
5896 vsi->info.valid_sections = 0; in i40e_vsi_config_tc()
5901 dev_info(&pf->pdev->dev, in i40e_vsi_config_tc()
5904 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_vsi_config_tc()
5915 * i40e_get_link_speed - Returns link speed for the interface
5921 struct i40e_pf *pf = vsi->back; in i40e_get_link_speed()
5923 switch (pf->hw.phy.link_info.link_speed) { in i40e_get_link_speed()
5935 return -EINVAL; in i40e_get_link_speed()
5940 * i40e_bw_bytes_to_mbits - Convert max_tx_rate from bytes to mbits
5949 dev_warn(&vsi->back->pdev->dev, in i40e_bw_bytes_to_mbits()
5960 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
5969 struct i40e_pf *pf = vsi->back; in i40e_set_bw_limit()
5976 dev_err(&pf->pdev->dev, in i40e_set_bw_limit()
5979 return -EINVAL; in i40e_set_bw_limit()
5982 dev_warn(&pf->pdev->dev, in i40e_set_bw_limit()
5990 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits, in i40e_set_bw_limit()
5993 dev_err(&pf->pdev->dev, in i40e_set_bw_limit()
5994 "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %pe aq_err %s\n", in i40e_set_bw_limit()
5996 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_set_bw_limit()
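/* Annotation (not in the original source): firmware accounts Tx rate
 * limits in credits of I40E_BW_CREDIT_DIVISOR (50 Mbps), which is why
 * callers below log a "count of 50Mbps"; a 1000 Mbps cap is programmed
 * as 20 credits.
 */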
6001 * i40e_remove_queue_channels - Remove queue channels for the TCs
6004 * Remove queue channels for the TCs
6011 struct i40e_pf *pf = vsi->back; in i40e_remove_queue_channels()
6016 * channel VSIs with non-power-of-2 queue count. in i40e_remove_queue_channels()
6018 vsi->current_rss_size = 0; in i40e_remove_queue_channels()
6021 if (list_empty(&vsi->ch_list)) in i40e_remove_queue_channels()
6024 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { in i40e_remove_queue_channels()
6027 list_del(&ch->list); in i40e_remove_queue_channels()
6028 p_vsi = ch->parent_vsi; in i40e_remove_queue_channels()
6029 if (!p_vsi || !ch->initialized) { in i40e_remove_queue_channels()
6034 for (i = 0; i < ch->num_queue_pairs; i++) { in i40e_remove_queue_channels()
6038 pf_q = ch->base_queue + i; in i40e_remove_queue_channels()
6039 tx_ring = vsi->tx_rings[pf_q]; in i40e_remove_queue_channels()
6040 tx_ring->ch = NULL; in i40e_remove_queue_channels()
6042 rx_ring = vsi->rx_rings[pf_q]; in i40e_remove_queue_channels()
6043 rx_ring->ch = NULL; in i40e_remove_queue_channels()
6047 ret = i40e_set_bw_limit(vsi, ch->seid, 0); in i40e_remove_queue_channels()
6049 dev_info(&vsi->back->pdev->dev, in i40e_remove_queue_channels()
6050 "Failed to reset tx rate for ch->seid %u\n", in i40e_remove_queue_channels()
6051 ch->seid); in i40e_remove_queue_channels()
6055 &pf->cloud_filter_list, cloud_node) { in i40e_remove_queue_channels()
6056 if (cfilter->seid != ch->seid) in i40e_remove_queue_channels()
6059 hash_del(&cfilter->cloud_node); in i40e_remove_queue_channels()
6060 if (cfilter->dst_port) in i40e_remove_queue_channels()
6067 last_aq_status = pf->hw.aq.asq_last_status; in i40e_remove_queue_channels()
6069 dev_info(&pf->pdev->dev, in i40e_remove_queue_channels()
6072 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_remove_queue_channels()
6077 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, in i40e_remove_queue_channels()
6080 dev_err(&vsi->back->pdev->dev, in i40e_remove_queue_channels()
6082 ch->seid, p_vsi->seid); in i40e_remove_queue_channels()
6085 INIT_LIST_HEAD(&vsi->ch_list); in i40e_remove_queue_channels()
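/* Annotation (not in the original source): teardown order above is
 * deliberate: detach the channel from its Tx/Rx rings first, drop its
 * Tx rate limit, delete any cloud filters steering to the channel's
 * SEID, and only then remove the channel VSI from the switch.
 */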
6093 * channels/TCs created.
6100 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { in i40e_get_max_queues_for_channel()
6101 if (!ch->initialized) in i40e_get_max_queues_for_channel()
6103 if (ch->num_queue_pairs > max) in i40e_get_max_queues_for_channel()
6104 max = ch->num_queue_pairs; in i40e_get_max_queues_for_channel()
6111 * i40e_validate_num_queues - validate num_queues w.r.t channel
6127 return -EINVAL; in i40e_validate_num_queues()
6130 if (vsi->current_rss_size) { in i40e_validate_num_queues()
6131 if (num_queues > vsi->current_rss_size) { in i40e_validate_num_queues()
6132 dev_dbg(&pf->pdev->dev, in i40e_validate_num_queues()
6134 num_queues, vsi->current_rss_size); in i40e_validate_num_queues()
6135 return -EINVAL; in i40e_validate_num_queues()
6136 } else if ((num_queues < vsi->current_rss_size) && in i40e_validate_num_queues()
6138 dev_dbg(&pf->pdev->dev, in i40e_validate_num_queues()
6140 num_queues, vsi->current_rss_size); in i40e_validate_num_queues()
6141 return -EINVAL; in i40e_validate_num_queues()
6153 dev_dbg(&pf->pdev->dev, in i40e_validate_num_queues()
6156 return -EINVAL; in i40e_validate_num_queues()
6165 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
6173 struct i40e_pf *pf = vsi->back; in i40e_vsi_reconfig_rss()
6175 struct i40e_hw *hw = &pf->hw; in i40e_vsi_reconfig_rss()
6180 if (!vsi->rss_size) in i40e_vsi_reconfig_rss()
6181 return -EINVAL; in i40e_vsi_reconfig_rss()
6183 if (rss_size > vsi->rss_size) in i40e_vsi_reconfig_rss()
6184 return -EINVAL; in i40e_vsi_reconfig_rss()
6186 local_rss_size = min_t(int, vsi->rss_size, rss_size); in i40e_vsi_reconfig_rss()
6187 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in i40e_vsi_reconfig_rss()
6189 return -ENOMEM; in i40e_vsi_reconfig_rss()
6192 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size); in i40e_vsi_reconfig_rss()
6197 if (vsi->rss_hkey_user) in i40e_vsi_reconfig_rss()
6198 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); in i40e_vsi_reconfig_rss()
6202 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); in i40e_vsi_reconfig_rss()
6204 dev_info(&pf->pdev->dev, in i40e_vsi_reconfig_rss()
6207 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_vsi_reconfig_rss()
6214 if (!vsi->orig_rss_size) in i40e_vsi_reconfig_rss()
6215 vsi->orig_rss_size = vsi->rss_size; in i40e_vsi_reconfig_rss()
6216 vsi->current_rss_size = local_rss_size; in i40e_vsi_reconfig_rss()
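/* Annotation (not in the original source): i40e_fill_rss_lut() spreads
 * hash buckets round-robin over the reduced queue set, conceptually:
 *
 *	for (i = 0; i < vsi->rss_table_size; i++)
 *		lut[i] = i % local_rss_size;
 *
 * so shrinking rss_size only rewrites the LUT; the hash key is
 * re-programmed unchanged.
 */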
6222 * i40e_channel_setup_queue_map - Setup a channel queue map
6240 qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix); in i40e_channel_setup_queue_map()
6241 ch->num_queue_pairs = qcount; in i40e_channel_setup_queue_map()
6243 /* find the next higher power-of-2 of num queue pairs */ in i40e_channel_setup_queue_map()
6252 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); in i40e_channel_setup_queue_map()
6254 ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */ in i40e_channel_setup_queue_map()
6255 ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG); in i40e_channel_setup_queue_map()
6256 ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue); in i40e_channel_setup_queue_map()
6257 ctxt->info.valid_sections |= cpu_to_le16(sections); in i40e_channel_setup_queue_map()
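/* Annotation (not in the original source): channel VSIs use a single
 * TC (TC0) with a contiguous queue map; the TC entry encodes the queue
 * offset plus log2 of the rounded-up queue count, so e.g. 6 queue
 * pairs are advertised as 2^3 = 8 in the map while ch->num_queue_pairs
 * stays 6.
 */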
6261 * i40e_add_channel - add a channel by adding VSI
6271 struct i40e_hw *hw = &pf->hw; in i40e_add_channel()
6276 if (ch->type != I40E_VSI_VMDQ2) { in i40e_add_channel()
6277 dev_info(&pf->pdev->dev, in i40e_add_channel()
6278 "add new vsi failed, ch->type %d\n", ch->type); in i40e_add_channel()
6279 return -EINVAL; in i40e_add_channel()
6283 ctxt.pf_num = hw->pf_id; in i40e_add_channel()
6287 if (ch->type == I40E_VSI_VMDQ2) in i40e_add_channel()
6290 if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { in i40e_add_channel()
6303 dev_info(&pf->pdev->dev, in i40e_add_channel()
6306 i40e_aq_str(&pf->hw, in i40e_add_channel()
6307 pf->hw.aq.asq_last_status)); in i40e_add_channel()
6308 return -ENOENT; in i40e_add_channel()
6314 ch->enabled_tc = !i40e_is_channel_macvlan(ch) && enabled_tc; in i40e_add_channel()
6315 ch->seid = ctxt.seid; in i40e_add_channel()
6316 ch->vsi_number = ctxt.vsi_number; in i40e_add_channel()
6317 ch->stat_counter_idx = le16_to_cpu(ctxt.info.stat_counter_idx); in i40e_add_channel()
6323 ch->info.mapping_flags = ctxt.info.mapping_flags; in i40e_add_channel()
6324 memcpy(&ch->info.queue_mapping, in i40e_add_channel()
6326 memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping, in i40e_add_channel()
6340 bw_data.tc_valid_bits = ch->enabled_tc; in i40e_channel_config_bw()
6344 ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid, in i40e_channel_config_bw()
6347 dev_info(&vsi->back->pdev->dev, in i40e_channel_config_bw()
6348 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n", in i40e_channel_config_bw()
6349 vsi->back->hw.aq.asq_last_status, ch->seid); in i40e_channel_config_bw()
6350 return -EINVAL; in i40e_channel_config_bw()
6354 ch->info.qs_handle[i] = bw_data.qs_handles[i]; in i40e_channel_config_bw()
6360 * i40e_channel_config_tx_ring - config TX ring associated with new channel
6376 /* Enable ETS TCs with equal BW Share for now across all VSIs */ in i40e_channel_config_tx_ring()
6378 if (ch->enabled_tc & BIT(i)) in i40e_channel_config_tx_ring()
6385 dev_info(&vsi->back->pdev->dev, in i40e_channel_config_tx_ring()
6387 ch->enabled_tc, ch->seid); in i40e_channel_config_tx_ring()
6391 for (i = 0; i < ch->num_queue_pairs; i++) { in i40e_channel_config_tx_ring()
6395 pf_q = ch->base_queue + i; in i40e_channel_config_tx_ring()
6397 /* Get to TX ring ptr of main VSI, for re-setup TX queue in i40e_channel_config_tx_ring()
6400 tx_ring = vsi->tx_rings[pf_q]; in i40e_channel_config_tx_ring()
6401 tx_ring->ch = ch; in i40e_channel_config_tx_ring()
6404 rx_ring = vsi->rx_rings[pf_q]; in i40e_channel_config_tx_ring()
6405 rx_ring->ch = ch; in i40e_channel_config_tx_ring()
6412 * i40e_setup_hw_channel - setup new channel
6429 ch->initialized = false; in i40e_setup_hw_channel()
6430 ch->base_queue = vsi->next_base_queue; in i40e_setup_hw_channel()
6431 ch->type = type; in i40e_setup_hw_channel()
6436 dev_info(&pf->pdev->dev, in i40e_setup_hw_channel()
6443 ch->initialized = true; in i40e_setup_hw_channel()
6448 dev_info(&pf->pdev->dev, in i40e_setup_hw_channel()
6450 ch->seid); in i40e_setup_hw_channel()
6455 vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs; in i40e_setup_hw_channel()
6456 dev_dbg(&pf->pdev->dev, in i40e_setup_hw_channel()
6457 …"Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base… in i40e_setup_hw_channel()
6458 ch->seid, ch->vsi_number, ch->stat_counter_idx, in i40e_setup_hw_channel()
6459 ch->num_queue_pairs, in i40e_setup_hw_channel()
6460 vsi->next_base_queue); in i40e_setup_hw_channel()
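/* Annotation (not in the original source): vsi->next_base_queue acts
 * as a running cursor into the parent VSI's queue space; each channel
 * claims [base_queue, base_queue + num_queue_pairs) and advances the
 * cursor for the next one.
 */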
6465 * i40e_setup_channel - setup new channel using uplink element
6480 if (vsi->type == I40E_VSI_MAIN) { in i40e_setup_channel()
6483 dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n", in i40e_setup_channel()
6484 vsi->type); in i40e_setup_channel()
6489 seid = pf->vsi[pf->lan_vsi]->uplink_seid; in i40e_setup_channel()
6494 dev_err(&pf->pdev->dev, "failed to setup hw_channel\n"); in i40e_setup_channel()
6498 return ch->initialized; in i40e_setup_channel()
6502 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
6511 struct i40e_pf *pf = vsi->back; in i40e_validate_and_set_switch_mode()
6512 struct i40e_hw *hw = &pf->hw; in i40e_validate_and_set_switch_mode()
6517 return -EINVAL; in i40e_validate_and_set_switch_mode()
6519 if (hw->dev_caps.switch_mode) { in i40e_validate_and_set_switch_mode()
6520 /* if switch mode is set, support mode2 (non-tunneled for in i40e_validate_and_set_switch_mode()
6523 u32 switch_mode = hw->dev_caps.switch_mode & in i40e_validate_and_set_switch_mode()
6528 dev_err(&pf->pdev->dev, in i40e_validate_and_set_switch_mode()
6529 "Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n", in i40e_validate_and_set_switch_mode()
6530 hw->dev_caps.switch_mode); in i40e_validate_and_set_switch_mode()
6531 return -EINVAL; in i40e_validate_and_set_switch_mode()
6545 ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags, in i40e_validate_and_set_switch_mode()
6546 pf->last_sw_conf_valid_flags, in i40e_validate_and_set_switch_mode()
6548 if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH) in i40e_validate_and_set_switch_mode()
6549 dev_err(&pf->pdev->dev, in i40e_validate_and_set_switch_mode()
6553 hw->aq.asq_last_status)); in i40e_validate_and_set_switch_mode()
6559 * i40e_create_queue_channel - function to create channel
6569 struct i40e_pf *pf = vsi->back; in i40e_create_queue_channel()
6574 return -EINVAL; in i40e_create_queue_channel()
6576 if (!ch->num_queue_pairs) { in i40e_create_queue_channel()
6577 dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n", in i40e_create_queue_channel()
6578 ch->num_queue_pairs); in i40e_create_queue_channel()
6579 return -EINVAL; in i40e_create_queue_channel()
6583 err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi, in i40e_create_queue_channel()
6586 dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n", in i40e_create_queue_channel()
6587 ch->num_queue_pairs); in i40e_create_queue_channel()
6588 return -EINVAL; in i40e_create_queue_channel()
6595 if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { in i40e_create_queue_channel()
6596 set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); in i40e_create_queue_channel()
6598 if (vsi->type == I40E_VSI_MAIN) { in i40e_create_queue_channel()
6609 /* By this time, vsi->cnt_q_avail shall be set to non-zero and in i40e_create_queue_channel()
6612 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) { in i40e_create_queue_channel()
6613 dev_dbg(&pf->pdev->dev, in i40e_create_queue_channel()
6615 vsi->cnt_q_avail, ch->num_queue_pairs); in i40e_create_queue_channel()
6616 return -EINVAL; in i40e_create_queue_channel()
6620 if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) { in i40e_create_queue_channel()
6621 err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs); in i40e_create_queue_channel()
6623 dev_info(&pf->pdev->dev, in i40e_create_queue_channel()
6625 ch->num_queue_pairs); in i40e_create_queue_channel()
6626 return -EINVAL; in i40e_create_queue_channel()
6631 dev_info(&pf->pdev->dev, "Failed to setup channel\n"); in i40e_create_queue_channel()
6632 return -EINVAL; in i40e_create_queue_channel()
6635 dev_info(&pf->pdev->dev, in i40e_create_queue_channel()
6637 ch->seid, ch->num_queue_pairs); in i40e_create_queue_channel()
6640 if (ch->max_tx_rate) { in i40e_create_queue_channel()
6641 u64 credits = ch->max_tx_rate; in i40e_create_queue_channel()
6643 if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate)) in i40e_create_queue_channel()
6644 return -EINVAL; in i40e_create_queue_channel()
6647 dev_dbg(&pf->pdev->dev, in i40e_create_queue_channel()
6648 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", in i40e_create_queue_channel()
6649 ch->max_tx_rate, in i40e_create_queue_channel()
6651 ch->seid); in i40e_create_queue_channel()
6655 ch->parent_vsi = vsi; in i40e_create_queue_channel()
6658 vsi->cnt_q_avail -= ch->num_queue_pairs; in i40e_create_queue_channel()
6664 * i40e_configure_queue_channels - Add queue channel for the given TCs
6667 * Configures queue channel mapping to the given TCs
6675 /* Create app vsi with the TCs. Main VSI with TC0 is already set up */ in i40e_configure_queue_channels()
6676 vsi->tc_seid_map[0] = vsi->seid; in i40e_configure_queue_channels()
6678 if (vsi->tc_config.enabled_tc & BIT(i)) { in i40e_configure_queue_channels()
6681 ret = -ENOMEM; in i40e_configure_queue_channels()
6685 INIT_LIST_HEAD(&ch->list); in i40e_configure_queue_channels()
6686 ch->num_queue_pairs = in i40e_configure_queue_channels()
6687 vsi->tc_config.tc_info[i].qcount; in i40e_configure_queue_channels()
6688 ch->base_queue = in i40e_configure_queue_channels()
6689 vsi->tc_config.tc_info[i].qoffset; in i40e_configure_queue_channels()
6694 max_rate = vsi->mqprio_qopt.max_rate[i]; in i40e_configure_queue_channels()
6696 ch->max_tx_rate = max_rate; in i40e_configure_queue_channels()
6698 list_add_tail(&ch->list, &vsi->ch_list); in i40e_configure_queue_channels()
6702 dev_err(&vsi->back->pdev->dev, in i40e_configure_queue_channels()
6704 i, ch->num_queue_pairs); in i40e_configure_queue_channels()
6707 vsi->tc_seid_map[i] = ch->seid; in i40e_configure_queue_channels()
6712 i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true); in i40e_configure_queue_channels()
6721 * i40e_veb_config_tc - Configure TCs for given VEB
6730 struct i40e_pf *pf = veb->pf; in i40e_veb_config_tc()
6734 /* No TCs, or the requested TCs are already enabled: just return */ in i40e_veb_config_tc()
6735 if (!enabled_tc || veb->enabled_tc == enabled_tc) in i40e_veb_config_tc()
6741 /* Enable ETS TCs with equal BW Share for now */ in i40e_veb_config_tc()
6747 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid, in i40e_veb_config_tc()
6750 dev_info(&pf->pdev->dev, in i40e_veb_config_tc()
6753 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_veb_config_tc()
6760 dev_info(&pf->pdev->dev, in i40e_veb_config_tc()
6763 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_veb_config_tc()
6772 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
6785 /* Enable the TCs available on PF to all VEBs */ in i40e_dcb_reconfigure()
6791 if (!pf->veb[v]) in i40e_dcb_reconfigure()
6793 ret = i40e_veb_config_tc(pf->veb[v], tc_map); in i40e_dcb_reconfigure()
6795 dev_info(&pf->pdev->dev, in i40e_dcb_reconfigure()
6797 pf->veb[v]->seid); in i40e_dcb_reconfigure()
6803 for (v = 0; v < pf->num_alloc_vsi; v++) { in i40e_dcb_reconfigure()
6804 if (!pf->vsi[v]) in i40e_dcb_reconfigure()
6807 /* - Enable all TCs for the LAN VSI in i40e_dcb_reconfigure()
6808 * - For all others keep them at TC0 for now in i40e_dcb_reconfigure()
6810 if (v == pf->lan_vsi) in i40e_dcb_reconfigure()
6815 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); in i40e_dcb_reconfigure()
6817 dev_info(&pf->pdev->dev, in i40e_dcb_reconfigure()
6819 pf->vsi[v]->seid); in i40e_dcb_reconfigure()
6822 /* Re-configure VSI vectors based on updated TC map */ in i40e_dcb_reconfigure()
6823 i40e_vsi_map_rings_to_vectors(pf->vsi[v]); in i40e_dcb_reconfigure()
6824 if (pf->vsi[v]->netdev) in i40e_dcb_reconfigure()
6825 i40e_dcbnl_set_all(pf->vsi[v]); in i40e_dcb_reconfigure()
6831 * i40e_resume_port_tx - Resume port Tx
6839 struct i40e_hw *hw = &pf->hw; in i40e_resume_port_tx()
6844 dev_info(&pf->pdev->dev, in i40e_resume_port_tx()
6847 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_resume_port_tx()
6849 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_resume_port_tx()
6857 * i40e_suspend_port_tx - Suspend port Tx
6864 struct i40e_hw *hw = &pf->hw; in i40e_suspend_port_tx()
6867 ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL); in i40e_suspend_port_tx()
6869 dev_info(&pf->pdev->dev, in i40e_suspend_port_tx()
6872 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_suspend_port_tx()
6874 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_suspend_port_tx()
6882 * i40e_hw_set_dcb_config - Program new DCBX settings into HW
6892 struct i40e_dcbx_config *old_cfg = &pf->hw.local_dcbx_config; in i40e_hw_set_dcb_config()
6897 dev_dbg(&pf->pdev->dev, "No Change in DCB Config required.\n"); in i40e_hw_set_dcb_config()
6906 old_cfg->etsrec = old_cfg->etscfg; in i40e_hw_set_dcb_config()
6907 ret = i40e_set_dcb_config(&pf->hw); in i40e_hw_set_dcb_config()
6909 dev_info(&pf->pdev->dev, in i40e_hw_set_dcb_config()
6912 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_hw_set_dcb_config()
6920 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) { in i40e_hw_set_dcb_config()
6921 /* Re-start the VSIs if disabled */ in i40e_hw_set_dcb_config()
6933 * i40e_hw_dcb_config - Program new DCBX settings into HW
6948 struct i40e_hw *hw = &pf->hw; in i40e_hw_dcb_config()
6949 u8 num_ports = hw->num_ports; in i40e_hw_dcb_config()
6951 int ret = -EINVAL; in i40e_hw_dcb_config()
6957 dev_dbg(&pf->pdev->dev, "Configuring DCB registers directly\n"); in i40e_hw_dcb_config()
6958 /* Un-pack information to Program ETS HW via shared API in i40e_hw_dcb_config()
6961 * ETS/NON-ETS arbiter mode in i40e_hw_dcb_config()
6964 * PFC priority bit-map in i40e_hw_dcb_config()
6968 * TSA table (ETS or non-ETS) in i40e_hw_dcb_config()
6978 switch (new_cfg->etscfg.tsatable[i]) { in i40e_hw_dcb_config()
6982 new_cfg->etscfg.tcbwtable[i]; in i40e_hw_dcb_config()
6997 old_cfg = &hw->local_dcbx_config; in i40e_hw_dcb_config()
7007 set_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_hw_dcb_config()
7009 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_hw_dcb_config()
7011 set_bit(__I40E_PORT_SUSPENDED, pf->state); in i40e_hw_dcb_config()
7023 (hw, pf->mac_seid, &ets_data, in i40e_hw_dcb_config()
7026 dev_info(&pf->pdev->dev, in i40e_hw_dcb_config()
7029 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_hw_dcb_config()
7041 i40e_dcb_hw_rx_ets_bw_config(hw, new_cfg->etscfg.tcbwtable, mode, in i40e_hw_dcb_config()
7043 i40e_dcb_hw_pfc_config(hw, new_cfg->pfc.pfcenable, in i40e_hw_dcb_config()
7044 new_cfg->etscfg.prioritytable); in i40e_hw_dcb_config()
7045 i40e_dcb_hw_rx_up2tc_config(hw, new_cfg->etscfg.prioritytable); in i40e_hw_dcb_config()
7049 mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu; in i40e_hw_dcb_config()
7054 false, new_cfg->pfc.pfcenable, in i40e_hw_dcb_config()
7056 i40e_dcb_hw_rx_pb_config(hw, &pf->pb_cfg, &pb_cfg); in i40e_hw_dcb_config()
7059 pf->pb_cfg = pb_cfg; in i40e_hw_dcb_config()
7062 ret = i40e_aq_dcb_updated(&pf->hw, NULL); in i40e_hw_dcb_config()
7064 dev_info(&pf->pdev->dev, in i40e_hw_dcb_config()
7067 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_hw_dcb_config()
7077 /* Re-start the VSIs if disabled */ in i40e_hw_dcb_config()
7081 clear_bit(__I40E_PORT_SUSPENDED, pf->state); in i40e_hw_dcb_config()
7086 /* Wait for the PF's queues to be disabled */ in i40e_hw_dcb_config()
7090 set_bit(__I40E_PF_RESET_REQUESTED, pf->state); in i40e_hw_dcb_config()
7095 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_hw_dcb_config()
7096 set_bit(__I40E_CLIENT_L2_CHANGE, pf->state); in i40e_hw_dcb_config()
7099 if (test_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, pf->hw.caps)) in i40e_hw_dcb_config()
7108 * i40e_dcb_sw_default_config - Set default DCB configuration when DCB in SW
7115 struct i40e_dcbx_config *dcb_cfg = &pf->hw.local_dcbx_config; in i40e_dcb_sw_default_config()
7117 struct i40e_hw *hw = &pf->hw; in i40e_dcb_sw_default_config()
7120 if (test_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, pf->hw.caps)) { in i40e_dcb_sw_default_config()
7122 memset(&pf->tmp_cfg, 0, sizeof(struct i40e_dcbx_config)); in i40e_dcb_sw_default_config()
7123 pf->tmp_cfg.etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING; in i40e_dcb_sw_default_config()
7124 pf->tmp_cfg.etscfg.maxtcs = 0; in i40e_dcb_sw_default_config()
7125 pf->tmp_cfg.etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW; in i40e_dcb_sw_default_config()
7126 pf->tmp_cfg.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS; in i40e_dcb_sw_default_config()
7127 pf->tmp_cfg.pfc.willing = I40E_IEEE_DEFAULT_PFC_WILLING; in i40e_dcb_sw_default_config()
7128 pf->tmp_cfg.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; in i40e_dcb_sw_default_config()
7130 pf->tmp_cfg.numapps = I40E_IEEE_DEFAULT_NUM_APPS; in i40e_dcb_sw_default_config()
7131 pf->tmp_cfg.app[0].selector = I40E_APP_SEL_ETHTYPE; in i40e_dcb_sw_default_config()
7132 pf->tmp_cfg.app[0].priority = I40E_IEEE_DEFAULT_APP_PRIO; in i40e_dcb_sw_default_config()
7133 pf->tmp_cfg.app[0].protocolid = I40E_APP_PROTOID_FCOE; in i40e_dcb_sw_default_config()
7135 return i40e_hw_set_dcb_config(pf, &pf->tmp_cfg); in i40e_dcb_sw_default_config()
7145 (hw, pf->mac_seid, &ets_data, in i40e_dcb_sw_default_config()
7148 dev_info(&pf->pdev->dev, in i40e_dcb_sw_default_config()
7151 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_dcb_sw_default_config()
7152 err = -ENOENT; in i40e_dcb_sw_default_config()
7157 dcb_cfg->etscfg.willing = I40E_IEEE_DEFAULT_ETS_WILLING; in i40e_dcb_sw_default_config()
7158 dcb_cfg->etscfg.cbs = 0; in i40e_dcb_sw_default_config()
7159 dcb_cfg->etscfg.maxtcs = I40E_MAX_TRAFFIC_CLASS; in i40e_dcb_sw_default_config()
7160 dcb_cfg->etscfg.tcbwtable[0] = I40E_IEEE_DEFAULT_ETS_TCBW; in i40e_dcb_sw_default_config()
7167 * i40e_init_pf_dcb - Initialize DCB configuration
7175 struct i40e_hw *hw = &pf->hw; in i40e_init_pf_dcb()
7181 if (test_bit(I40E_HW_CAP_NO_DCB_SUPPORT, pf->hw.caps)) { in i40e_init_pf_dcb()
7182 dev_info(&pf->pdev->dev, "DCB is not supported.\n"); in i40e_init_pf_dcb()
7183 err = -EOPNOTSUPP; in i40e_init_pf_dcb()
7186 if (test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)) { in i40e_init_pf_dcb()
7187 dev_info(&pf->pdev->dev, "FW LLDP is disabled, attempting SW DCB\n"); in i40e_init_pf_dcb()
7190 dev_info(&pf->pdev->dev, "Could not initialize SW DCB\n"); in i40e_init_pf_dcb()
7193 dev_info(&pf->pdev->dev, "SW DCB initialization succeeded.\n"); in i40e_init_pf_dcb()
7194 pf->dcbx_cap = DCB_CAP_DCBX_HOST | in i40e_init_pf_dcb()
7197 set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); in i40e_init_pf_dcb()
7198 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_init_pf_dcb()
7204 if ((!hw->func_caps.dcb) || in i40e_init_pf_dcb()
7205 (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) { in i40e_init_pf_dcb()
7206 dev_info(&pf->pdev->dev, in i40e_init_pf_dcb()
7210 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | in i40e_init_pf_dcb()
7213 set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); in i40e_init_pf_dcb()
7217 if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1) in i40e_init_pf_dcb()
7218 set_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_init_pf_dcb()
7220 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_init_pf_dcb()
7221 dev_dbg(&pf->pdev->dev, in i40e_init_pf_dcb()
7224 } else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) { in i40e_init_pf_dcb()
7225 dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n"); in i40e_init_pf_dcb()
7226 set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags); in i40e_init_pf_dcb()
7228 dev_info(&pf->pdev->dev, in i40e_init_pf_dcb()
7231 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_init_pf_dcb()
7240 * i40e_print_link_message - print link up or down
7247 struct i40e_pf *pf = vsi->back; in i40e_print_link_message()
7255 new_speed = pf->hw.phy.link_info.link_speed; in i40e_print_link_message()
7259 if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed)) in i40e_print_link_message()
7261 vsi->current_isup = isup; in i40e_print_link_message()
7262 vsi->current_speed = new_speed; in i40e_print_link_message()
7264 netdev_info(vsi->netdev, "NIC Link is Down\n"); in i40e_print_link_message()
7271 if (pf->hw.func_caps.npar_enable && in i40e_print_link_message()
7272 (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB || in i40e_print_link_message()
7273 pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB)) in i40e_print_link_message()
7274 netdev_warn(vsi->netdev, in i40e_print_link_message()
7277 switch (pf->hw.phy.link_info.link_speed) { in i40e_print_link_message()
7306 switch (pf->hw.fc.current_mode) { in i40e_print_link_message()
7321 if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) { in i40e_print_link_message()
7326 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) in i40e_print_link_message()
7329 if (pf->hw.phy.link_info.fec_info & in i40e_print_link_message()
7331 fec = "CL74 FC-FEC/BASE-R"; in i40e_print_link_message()
7332 else if (pf->hw.phy.link_info.fec_info & in i40e_print_link_message()
7334 fec = "CL108 RS-FEC"; in i40e_print_link_message()
7336 /* 'CL108 RS-FEC' should be displayed when RS is requested, or in i40e_print_link_message()
7339 if (vsi->back->hw.phy.link_info.req_fec_info & in i40e_print_link_message()
7341 if (vsi->back->hw.phy.link_info.req_fec_info & in i40e_print_link_message()
7343 req_fec = "CL108 RS-FEC"; in i40e_print_link_message()
7345 req_fec = "CL74 FC-FEC/BASE-R"; in i40e_print_link_message()
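/* Annotation (not in the original source): on 25G links the driver
 * distinguishes IEEE Clause 74 "fire code" FC-FEC/BASE-R from Clause
 * 108 Reed-Solomon FEC, reporting both the negotiated and the
 * requested mode in the link-up message below.
 */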
7347 netdev_info(vsi->netdev, in i40e_print_link_message()
7350 } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) { in i40e_print_link_message()
7355 if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED) in i40e_print_link_message()
7358 if (pf->hw.phy.link_info.fec_info & in i40e_print_link_message()
7360 fec = "CL74 FC-FEC/BASE-R"; in i40e_print_link_message()
7362 if (pf->hw.phy.link_info.req_fec_info & in i40e_print_link_message()
7364 req_fec = "CL74 FC-FEC/BASE-R"; in i40e_print_link_message()
7366 netdev_info(vsi->netdev, in i40e_print_link_message()
7370 netdev_info(vsi->netdev, in i40e_print_link_message()
7378 * i40e_up_complete - Finish the last steps of bringing up a connection
7383 struct i40e_pf *pf = vsi->back; in i40e_up_complete()
7386 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_up_complete()
7396 clear_bit(__I40E_VSI_DOWN, vsi->state); in i40e_up_complete()
7400 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && in i40e_up_complete()
7401 (vsi->netdev)) { in i40e_up_complete()
7403 netif_tx_start_all_queues(vsi->netdev); in i40e_up_complete()
7404 netif_carrier_on(vsi->netdev); in i40e_up_complete()
7408 if (vsi->type == I40E_VSI_FDIR) { in i40e_up_complete()
7410 pf->fd_add_err = 0; in i40e_up_complete()
7411 pf->fd_atr_cnt = 0; in i40e_up_complete()
7418 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); in i40e_up_complete()
7425 * i40e_vsi_reinit_locked - Reset the VSI
7433 struct i40e_pf *pf = vsi->back; in i40e_vsi_reinit_locked()
7435 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) in i40e_vsi_reinit_locked()
7440 clear_bit(__I40E_CONFIG_BUSY, pf->state); in i40e_vsi_reinit_locked()
7444 * i40e_force_link_state - Force the link status
7453 struct i40e_hw *hw = &pf->hw; in i40e_force_link_state()
7467 dev_err(&pf->pdev->dev, in i40e_force_link_state()
7470 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_force_link_state()
7479 dev_err(&pf->pdev->dev, in i40e_force_link_state()
7482 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_force_link_state()
7490 if (test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) in i40e_force_link_state()
7506 if (test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags)) { in i40e_force_link_state()
7524 dev_err(&pf->pdev->dev, in i40e_force_link_state()
7527 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_force_link_state()
7534 /* Wait a little bit (on 40G cards it sometimes takes a really in i40e_force_link_state()
7548 * i40e_up - Bring the connection back up after being down
7555 if (vsi->type == I40E_VSI_MAIN && in i40e_up()
7556 (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) || in i40e_up()
7557 test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, vsi->back->flags))) in i40e_up()
7558 i40e_force_link_state(vsi->back, true); in i40e_up()
7568 * i40e_down - Shutdown the connection processing
7576 * sets the vsi->state __I40E_VSI_DOWN bit. in i40e_down()
7578 if (vsi->netdev) { in i40e_down()
7579 netif_carrier_off(vsi->netdev); in i40e_down()
7580 netif_tx_disable(vsi->netdev); in i40e_down()
7584 if (vsi->type == I40E_VSI_MAIN && in i40e_down()
7585 (test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags) || in i40e_down()
7586 test_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, vsi->back->flags))) in i40e_down()
7587 i40e_force_link_state(vsi->back, false); in i40e_down()
7590 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_down()
7591 i40e_clean_tx_ring(vsi->tx_rings[i]); in i40e_down()
7593 /* Make sure that in-progress ndo_xdp_xmit and in i40e_down()
7597 i40e_clean_tx_ring(vsi->xdp_rings[i]); in i40e_down()
7599 i40e_clean_rx_ring(vsi->rx_rings[i]); in i40e_down()
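/* Annotation (not in the original source): Rx rings are cleaned last,
 * after both the regular and the XDP Tx rings, so an in-flight
 * ndo_xdp_xmit or XDP_TX frame can never reference an already-freed
 * Rx buffer.
 */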
7605 * i40e_validate_mqprio_qopt - validate queue mapping info in i40e_validate_mqprio_qopt()
7616 if (mqprio_qopt->qopt.offset[0] != 0 || in i40e_validate_mqprio_qopt()
7617 mqprio_qopt->qopt.num_tc < 1 || in i40e_validate_mqprio_qopt()
7618 mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS) in i40e_validate_mqprio_qopt()
7619 return -EINVAL; in i40e_validate_mqprio_qopt()
7621 if (!mqprio_qopt->qopt.count[i]) in i40e_validate_mqprio_qopt()
7622 return -EINVAL; in i40e_validate_mqprio_qopt()
7623 if (mqprio_qopt->min_rate[i]) { in i40e_validate_mqprio_qopt()
7624 dev_err(&vsi->back->pdev->dev, in i40e_validate_mqprio_qopt()
7626 return -EINVAL; in i40e_validate_mqprio_qopt()
7628 max_rate = mqprio_qopt->max_rate[i]; in i40e_validate_mqprio_qopt()
7632 if (i >= mqprio_qopt->qopt.num_tc - 1) in i40e_validate_mqprio_qopt()
7634 if (mqprio_qopt->qopt.offset[i + 1] != in i40e_validate_mqprio_qopt()
7635 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) in i40e_validate_mqprio_qopt()
7636 return -EINVAL; in i40e_validate_mqprio_qopt()
7638 if (vsi->num_queue_pairs < in i40e_validate_mqprio_qopt()
7639 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) { in i40e_validate_mqprio_qopt()
7640 dev_err(&vsi->back->pdev->dev, in i40e_validate_mqprio_qopt()
7642 return -EINVAL; in i40e_validate_mqprio_qopt()
7645 dev_err(&vsi->back->pdev->dev, in i40e_validate_mqprio_qopt()
7647 return -EINVAL; in i40e_validate_mqprio_qopt()
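/* Annotation (not in the original source): a layout that passes these
 * checks is contiguous and gap-free, e.g. num_tc = 2 on an 8-queue VSI
 * with offset = {0, 4} and count = {4, 4}; any min_rate request is
 * rejected outright.
 */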
7653 * i40e_vsi_set_default_tc_config - set default values for tc configuration
7662 vsi->tc_config.numtc = 1; in i40e_vsi_set_default_tc_config()
7663 vsi->tc_config.enabled_tc = 1; in i40e_vsi_set_default_tc_config()
7664 qcount = min_t(int, vsi->alloc_queue_pairs, in i40e_vsi_set_default_tc_config()
7665 i40e_pf_get_max_q_per_tc(vsi->back)); in i40e_vsi_set_default_tc_config()
7670 vsi->tc_config.tc_info[i].qoffset = 0; in i40e_vsi_set_default_tc_config()
7672 vsi->tc_config.tc_info[i].qcount = qcount; in i40e_vsi_set_default_tc_config()
7674 vsi->tc_config.tc_info[i].qcount = 1; in i40e_vsi_set_default_tc_config()
7675 vsi->tc_config.tc_info[i].netdev_tc = 0; in i40e_vsi_set_default_tc_config()
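/* Annotation (not in the original source): the resulting default is a
 * single enabled TC (bitmap 0x1) owning all queues, e.g. with 8
 * allocated queue pairs: tc_info[0] = { .qoffset = 0, .qcount = 8,
 * .netdev_tc = 0 }, with every other TC holding a placeholder qcount
 * of 1.
 */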
7700 *aq_err = hw->aq.asq_last_status; in i40e_del_macvlan_filter()
7729 *aq_err = hw->aq.asq_last_status; in i40e_add_macvlan_filter()
7735 * i40e_reset_ch_rings - Reset the queue contexts in a channel
7745 for (i = 0; i < ch->num_queue_pairs; i++) { in i40e_reset_ch_rings()
7746 pf_q = ch->base_queue + i; in i40e_reset_ch_rings()
7747 tx_ring = vsi->tx_rings[pf_q]; in i40e_reset_ch_rings()
7748 tx_ring->ch = NULL; in i40e_reset_ch_rings()
7749 rx_ring = vsi->rx_rings[pf_q]; in i40e_reset_ch_rings()
7750 rx_ring->ch = NULL; in i40e_reset_ch_rings()
7767 if (list_empty(&vsi->macvlan_list)) in i40e_free_macvlan_channels()
7770 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { in i40e_free_macvlan_channels()
7775 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); in i40e_free_macvlan_channels()
7776 netdev_unbind_sb_channel(vsi->netdev, ch->fwd->netdev); in i40e_free_macvlan_channels()
7777 netdev_set_sb_channel(ch->fwd->netdev, 0); in i40e_free_macvlan_channels()
7778 kfree(ch->fwd); in i40e_free_macvlan_channels()
7779 ch->fwd = NULL; in i40e_free_macvlan_channels()
7782 list_del(&ch->list); in i40e_free_macvlan_channels()
7783 parent_vsi = ch->parent_vsi; in i40e_free_macvlan_channels()
7784 if (!parent_vsi || !ch->initialized) { in i40e_free_macvlan_channels()
7790 ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid, in i40e_free_macvlan_channels()
7793 dev_err(&vsi->back->pdev->dev, in i40e_free_macvlan_channels()
7795 ch->seid, parent_vsi->seid); in i40e_free_macvlan_channels()
7798 vsi->macvlan_cnt = 0; in i40e_free_macvlan_channels()
7802 * i40e_fwd_ring_up - bring the macvlan device up
7812 struct i40e_pf *pf = vsi->back; in i40e_fwd_ring_up()
7813 struct i40e_hw *hw = &pf->hw; in i40e_fwd_ring_up()
7816 list_for_each_entry_safe(iter, ch_tmp, &vsi->macvlan_list, list) { in i40e_fwd_ring_up()
7818 iter->fwd = fwd; in i40e_fwd_ring_up()
7821 netdev_bind_sb_channel_queue(vsi->netdev, vdev, in i40e_fwd_ring_up()
7823 iter->num_queue_pairs, in i40e_fwd_ring_up()
7824 iter->base_queue); in i40e_fwd_ring_up()
7825 for (i = 0; i < iter->num_queue_pairs; i++) { in i40e_fwd_ring_up()
7829 pf_q = iter->base_queue + i; in i40e_fwd_ring_up()
7832 tx_ring = vsi->tx_rings[pf_q]; in i40e_fwd_ring_up()
7833 tx_ring->ch = iter; in i40e_fwd_ring_up()
7836 rx_ring = vsi->rx_rings[pf_q]; in i40e_fwd_ring_up()
7837 rx_ring->ch = iter; in i40e_fwd_ring_up()
7845 return -EINVAL; in i40e_fwd_ring_up()
7853 ret = i40e_add_macvlan_filter(hw, ch->seid, vdev->dev_addr, &aq_err); in i40e_fwd_ring_up()
7857 for (i = 0; i < ch->num_queue_pairs; i++) { in i40e_fwd_ring_up()
7861 pf_q = ch->base_queue + i; in i40e_fwd_ring_up()
7862 rx_ring = vsi->rx_rings[pf_q]; in i40e_fwd_ring_up()
7863 rx_ring->netdev = NULL; in i40e_fwd_ring_up()
7865 dev_info(&pf->pdev->dev, in i40e_fwd_ring_up()
7876 * i40e_setup_macvlans - create the channels which will be macvlans
7885 struct i40e_pf *pf = vsi->back; in i40e_setup_macvlans()
7886 struct i40e_hw *hw = &pf->hw; in i40e_setup_macvlans()
7893 if (vsi->type != I40E_VSI_MAIN || !macvlan_cnt) in i40e_setup_macvlans()
7894 return -EINVAL; in i40e_setup_macvlans()
7896 num_qps = vsi->num_queue_pairs - (macvlan_cnt * qcnt); in i40e_setup_macvlans()
7898 /* find the next higher power-of-2 of num queue pairs */ in i40e_setup_macvlans()
7899 pow = fls(roundup_pow_of_two(num_qps) - 1); in i40e_setup_macvlans()
7908 ctxt.seid = vsi->seid; in i40e_setup_macvlans()
7909 ctxt.pf_num = vsi->back->hw.pf_id; in i40e_setup_macvlans()
7911 ctxt.uplink_seid = vsi->uplink_seid; in i40e_setup_macvlans()
7912 ctxt.info = vsi->info; in i40e_setup_macvlans()
7915 ctxt.info.queue_mapping[0] = cpu_to_le16(vsi->base_queue); in i40e_setup_macvlans()
7919 vsi->rss_size = max_t(u16, num_qps, qcnt); in i40e_setup_macvlans()
7922 dev_info(&pf->pdev->dev, in i40e_setup_macvlans()
7924 vsi->rss_size); in i40e_setup_macvlans()
7927 vsi->reconfig_rss = true; in i40e_setup_macvlans()
7928 dev_dbg(&vsi->back->pdev->dev, in i40e_setup_macvlans()
7929 "Reconfigured RSS with num_queues (%u)\n", vsi->rss_size); in i40e_setup_macvlans()
7930 vsi->next_base_queue = num_qps; in i40e_setup_macvlans()
7931 vsi->cnt_q_avail = vsi->num_queue_pairs - num_qps; in i40e_setup_macvlans()
7933 /* Update the VSI after updating the VSI queue-mapping in i40e_setup_macvlans()
7938 dev_info(&pf->pdev->dev, in i40e_setup_macvlans()
7941 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_setup_macvlans()
7946 vsi->info.valid_sections = 0; in i40e_setup_macvlans()
7949 INIT_LIST_HEAD(&vsi->macvlan_list); in i40e_setup_macvlans()
7953 ret = -ENOMEM; in i40e_setup_macvlans()
7956 INIT_LIST_HEAD(&ch->list); in i40e_setup_macvlans()
7957 ch->num_queue_pairs = qcnt; in i40e_setup_macvlans()
7959 ret = -EINVAL; in i40e_setup_macvlans()
7963 ch->parent_vsi = vsi; in i40e_setup_macvlans()
7964 vsi->cnt_q_avail -= ch->num_queue_pairs; in i40e_setup_macvlans()
7965 vsi->macvlan_cnt++; in i40e_setup_macvlans()
7966 list_add_tail(&ch->list, &vsi->macvlan_list); in i40e_setup_macvlans()
7972 dev_info(&pf->pdev->dev, "Failed to setup macvlans\n"); in i40e_setup_macvlans()
7979 * i40e_fwd_add - configure macvlans
7987 struct i40e_vsi *vsi = np->vsi; in i40e_fwd_add()
7988 struct i40e_pf *pf = vsi->back; in i40e_fwd_add()
7992 if (test_bit(I40E_FLAG_DCB_ENA, pf->flags)) { in i40e_fwd_add()
7994 return ERR_PTR(-EINVAL); in i40e_fwd_add()
7998 return ERR_PTR(-EINVAL); in i40e_fwd_add()
8000 if (pf->num_lan_msix < I40E_MIN_MACVLAN_VECTORS) { in i40e_fwd_add()
8002 return ERR_PTR(-EINVAL); in i40e_fwd_add()
8009 return ERR_PTR(-ERANGE); in i40e_fwd_add()
8011 if (!vsi->macvlan_cnt) { in i40e_fwd_add()
8013 set_bit(0, vsi->fwd_bitmask); in i40e_fwd_add()
8019 vectors = pf->num_lan_msix; in i40e_fwd_add()
8023 macvlan_cnt = (vectors - 32) / 4; in i40e_fwd_add()
8027 macvlan_cnt = (vectors - 16) / 2; in i40e_fwd_add()
8031 macvlan_cnt = vectors - 16; in i40e_fwd_add()
8035 macvlan_cnt = vectors - 8; in i40e_fwd_add()
8039 macvlan_cnt = vectors - 1; in i40e_fwd_add()
8043 return ERR_PTR(-EBUSY); in i40e_fwd_add()
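/* Annotation (not in the original source): the ladder above trades PF
 * queues for macvlan capacity based on the MSI-X budget; e.g. with 24
 * LAN vectors the "vectors - 16" tier yields 8 offloadable macvlans at
 * one queue pair each.
 */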
8057 avail_macvlan = find_first_zero_bit(vsi->fwd_bitmask, in i40e_fwd_add()
8058 vsi->macvlan_cnt); in i40e_fwd_add()
8060 return ERR_PTR(-EBUSY); in i40e_fwd_add()
8065 return ERR_PTR(-ENOMEM); in i40e_fwd_add()
8067 set_bit(avail_macvlan, vsi->fwd_bitmask); in i40e_fwd_add()
8068 fwd->bit_no = avail_macvlan; in i40e_fwd_add()
8070 fwd->netdev = vdev; in i40e_fwd_add()
8083 return ERR_PTR(-EINVAL); in i40e_fwd_add()
8090 * i40e_del_all_macvlans - Delete all the mac filters on the channels
8096 struct i40e_pf *pf = vsi->back; in i40e_del_all_macvlans()
8097 struct i40e_hw *hw = &pf->hw; in i40e_del_all_macvlans()
8100 if (list_empty(&vsi->macvlan_list)) in i40e_del_all_macvlans()
8103 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { in i40e_del_all_macvlans()
8105 ret = i40e_del_macvlan_filter(hw, ch->seid, in i40e_del_all_macvlans()
8111 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); in i40e_del_all_macvlans()
8112 netdev_unbind_sb_channel(vsi->netdev, in i40e_del_all_macvlans()
8113 ch->fwd->netdev); in i40e_del_all_macvlans()
8114 netdev_set_sb_channel(ch->fwd->netdev, 0); in i40e_del_all_macvlans()
8115 kfree(ch->fwd); in i40e_del_all_macvlans()
8116 ch->fwd = NULL; in i40e_del_all_macvlans()
8123 * i40e_fwd_del - delete macvlan interfaces
8132 struct i40e_vsi *vsi = np->vsi; in i40e_fwd_del()
8133 struct i40e_pf *pf = vsi->back; in i40e_fwd_del()
8134 struct i40e_hw *hw = &pf->hw; in i40e_fwd_del()
8138 list_for_each_entry_safe(ch, ch_tmp, &vsi->macvlan_list, list) { in i40e_fwd_del()
8141 fwd->netdev->dev_addr)) { in i40e_fwd_del()
8142 ret = i40e_del_macvlan_filter(hw, ch->seid, in i40e_fwd_del()
8148 clear_bit(ch->fwd->bit_no, vsi->fwd_bitmask); in i40e_fwd_del()
8149 netdev_unbind_sb_channel(netdev, fwd->netdev); in i40e_fwd_del()
8150 netdev_set_sb_channel(fwd->netdev, 0); in i40e_fwd_del()
8151 kfree(ch->fwd); in i40e_fwd_del()
8152 ch->fwd = NULL; in i40e_fwd_del()
8154 dev_info(&pf->pdev->dev, in i40e_fwd_del()
8165 * i40e_setup_tc - configure multiple traffic classes
8173 struct i40e_vsi *vsi = np->vsi; in i40e_setup_tc()
8174 struct i40e_pf *pf = vsi->back; in i40e_setup_tc()
8178 int ret = -EINVAL; in i40e_setup_tc()
8182 old_queue_pairs = vsi->num_queue_pairs; in i40e_setup_tc()
8183 num_tc = mqprio_qopt->qopt.num_tc; in i40e_setup_tc()
8184 hw = mqprio_qopt->qopt.hw; in i40e_setup_tc()
8185 mode = mqprio_qopt->mode; in i40e_setup_tc()
8187 clear_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags); in i40e_setup_tc()
8188 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); in i40e_setup_tc()
8193 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { in i40e_setup_tc()
8200 clear_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags); in i40e_setup_tc()
8203 if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) { in i40e_setup_tc()
8217 if (test_bit(I40E_FLAG_DCB_ENA, pf->flags)) { in i40e_setup_tc()
8222 if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_setup_tc()
8227 memcpy(&vsi->mqprio_qopt, mqprio_qopt, in i40e_setup_tc()
8229 set_bit(I40E_FLAG_TC_MQPRIO_ENA, pf->flags); in i40e_setup_tc()
8230 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_setup_tc()
8233 return -EINVAL; in i40e_setup_tc()
8242 if (enabled_tc == vsi->tc_config.enabled_tc && in i40e_setup_tc()
8252 /* Configure VSI for enabled TCs */ in i40e_setup_tc()
8256 vsi->seid); in i40e_setup_tc()
8260 (!is_power_of_2(vsi->tc_config.tc_info[0].qcount))) { in i40e_setup_tc()
8263 vsi->tc_config.tc_info[0].qcount); in i40e_setup_tc()
8264 ret = -EINVAL; in i40e_setup_tc()
8269 dev_info(&vsi->back->pdev->dev, in i40e_setup_tc()
8271 vsi->seid, vsi->tc_config.tc_info[0].qcount); in i40e_setup_tc()
8274 if (vsi->mqprio_qopt.max_rate[0]) { in i40e_setup_tc()
8276 vsi->mqprio_qopt.max_rate[0]); in i40e_setup_tc()
8278 ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate); in i40e_setup_tc()
8283 dev_dbg(&vsi->back->pdev->dev, in i40e_setup_tc()
8284 "Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n", in i40e_setup_tc()
8287 vsi->seid); in i40e_setup_tc()
8295 vsi->num_queue_pairs = old_queue_pairs; in i40e_setup_tc()
8316 * i40e_set_cld_element - sets cloud filter element data
8330 ether_addr_copy(cld->outer_mac, filter->dst_mac); in i40e_set_cld_element()
8331 ether_addr_copy(cld->inner_mac, filter->src_mac); in i40e_set_cld_element()
8333 if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6) in i40e_set_cld_element()
8336 if (filter->n_proto == ETH_P_IPV6) { in i40e_set_cld_element()
8337 #define IPV6_MAX_INDEX (ARRAY_SIZE(filter->dst_ipv6) - 1) in i40e_set_cld_element()
8338 for (i = 0; i < ARRAY_SIZE(filter->dst_ipv6); i++) { in i40e_set_cld_element()
8339 ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]); in i40e_set_cld_element()
8341 *(__le32 *)&cld->ipaddr.raw_v6.data[i * 2] = cpu_to_le32(ipa); in i40e_set_cld_element()
8344 ipa = be32_to_cpu(filter->dst_ipv4); in i40e_set_cld_element()
8346 memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa)); in i40e_set_cld_element()
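/* Annotation (not in the original source): note the endianness dance
 * above: addresses arrive from the stack big-endian (__be32), are
 * converted to host order, then stored little-endian for the admin
 * queue, with IPv6 words additionally written in reversed order
 * (IPV6_MAX_INDEX - i).
 */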
8349 cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id)); in i40e_set_cld_element()
8352 * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id) in i40e_set_cld_element()
8354 if (filter->tenant_id) in i40e_set_cld_element()
8359 * i40e_add_del_cloud_filter - Add/del cloud filter
8371 struct i40e_pf *pf = vsi->back; in i40e_add_del_cloud_filter()
8390 if (filter->flags >= ARRAY_SIZE(flag_table)) in i40e_add_del_cloud_filter()
8391 return -EIO; in i40e_add_del_cloud_filter()
8398 if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE) in i40e_add_del_cloud_filter()
8399 cld_filter.flags = cpu_to_le16(filter->tunnel_type << in i40e_add_del_cloud_filter()
8402 if (filter->n_proto == ETH_P_IPV6) in i40e_add_del_cloud_filter()
8403 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] | in i40e_add_del_cloud_filter()
8406 cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] | in i40e_add_del_cloud_filter()
8410 ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid, in i40e_add_del_cloud_filter()
8413 ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid, in i40e_add_del_cloud_filter()
8416 dev_dbg(&pf->pdev->dev, in i40e_add_del_cloud_filter()
8418 add ? "add" : "delete", filter->dst_port, ret, in i40e_add_del_cloud_filter()
8419 pf->hw.aq.asq_last_status); in i40e_add_del_cloud_filter()
8421 dev_info(&pf->pdev->dev, in i40e_add_del_cloud_filter()
8423 add ? "Added" : "Deleted", filter->seid); in i40e_add_del_cloud_filter()
8428 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
8441 struct i40e_pf *pf = vsi->back; in i40e_add_del_cloud_filter_big_buf()
8445 if ((is_valid_ether_addr(filter->dst_mac) && in i40e_add_del_cloud_filter_big_buf()
8446 is_valid_ether_addr(filter->src_mac)) || in i40e_add_del_cloud_filter_big_buf()
8447 (is_multicast_ether_addr(filter->dst_mac) && in i40e_add_del_cloud_filter_big_buf()
8448 is_multicast_ether_addr(filter->src_mac))) in i40e_add_del_cloud_filter_big_buf()
8449 return -EOPNOTSUPP; in i40e_add_del_cloud_filter_big_buf()
8451 /* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP in i40e_add_del_cloud_filter_big_buf()
8454 if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP) in i40e_add_del_cloud_filter_big_buf()
8455 return -EOPNOTSUPP; in i40e_add_del_cloud_filter_big_buf()
8458 if (filter->src_port || in i40e_add_del_cloud_filter_big_buf()
8459 (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) || in i40e_add_del_cloud_filter_big_buf()
8460 !ipv6_addr_any(&filter->ip.v6.src_ip6)) in i40e_add_del_cloud_filter_big_buf()
8461 return -EOPNOTSUPP; in i40e_add_del_cloud_filter_big_buf()
8468 if (is_valid_ether_addr(filter->dst_mac) || in i40e_add_del_cloud_filter_big_buf()
8469 is_valid_ether_addr(filter->src_mac) || in i40e_add_del_cloud_filter_big_buf()
8470 is_multicast_ether_addr(filter->dst_mac) || in i40e_add_del_cloud_filter_big_buf()
8471 is_multicast_ether_addr(filter->src_mac)) { in i40e_add_del_cloud_filter_big_buf()
8473 if (filter->dst_ipv4) in i40e_add_del_cloud_filter_big_buf()
8474 return -EOPNOTSUPP; in i40e_add_del_cloud_filter_big_buf()
8483 if (filter->vlan_id) { in i40e_add_del_cloud_filter_big_buf()
8488 } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) || in i40e_add_del_cloud_filter_big_buf()
8489 !ipv6_addr_any(&filter->ip.v6.dst_ip6)) { in i40e_add_del_cloud_filter_big_buf()
8492 if (filter->n_proto == ETH_P_IPV6) in i40e_add_del_cloud_filter_big_buf()
8499 dev_err(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
8501 return -EINVAL; in i40e_add_del_cloud_filter_big_buf()
8506 be16_to_cpu(filter->dst_port); in i40e_add_del_cloud_filter_big_buf()
8512 dev_err(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
8518 ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid, in i40e_add_del_cloud_filter_big_buf()
8521 ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid, in i40e_add_del_cloud_filter_big_buf()
8526 dev_dbg(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
8528 add ? "add" : "delete", ret, pf->hw.aq.asq_last_status); in i40e_add_del_cloud_filter_big_buf()
8530 dev_info(&pf->pdev->dev, in i40e_add_del_cloud_filter_big_buf()
8532 add ? "add" : "delete", filter->seid, in i40e_add_del_cloud_filter_big_buf()
8533 ntohs(filter->dst_port)); in i40e_add_del_cloud_filter_big_buf()
8538 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
8549 struct flow_dissector *dissector = rule->match.dissector; in i40e_parse_cls_flower()
8551 struct i40e_pf *pf = vsi->back; in i40e_parse_cls_flower()
8554 if (dissector->used_keys & in i40e_parse_cls_flower()
8563 dev_err(&pf->pdev->dev, "Unsupported key used: 0x%llx\n", in i40e_parse_cls_flower()
8564 dissector->used_keys); in i40e_parse_cls_flower()
8565 return -EOPNOTSUPP; in i40e_parse_cls_flower()
8572 if (match.mask->keyid != 0) in i40e_parse_cls_flower()
8575 filter->tenant_id = be32_to_cpu(match.key->keyid); in i40e_parse_cls_flower()
8582 n_proto_key = ntohs(match.key->n_proto); in i40e_parse_cls_flower()
8583 n_proto_mask = ntohs(match.mask->n_proto); in i40e_parse_cls_flower()
8589 filter->n_proto = n_proto_key & n_proto_mask; in i40e_parse_cls_flower()
8590 filter->ip_proto = match.key->ip_proto; in i40e_parse_cls_flower()
8599 if (!is_zero_ether_addr(match.mask->dst)) { in i40e_parse_cls_flower()
8600 if (is_broadcast_ether_addr(match.mask->dst)) { in i40e_parse_cls_flower()
8603 dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n", in i40e_parse_cls_flower()
8604 match.mask->dst); in i40e_parse_cls_flower()
8605 return -EIO; in i40e_parse_cls_flower()
8609 if (!is_zero_ether_addr(match.mask->src)) { in i40e_parse_cls_flower()
8610 if (is_broadcast_ether_addr(match.mask->src)) { in i40e_parse_cls_flower()
8613 dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n", in i40e_parse_cls_flower()
8614 match.mask->src); in i40e_parse_cls_flower()
8615 return -EIO; in i40e_parse_cls_flower()
8618 ether_addr_copy(filter->dst_mac, match.key->dst); in i40e_parse_cls_flower()
8619 ether_addr_copy(filter->src_mac, match.key->src); in i40e_parse_cls_flower()
8626 if (match.mask->vlan_id) { in i40e_parse_cls_flower()
8627 if (match.mask->vlan_id == VLAN_VID_MASK) { in i40e_parse_cls_flower()
8631 dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n", in i40e_parse_cls_flower()
8632 match.mask->vlan_id); in i40e_parse_cls_flower()
8633 return -EIO; in i40e_parse_cls_flower()
8637 filter->vlan_id = cpu_to_be16(match.key->vlan_id); in i40e_parse_cls_flower()
8644 addr_type = match.key->addr_type; in i40e_parse_cls_flower()
8651 if (match.mask->dst) { in i40e_parse_cls_flower()
8652 if (match.mask->dst == cpu_to_be32(0xffffffff)) { in i40e_parse_cls_flower()
8655 dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n", in i40e_parse_cls_flower()
8656 &match.mask->dst); in i40e_parse_cls_flower()
8657 return -EIO; in i40e_parse_cls_flower()
8661 if (match.mask->src) { in i40e_parse_cls_flower()
8662 if (match.mask->src == cpu_to_be32(0xffffffff)) { in i40e_parse_cls_flower()
8665 dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n", in i40e_parse_cls_flower()
8666 &match.mask->src); in i40e_parse_cls_flower()
8667 return -EIO; in i40e_parse_cls_flower()
8672 dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n"); in i40e_parse_cls_flower()
8673 return -EIO; in i40e_parse_cls_flower()
8675 filter->dst_ipv4 = match.key->dst; in i40e_parse_cls_flower()
8676 filter->src_ipv4 = match.key->src; in i40e_parse_cls_flower()
8687 if (ipv6_addr_loopback(&match.key->dst) || in i40e_parse_cls_flower()
8688 ipv6_addr_loopback(&match.key->src)) { in i40e_parse_cls_flower()
8689 dev_err(&pf->pdev->dev, in i40e_parse_cls_flower()
8691 return -EIO; in i40e_parse_cls_flower()
8693 if (!ipv6_addr_any(&match.mask->dst) || in i40e_parse_cls_flower()
8694 !ipv6_addr_any(&match.mask->src)) in i40e_parse_cls_flower()
8697 memcpy(&filter->src_ipv6, &match.key->src.s6_addr32, in i40e_parse_cls_flower()
8698 sizeof(filter->src_ipv6)); in i40e_parse_cls_flower()
8699 memcpy(&filter->dst_ipv6, &match.key->dst.s6_addr32, in i40e_parse_cls_flower()
8700 sizeof(filter->dst_ipv6)); in i40e_parse_cls_flower()
8707 if (match.mask->src) { in i40e_parse_cls_flower()
8708 if (match.mask->src == cpu_to_be16(0xffff)) { in i40e_parse_cls_flower()
8711 dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n", in i40e_parse_cls_flower()
8712 be16_to_cpu(match.mask->src)); in i40e_parse_cls_flower()
8713 return -EIO; in i40e_parse_cls_flower()
8717 if (match.mask->dst) { in i40e_parse_cls_flower()
8718 if (match.mask->dst == cpu_to_be16(0xffff)) { in i40e_parse_cls_flower()
8721 dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n", in i40e_parse_cls_flower()
8722 be16_to_cpu(match.mask->dst)); in i40e_parse_cls_flower()
8723 return -EIO; in i40e_parse_cls_flower()
8727 filter->dst_port = match.key->dst; in i40e_parse_cls_flower()
8728 filter->src_port = match.key->src; in i40e_parse_cls_flower()
8730 switch (filter->ip_proto) { in i40e_parse_cls_flower()
8735 dev_err(&pf->pdev->dev, in i40e_parse_cls_flower()
8737 return -EINVAL; in i40e_parse_cls_flower()
8740 filter->flags = field_flags; in i40e_parse_cls_flower()
8758 filter->seid = vsi->seid; in i40e_handle_tclass()
8760 } else if (vsi->tc_config.enabled_tc & BIT(tc)) { in i40e_handle_tclass()
8761 if (!filter->dst_port) { in i40e_handle_tclass()
8762 dev_err(&vsi->back->pdev->dev, in i40e_handle_tclass()
8764 return -EINVAL; in i40e_handle_tclass()
8766 if (list_empty(&vsi->ch_list)) in i40e_handle_tclass()
8767 return -EINVAL; in i40e_handle_tclass()
8768 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, in i40e_handle_tclass()
8770 if (ch->seid == vsi->tc_seid_map[tc]) in i40e_handle_tclass()
8771 filter->seid = ch->seid; in i40e_handle_tclass()
8775 dev_err(&vsi->back->pdev->dev, "TC is not enabled\n"); in i40e_handle_tclass()
8776 return -EINVAL; in i40e_handle_tclass()
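/* Annotation (not in the original source): TC0 resolves to the main
 * VSI's SEID, while any other enabled TC resolves to the channel VSI
 * recorded in tc_seid_map[] by i40e_configure_queue_channels();
 * channel steering additionally requires a destination port match.
 */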
* i40e_configure_clsflower - Configure tc flower filters
int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
struct i40e_pf *pf = vsi->back;
dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
return -EOPNOTSUPP;
dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination");
return -EINVAL;
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
return -EBUSY;
if (pf->fdir_pf_active_filters ||
(!hlist_empty(&pf->fdir_filter_list))) {
dev_err(&vsi->back->pdev->dev,
return -EINVAL;
if (test_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags)) {
dev_err(&vsi->back->pdev->dev,
"Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
clear_bit(I40E_FLAG_FD_SB_ENA, vsi->back->flags);
clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, vsi->back->flags);
return -ENOMEM;
filter->cookie = cls_flower->cookie;
if (filter->dst_port)
dev_err(&pf->pdev->dev, "Failed to add cloud filter, err %d\n",
INIT_HLIST_NODE(&filter->cloud_node);
hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);
pf->num_cloud_filters++;
* i40e_find_cloud_filter - Find the cloud filter in the list
&vsi->back->cloud_filter_list, cloud_node)
if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
* i40e_delete_clsflower - Remove tc flower filters
struct i40e_pf *pf = vsi->back;
filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);
return -EINVAL;
hash_del(&filter->cloud_node);
if (filter->dst_port)
dev_err(&pf->pdev->dev,
return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
pf->num_cloud_filters--;
if (!pf->num_cloud_filters)
if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags) &&
!test_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags)) {
set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags);
clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
* i40e_setup_tc_cls_flower - flower classifier offloads
struct i40e_vsi *vsi = np->vsi;
switch (cls_flower->command) {
return -EOPNOTSUPP;
return -EOPNOTSUPP;
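/*
 * Editor's sketch: the elided body of the switch above dispatches on
 * the flower command in the usual flow_cls_offload shape.  The handler
 * names follow the functions shown earlier in this listing; treating
 * FLOW_CLS_STATS as unsupported is an assumption here:
 */
switch (cls_flower->command) {
case FLOW_CLS_REPLACE:
	return i40e_configure_clsflower(vsi, cls_flower);
case FLOW_CLS_DESTROY:
	return i40e_delete_clsflower(vsi, cls_flower);
case FLOW_CLS_STATS:
	return -EOPNOTSUPP;	/* assumed: no stats offload */
default:
	return -EOPNOTSUPP;
}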
if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
return -EOPNOTSUPP;
return -EOPNOTSUPP;
return -EOPNOTSUPP;
* i40e_open - Called when a network interface is made active
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
if (test_bit(__I40E_TESTING, pf->state) ||
test_bit(__I40E_BAD_EEPROM, pf->state))
return -EBUSY;
return -EAGAIN;
wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
* i40e_netif_set_realnum_tx_rx_queues - Update number of tx/rx queues
ret = netif_set_real_num_rx_queues(vsi->netdev,
vsi->num_queue_pairs);
return netif_set_real_num_tx_queues(vsi->netdev,
vsi->num_queue_pairs);
* i40e_vsi_open - set up a VSI's resources and make it operational
struct i40e_pf *pf = vsi->back;
if (vsi->netdev) {
snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
} else if (vsi->type == I40E_VSI_FDIR) {
snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
dev_driver_string(&pf->pdev->dev),
dev_name(&pf->pdev->dev));
err = -EINVAL;
if (vsi == pf->vsi[pf->lan_vsi])
* i40e_fdir_filter_exit - Cleans up the Flow Director accounting
&pf->fdir_filter_list, fdir_node) {
hlist_del(&filter->fdir_node);
list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
list_del(&pit_entry->list);
INIT_LIST_HEAD(&pf->l3_flex_pit_list);
list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
list_del(&pit_entry->list);
INIT_LIST_HEAD(&pf->l4_flex_pit_list);
pf->fdir_pf_active_filters = 0;
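/*
 * Editor's note: the teardown above uses the _safe iterator variants,
 * which cache the next node so the current entry can be unlinked and
 * freed mid-walk.  The idiom in isolation (kernel context; 'head' is a
 * hypothetical hlist of struct i40e_fdir_filter):
 */
struct i40e_fdir_filter *filter;
struct hlist_node *node;

hlist_for_each_entry_safe(filter, node, head, fdir_node) {
	hlist_del(&filter->fdir_node);
	kfree(filter);
}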
* i40e_cloud_filter_exit - Cleans up the cloud filters
&pf->cloud_filter_list, cloud_node) {
hlist_del(&cfilter->cloud_node);
pf->num_cloud_filters = 0;
if (test_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags) &&
!test_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags)) {
set_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
clear_bit(I40E_FLAG_FD_SB_TO_CLOUD_FILTER, pf->flags);
clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);

* i40e_close - Disables a network interface
* The close entry point is called when an interface is de-activated
struct i40e_vsi *vsi = np->vsi;
* i40e_do_reset - Start a PF or Core Reset sequence
dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
dev_dbg(&pf->pdev->dev, "CoreR requested\n");
val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
i40e_flush(&pf->hw);
* Resets only the PF-specific registers
* This goes directly to the tear-down and rebuild of
dev_dbg(&pf->pdev->dev, "PFR requested\n");
dev_info(&pf->pdev->dev,
test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags) ?
/* Find the VSI(s) that requested a re-init */
dev_info(&pf->pdev->dev,
for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
vsi->state))
i40e_vsi_reinit_locked(pf->vsi[v]);
dev_info(&pf->pdev->dev, "VSI down requested\n");
for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
vsi->state)) {
set_bit(__I40E_VSI_DOWN, vsi->state);
dev_info(&pf->pdev->dev,
* i40e_dcb_need_reconfig - Check if DCB needs reconfig
if (memcmp(&new_cfg->etscfg,
&old_cfg->etscfg,
sizeof(new_cfg->etscfg))) {
if (memcmp(&new_cfg->etscfg.prioritytable,
&old_cfg->etscfg.prioritytable,
sizeof(new_cfg->etscfg.prioritytable))) {
dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
if (memcmp(&new_cfg->etscfg.tcbwtable,
&old_cfg->etscfg.tcbwtable,
sizeof(new_cfg->etscfg.tcbwtable)))
dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
if (memcmp(&new_cfg->etscfg.tsatable,
&old_cfg->etscfg.tsatable,
sizeof(new_cfg->etscfg.tsatable)))
dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
if (memcmp(&new_cfg->pfc,
&old_cfg->pfc,
sizeof(new_cfg->pfc))) {
dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
if (memcmp(&new_cfg->app,
&old_cfg->app,
sizeof(new_cfg->app))) {
dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
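/*
 * Editor's sketch: change detection above is a per-section memcmp()
 * against the previous config, each hit setting need_reconfig (and,
 * for ETS/PFC/APP, emitting a debug message).  The pattern in
 * miniature, with a hypothetical helper:
 */
static bool i40e_cfg_section_changed(const void *cur, const void *prev,
				     size_t len)
{
	return memcmp(cur, prev, len) != 0;
}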
* i40e_handle_lldp_event - Handle LLDP Change MIB event
(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
struct i40e_hw *hw = &pf->hw;
/* X710-T*L 2.5G and 5G speeds don't support DCB */
if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
(hw->phy.link_info.link_speed &
!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
if (!test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags))
type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
dev_dbg(&pf->pdev->dev,
&hw->remote_dcbx_config);
tmp_dcbx_cfg = hw->local_dcbx_config;
memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
ret = i40e_get_dcb_config(&pf->hw);
/* X710-T*L 2.5G and 5G speeds don't support DCB */
if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
(hw->phy.link_info.link_speed &
dev_warn(&pf->pdev->dev,
"DCB is not supported for X710-T*L 2.5/5G speeds\n");
clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
dev_info(&pf->pdev->dev,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
&hw->local_dcbx_config);
i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);
if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
set_bit(I40E_FLAG_DCB_ENA, pf->flags);
clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
set_bit(__I40E_PORT_SUSPENDED, pf->state);
clear_bit(__I40E_PORT_SUSPENDED, pf->state);
/* Wait for the PF's queues to be disabled */
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
* i40e_do_reset_safe - Protected reset path for userland calls.

* i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
u32 queue = le32_to_cpu(data->prtdcb_rupto);
u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
struct i40e_hw *hw = &pf->hw;
dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
vf_id -= hw->func_caps.vf_base_id;
vf = &pf->vf[vf_id];
* i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
val = rd32(&pf->hw, I40E_PFQF_FDSTAT);

* i40e_get_current_fd_count - Get total FD filters programmed for this PF
val = rd32(&pf->hw, I40E_PFQF_FDSTAT);

* i40e_get_global_fd_count - Get total FD filters programmed on device
val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
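/*
 * Editor's sketch: each counter above is a bitfield within one status
 * register; after the rd32() the field is extracted mask-and-shift
 * style.  Using FIELD_GET() from linux/bitfield.h with a mask name
 * assumed from the driver's register header:
 */
u32 guaranteed = FIELD_GET(I40E_PFQF_FDSTAT_GUARANT_CNT_MASK, val);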
* i40e_reenable_fdir_sb - Restore FDir SB capability
if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) &&
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");

* i40e_reenable_fdir_atr - Restore FDir ATR capability
if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) &&
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting macvlan filters\n");
* i40e_delete_invalid_filter - Delete an invalid FDIR filter
pf->fdir_pf_active_filters--;
pf->fd_inv = 0;
switch (filter->flow_type) {
pf->fd_tcp4_filter_cnt--;
pf->fd_udp4_filter_cnt--;
pf->fd_sctp4_filter_cnt--;
pf->fd_tcp6_filter_cnt--;
pf->fd_udp6_filter_cnt--;
pf->fd_sctp6_filter_cnt--;
switch (filter->ipl4_proto) {
pf->fd_tcp4_filter_cnt--;
pf->fd_udp4_filter_cnt--;
pf->fd_sctp4_filter_cnt--;
pf->fd_ip4_filter_cnt--;
switch (filter->ipl4_proto) {
pf->fd_tcp6_filter_cnt--;
pf->fd_udp6_filter_cnt--;
pf->fd_sctp6_filter_cnt--;
pf->fd_ip6_filter_cnt--;
hlist_del(&filter->fdir_node);
* i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
/* Check if we have enough room to re-enable FDir SB capability. */
fcnt_avail = pf->fdir_pf_filter_count;
if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
(pf->fd_add_err == 0) ||
(i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
/* We should wait for even more space before re-enabling ATR.
if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
pf->fd_tcp4_filter_cnt == 0 && pf->fd_tcp6_filter_cnt == 0)
if (pf->fd_inv > 0) {
&pf->fdir_filter_list, fdir_node)
if (filter->fd_id == pf->fd_inv)
* i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
if (!time_after(jiffies, pf->fd_flush_timestamp +
* should not re-enable ATR for some time.
min_flush_time = pf->fd_flush_timestamp +
fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
pf->fd_flush_timestamp = jiffies;
set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
wr32(&pf->hw, I40E_PFQF_CTL_1,
i40e_flush(&pf->hw);
pf->fd_flush_cnt++;
pf->fd_add_err = 0;
/* Check FD flush status every 5-6msec */
reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
} while (flush_wait_retry--);
dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
if (!disable_atr && !pf->fd_tcp4_filter_cnt)
clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
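/*
 * Editor's sketch of the flush poll above: bounded retry with a 5-6 ms
 * sleep per iteration, warning when retries run out.  Kernel context;
 * the retry count is a choice of our own, the register and mask names
 * are assumed from the driver's register header:
 */
int retries = 50;

do {
	if (!(rd32(&pf->hw, I40E_PFQF_CTL_1) &
	      I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
		break;			/* flush finished */
	usleep_range(5000, 6000);	/* check every 5-6 ms */
} while (--retries);

if (!retries)
	dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");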
* i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;

* i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
if (test_bit(__I40E_DOWN, pf->state))
if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
* i40e_vsi_link_event - notify VSI of a link event
if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
switch (vsi->type) {
if (!vsi->netdev || !vsi->netdev_registered)
netif_carrier_on(vsi->netdev);
netif_tx_wake_all_queues(vsi->netdev);
netif_carrier_off(vsi->netdev);
netif_tx_stop_all_queues(vsi->netdev);

* i40e_veb_link_event - notify elements on the veb of a link event
if (!veb || !veb->pf)
pf = veb->pf;
if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
i40e_veb_link_event(pf->veb[i], link_up);
for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
i40e_vsi_link_event(pf->vsi[i], link_up);
* i40e_link_event - Update netif_carrier status
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
pf->hw.phy.get_link_info = true;
old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
status = i40e_get_link_status(&pf->hw, &new_link);
clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
old_link_speed = pf->hw.phy.link_info_old.link_speed;
new_link_speed = pf->hw.phy.link_info.link_speed;
(test_bit(__I40E_VSI_DOWN, vsi->state) ||
new_link == netif_carrier_ok(vsi->netdev)))
if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
if (pf->vf)
if (test_bit(I40E_FLAG_PTP_ENA, pf->flags))
if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
dev_dbg(&pf->pdev->dev, "Reconfig DCB to single TC as result of Link Down\n");
memset(&pf->tmp_cfg, 0, sizeof(pf->tmp_cfg));
clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
pf->dcbx_cap = DCB_CAP_DCBX_HOST |
set_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
clear_bit(I40E_FLAG_DCB_ENA, pf->flags);
* i40e_watchdog_subtask - periodic checks not using event driven response
if (test_bit(__I40E_DOWN, pf->state) ||
test_bit(__I40E_CONFIG_BUSY, pf->state))
if (time_before(jiffies, (pf->service_timer_previous +
pf->service_timer_period)))
pf->service_timer_previous = jiffies;
if (test_bit(I40E_FLAG_LINK_POLLING_ENA, pf->flags) ||
test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && pf->vsi[i]->netdev)
i40e_update_stats(pf->vsi[i]);
if (test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags)) {
if (pf->veb[i])
i40e_update_veb_stats(pf->veb[i]);
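/*
 * Editor's note: time_before()/time_after() are used on jiffies above
 * instead of a raw '<' because they compare via signed subtraction,
 * which remains correct when the jiffies counter wraps around.  A
 * rough sketch of the semantics (name of our own choosing):
 */
#define sketch_time_before(a, b)	((long)((a) - (b)) < 0)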
* i40e_reset_subtask - Set up for resetting the device and driver
if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
clear_bit(__I40E_REINIT_REQUESTED, pf->state);
if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
clear_bit(__I40E_DOWN_REQUESTED, pf->state);
if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
!test_bit(__I40E_DOWN, pf->state) &&
!test_bit(__I40E_CONFIG_BUSY, pf->state)) {
* i40e_handle_link_event - Handle link event
(struct i40e_aqc_get_link_status *)&e->desc.params.raw;
/* Do a new status request to re-enable LSE reporting
if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
dev_err(&pf->pdev->dev,
dev_err(&pf->pdev->dev,
if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
(!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
(!(status->link_info & I40E_AQ_LINK_UP)) &&
(!test_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))) {
dev_err(&pf->pdev->dev,
dev_err(&pf->pdev->dev,
* i40e_clean_adminq_subtask - Clean the AdminQ rings
struct i40e_hw *hw = &pf->hw;
if (test_bit(__I40E_RESET_FAILED, pf->state))
val = rd32(&pf->hw, I40E_PF_ARQLEN);
if (hw->debug_mask & I40E_DEBUG_AQ)
dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
if (hw->debug_mask & I40E_DEBUG_AQ)
dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
pf->arq_overflows++;
if (hw->debug_mask & I40E_DEBUG_AQ)
dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
wr32(&pf->hw, I40E_PF_ARQLEN, val);
val = rd32(&pf->hw, I40E_PF_ATQLEN);
if (pf->hw.debug_mask & I40E_DEBUG_AQ)
dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
if (pf->hw.debug_mask & I40E_DEBUG_AQ)
dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
if (pf->hw.debug_mask & I40E_DEBUG_AQ)
dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
wr32(&pf->hw, I40E_PF_ATQLEN, val);
if (ret == -EALREADY)
dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
i40e_debug(&pf->hw, I40E_DEBUG_NVM,
dev_info(&pf->pdev->dev,
clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
/* re-enable Admin queue interrupt cause */
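/*
 * Editor's sketch: the fragment above comes from the ARQ drain loop --
 * pull admin-queue events until the queue reports empty, then dispatch
 * on the event opcode.  Shape only; event buffer setup and most opcode
 * arms are elided in this listing, and the work-limit guard is assumed
 * from the driver:
 */
do {
	ret = i40e_clean_arq_element(hw, &event, &pending);
	if (ret == -EALREADY)	/* queue empty, nothing to do */
		break;
	else if (ret)
		break;

	opcode = le16_to_cpu(event.desc.opcode);
	/* switch (opcode) { case i40e_aqc_opc_lldp_update_mib: ... } */
} while (pending && (i++ < pf->adminq_work_limit));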
* i40e_verify_eeprom - make sure eeprom is good to use
err = i40e_diag_eeprom_test(&pf->hw);
err = i40e_diag_eeprom_test(&pf->hw);
dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
set_bit(__I40E_BAD_EEPROM, pf->state);
if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
clear_bit(__I40E_BAD_EEPROM, pf->state);
* enable switch loop back or die - no point in a return value
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
ctxt.seid = pf->main_vsi_seid;
ctxt.pf_num = pf->hw.pf_id;
ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
dev_info(&pf->pdev->dev,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
dev_info(&pf->pdev->dev,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

* disable switch loop back or die - no point in a return value
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
ctxt.seid = pf->main_vsi_seid;
ctxt.pf_num = pf->hw.pf_id;
ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
dev_info(&pf->pdev->dev,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
dev_info(&pf->pdev->dev,
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
* i40e_config_bridge_mode - Configure the HW bridge mode
struct i40e_pf *pf = veb->pf;
if (pf->hw.debug_mask & I40E_DEBUG_LAN)
dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
if (veb->bridge_mode & BRIDGE_MODE_VEPA)
* i40e_reconstitute_veb - rebuild the VEB and anything connected to it
struct i40e_pf *pf = veb->pf;
for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
if (pf->vsi[v] &&
pf->vsi[v]->veb_idx == veb->idx &&
pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
ctl_vsi = pf->vsi[v];
dev_info(&pf->pdev->dev,
"missing owner VSI for veb_idx %d\n", veb->idx);
ret = -ENOENT;
if (ctl_vsi != pf->vsi[pf->lan_vsi])
ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
dev_info(&pf->pdev->dev,
veb->idx, ret);
if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags))
veb->bridge_mode = BRIDGE_MODE_VEB;
veb->bridge_mode = BRIDGE_MODE_VEPA;
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
if (pf->vsi[v]->veb_idx == veb->idx) {
struct i40e_vsi *vsi = pf->vsi[v];
vsi->uplink_seid = veb->seid;
dev_info(&pf->pdev->dev,
/* create any VEBs attached to this VEB - RECURSION */
if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
pf->veb[veb_idx]->uplink_seid = veb->seid;
ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
* i40e_get_capabilities - get info about the HW
return -ENOMEM;
err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
dev_info(&pf->pdev->dev,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENODEV;
if (pf->hw.debug_mask & I40E_DEBUG_USER) {
dev_info(&pf->pdev->dev,
pf->hw.pf_id, pf->hw.func_caps.num_vfs,
pf->hw.func_caps.num_msix_vectors,
pf->hw.func_caps.num_msix_vectors_vf,
pf->hw.func_caps.fd_filters_guaranteed,
pf->hw.func_caps.fd_filters_best_effort,
pf->hw.func_caps.num_tx_qp,
pf->hw.func_caps.num_vsis);
dev_info(&pf->pdev->dev,
pf->hw.dev_caps.switch_mode,
pf->hw.dev_caps.valid_functions);
dev_info(&pf->pdev->dev,
"SR-IOV=%d, num_vfs for all function=%u\n",
pf->hw.dev_caps.sr_iov_1_1,
pf->hw.dev_caps.num_vfs);
dev_info(&pf->pdev->dev,
pf->hw.dev_caps.num_vsis,
pf->hw.dev_caps.num_rx_qp,
pf->hw.dev_caps.num_tx_qp);
#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
+ pf->hw.func_caps.num_vfs)
if (pf->hw.revision_id == 0 &&
pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
dev_info(&pf->pdev->dev,
pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
* i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags))
pf->vsi[pf->lan_vsi]->seid, 0);
dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags);
* i40e_fdir_teardown - release the Flow Director resources

* i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
struct i40e_pf *pf = vsi->back;
hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
if (cfilter->seid != seid)
if (cfilter->dst_port)
dev_dbg(&pf->pdev->dev,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
* i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
if (list_empty(&vsi->ch_list))
list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
if (!ch->initialized)
ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
dev_info(&vsi->back->pdev->dev,
vsi->uplink_seid);
ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
dev_info(&vsi->back->pdev->dev,
ch->seid);
vsi->next_base_queue = vsi->next_base_queue +
ch->num_queue_pairs;
if (ch->max_tx_rate) {
u64 credits = ch->max_tx_rate;
if (i40e_set_bw_limit(vsi, ch->seid,
ch->max_tx_rate))
return -EINVAL;
dev_dbg(&vsi->back->pdev->dev,
"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
ch->max_tx_rate,
ch->seid);
ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
dev_dbg(&vsi->back->pdev->dev,
ch->seid);
* i40e_clean_xps_state - clean xps state for every tx_ring
if (vsi->tx_rings)
for (i = 0; i < vsi->num_queue_pairs; i++)
if (vsi->tx_rings[i])
vsi->tx_rings[i]->state);
* i40e_prep_for_reset - prep for the core to reset
struct i40e_hw *hw = &pf->hw;
clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
if (i40e_check_asq_alive(&pf->hw))
dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v]) {
i40e_clean_xps_state(pf->vsi[v]);
pf->vsi[v]->seid = 0;
i40e_shutdown_adminq(&pf->hw);
if (hw->hmc.hmc_obj) {
dev_warn(&pf->pdev->dev,

* i40e_send_version - update firmware with driver version
i40e_aq_send_driver_version(&pf->hw, &dv, NULL);

* i40e_get_oem_version - get OEM specific version information
hw->nvm.oem_ver =
hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
* i40e_reset - wait for a core reset to finish; if no CoreR was seen, reset the PF
struct i40e_hw *hw = &pf->hw;
dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
set_bit(__I40E_RESET_FAILED, pf->state);
clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
pf->pfr_count++;
* i40e_rebuild - rebuild using a saved config
* @reinit: if the Main VSI needs to be re-initialized.
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
if (test_bit(__I40E_DOWN, pf->state) &&
!test_bit(__I40E_RECOVERY_MODE, pf->state))
dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
ret = i40e_init_adminq(&pf->hw);
dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n",
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
i40e_get_oem_version(&pf->hw);
if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
/* re-verify the eeprom if we just had an EMP reset */
if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
if (test_bit(__I40E_RECOVERY_MODE, pf->state)) {
free_irq(pf->pdev->irq, pf);
ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp, 0, 0);
dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
/* Enable FW to write a default DCB config on link-up
if (I40E_IS_X710TL_DEVICE(hw->device_id) &&
(hw->phy.link_info.link_speed &
dev_warn(&pf->pdev->dev,
"DCB is not supported for X710-T*L 2.5/5G speeds\n");
clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n",
clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags);
ret = i40e_aq_set_phy_int_mask(&pf->hw,
dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
if (vsi->uplink_seid != pf->mac_seid) {
dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
if (!pf->veb[v])
if (pf->veb[v]->uplink_seid == pf->mac_seid ||
pf->veb[v]->uplink_seid == 0) {
ret = i40e_reconstitute_veb(pf->veb[v]);
if (pf->veb[v]->uplink_seid == pf->mac_seid) {
dev_info(&pf->pdev->dev,
vsi->uplink_seid = pf->mac_seid;
} else if (pf->veb[v]->uplink_seid == 0) {
dev_info(&pf->pdev->dev,
if (vsi->uplink_seid == pf->mac_seid) {
dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
dev_info(&pf->pdev->dev,
if (vsi->mqprio_qopt.max_rate[0]) {
vsi->mqprio_qopt.max_rate[0]);
ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
dev_dbg(&vsi->back->pdev->dev,
"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
vsi->seid);
ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
if (test_bit(I40E_HW_CAP_RESTART_AUTONEG, pf->hw.caps)) {
ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) {
i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
pf->main_vsi_seid);
ret = i40e_set_promiscuous(pf, pf->cur_promisc);
dev_warn(&pf->pdev->dev,
pf->cur_promisc ? "on" : "off",
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
clear_bit(__I40E_RESET_FAILED, pf->state);
clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
* i40e_reset_and_rebuild - reset and rebuild using a saved config
* @reinit: if the Main VSI needs to be re-initialized.
if (test_bit(__I40E_IN_REMOVE, pf->state))
/* Now we wait for GRST to settle out.

* i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
struct i40e_hw *hw = &pf->hw;
if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
u16 queue = FIELD_GET(I40E_GL_MDET_TX_QUEUE_MASK, reg) -
pf->hw.func_caps.base_queue;
dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x …
u16 queue = FIELD_GET(I40E_GL_MDET_RX_QUEUE_MASK, reg) -
pf->hw.func_caps.base_queue;
dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02…
dev_dbg(&pf->pdev->dev, "TX driver issue detected on PF\n");
dev_dbg(&pf->pdev->dev, "RX driver issue detected on PF\n");
for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
vf = &(pf->vf[i]);
vf->num_mdd_events++;
dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
dev_info(&pf->pdev->dev,
"Use PF Control I/F to re-enable the VF\n");
set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
vf->num_mdd_events++;
dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
dev_info(&pf->pdev->dev,
"Use PF Control I/F to re-enable the VF\n");
set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
/* re-enable mdd interrupt cause */
clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
* i40e_service_task - Run the driver's async subtasks
if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
test_bit(__I40E_SUSPENDED, pf->state))
if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
pf->state))
pf->vsi[pf->lan_vsi]);
clear_bit(__I40E_SERVICE_SCHED, pf->state);
* rather than wait for the timer to tick again.
if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
* i40e_service_timer - timer callback
mod_timer(&pf->service_timer,
round_jiffies(jiffies + pf->service_timer_period));
* i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
struct i40e_pf *pf = vsi->back;
switch (vsi->type) {
vsi->alloc_queue_pairs = pf->num_lan_qps;
if (!vsi->num_tx_desc)
vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
if (!vsi->num_rx_desc)
vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
vsi->num_q_vectors = pf->num_lan_msix;
vsi->num_q_vectors = 1;
vsi->alloc_queue_pairs = 1;
vsi->num_tx_desc = ALIGN(I40E_FDIR_RING_COUNT,
vsi->num_rx_desc = ALIGN(I40E_FDIR_RING_COUNT,
vsi->num_q_vectors = pf->num_fdsb_msix;
vsi->alloc_queue_pairs = pf->num_vmdq_qps;
if (!vsi->num_tx_desc)
vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
if (!vsi->num_rx_desc)
vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
vsi->num_q_vectors = pf->num_vmdq_msix;
vsi->alloc_queue_pairs = pf->num_vf_qps;
if (!vsi->num_tx_desc)
vsi->num_tx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
if (!vsi->num_rx_desc)
vsi->num_rx_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
return -ENODATA;
vsi->num_tx_desc = I40E_MIN_NUM_DESCRIPTORS;
vsi->num_rx_desc = I40E_MIN_NUM_DESCRIPTORS;
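/*
 * Editor's note: the elided second argument of each ALIGN() above is
 * presumably the hardware's required granularity; ALIGN() rounds up,
 * e.g.
 *
 *   ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS, I40E_REQ_DESCRIPTOR_MULTIPLE)
 *
 * yields the smallest multiple of I40E_REQ_DESCRIPTOR_MULTIPLE that is
 * greater than or equal to I40E_DEFAULT_NUM_DESCRIPTORS.
 */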
* i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
vsi->tx_rings = kzalloc(size, GFP_KERNEL);
if (!vsi->tx_rings)
return -ENOMEM;
next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
vsi->xdp_rings = next_rings;
next_rings += vsi->alloc_queue_pairs;
vsi->rx_rings = next_rings;
size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
vsi->q_vectors = kzalloc(size, GFP_KERNEL);
if (!vsi->q_vectors) {
ret = -ENOMEM;
kfree(vsi->tx_rings);
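/*
 * Editor's note: one kzalloc() backs all three pointer arrays above --
 * the tx, xdp and rx ring pointer arrays are carved out of the same
 * block, which is why only vsi->tx_rings is ever passed to kfree().
 * In miniature (hypothetical locals; XDP assumed enabled):
 */
struct i40e_ring **rings, **tx, **xdp, **rx;

rings = kzalloc(3 * nqp * sizeof(*rings), GFP_KERNEL);
if (rings) {
	tx = rings;		/* [0, nqp) */
	xdp = rings + nqp;	/* [nqp, 2*nqp) */
	rx = rings + 2 * nqp;	/* [2*nqp, 3*nqp) */
}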
* i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
int ret = -ENODEV;
mutex_lock(&pf->switch_mutex);
i = pf->next_vsi;
while (i < pf->num_alloc_vsi && pf->vsi[i])
if (i >= pf->num_alloc_vsi) {
while (i < pf->next_vsi && pf->vsi[i])
if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
ret = -ENODEV;
pf->next_vsi = ++i;
ret = -ENOMEM;
vsi->type = type;
vsi->back = pf;
set_bit(__I40E_VSI_DOWN, vsi->state);
vsi->flags = 0;
vsi->idx = vsi_idx;
vsi->int_rate_limit = 0;
vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
pf->rss_table_size : 64;
vsi->netdev_registered = false;
vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
hash_init(vsi->mac_filter_hash);
vsi->irqs_ready = false;
vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
if (!vsi->af_xdp_zc_qps)
spin_lock_init(&vsi->mac_filter_hash_lock);
pf->vsi[vsi_idx] = vsi;
bitmap_free(vsi->af_xdp_zc_qps);
pf->next_vsi = i - 1;
mutex_unlock(&pf->switch_mutex);
* i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
kfree(vsi->q_vectors);
vsi->q_vectors = NULL;
kfree(vsi->tx_rings);
vsi->tx_rings = NULL;
vsi->rx_rings = NULL;
vsi->xdp_rings = NULL;

* i40e_clear_rss_config_user - clear the user configured RSS hash keys
kfree(vsi->rss_hkey_user);
vsi->rss_hkey_user = NULL;
kfree(vsi->rss_lut_user);
vsi->rss_lut_user = NULL;
* i40e_vsi_clear - Deallocate the VSI provided
* @vsi: the VSI being un-configured
if (!vsi->back)
pf = vsi->back;
mutex_lock(&pf->switch_mutex);
if (!pf->vsi[vsi->idx]) {
dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
vsi->idx, vsi->idx, vsi->type);
if (pf->vsi[vsi->idx] != vsi) {
dev_err(&pf->pdev->dev,
"pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
pf->vsi[vsi->idx]->idx,
pf->vsi[vsi->idx]->type,
vsi->idx, vsi->type);
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
bitmap_free(vsi->af_xdp_zc_qps);
pf->vsi[vsi->idx] = NULL;
if (vsi->idx < pf->next_vsi)
pf->next_vsi = vsi->idx;
mutex_unlock(&pf->switch_mutex);
* i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
if (vsi->tx_rings && vsi->tx_rings[0]) {
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
kfree_rcu(vsi->tx_rings[i], rcu);
WRITE_ONCE(vsi->tx_rings[i], NULL);
WRITE_ONCE(vsi->rx_rings[i], NULL);
if (vsi->xdp_rings)
WRITE_ONCE(vsi->xdp_rings[i], NULL);
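/*
 * Editor's note: only the tx ring pointer is passed to kfree_rcu()
 * because i40e_alloc_rings() below carves the tx/xdp/rx rings of a
 * queue pair out of a single allocation, so freeing tx_rings[i]
 * releases all of them.  The WRITE_ONCE(..., NULL) stores pair with
 * lockless READ_ONCE() readers (e.g. the stats path), which then see
 * either a valid ring or NULL, never a pointer freed before the RCU
 * grace period ends.
 */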
11652 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
11658 struct i40e_pf *pf = vsi->back; in i40e_alloc_rings()
11662 for (i = 0; i < vsi->alloc_queue_pairs; i++) { in i40e_alloc_rings()
11668 ring->queue_index = i; in i40e_alloc_rings()
11669 ring->reg_idx = vsi->base_queue + i; in i40e_alloc_rings()
11670 ring->ring_active = false; in i40e_alloc_rings()
11671 ring->vsi = vsi; in i40e_alloc_rings()
11672 ring->netdev = vsi->netdev; in i40e_alloc_rings()
11673 ring->dev = &pf->pdev->dev; in i40e_alloc_rings()
11674 ring->count = vsi->num_tx_desc; in i40e_alloc_rings()
11675 ring->size = 0; in i40e_alloc_rings()
11676 ring->dcb_tc = 0; in i40e_alloc_rings()
11677 if (test_bit(I40E_HW_CAP_WB_ON_ITR, vsi->back->hw.caps)) in i40e_alloc_rings()
11678 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; in i40e_alloc_rings()
11679 ring->itr_setting = pf->tx_itr_default; in i40e_alloc_rings()
11680 WRITE_ONCE(vsi->tx_rings[i], ring++); in i40e_alloc_rings()
11685 ring->queue_index = vsi->alloc_queue_pairs + i; in i40e_alloc_rings()
11686 ring->reg_idx = vsi->base_queue + ring->queue_index; in i40e_alloc_rings()
11687 ring->ring_active = false; in i40e_alloc_rings()
11688 ring->vsi = vsi; in i40e_alloc_rings()
11689 ring->netdev = NULL; in i40e_alloc_rings()
11690 ring->dev = &pf->pdev->dev; in i40e_alloc_rings()
11691 ring->count = vsi->num_tx_desc; in i40e_alloc_rings()
11692 ring->size = 0; in i40e_alloc_rings()
11693 ring->dcb_tc = 0; in i40e_alloc_rings()
11694 if (test_bit(I40E_HW_CAP_WB_ON_ITR, vsi->back->hw.caps)) in i40e_alloc_rings()
11695 ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; in i40e_alloc_rings()
11697 ring->itr_setting = pf->tx_itr_default; in i40e_alloc_rings()
11698 WRITE_ONCE(vsi->xdp_rings[i], ring++); in i40e_alloc_rings()
11701 ring->queue_index = i; in i40e_alloc_rings()
11702 ring->reg_idx = vsi->base_queue + i; in i40e_alloc_rings()
11703 ring->ring_active = false; in i40e_alloc_rings()
11704 ring->vsi = vsi; in i40e_alloc_rings()
11705 ring->netdev = vsi->netdev; in i40e_alloc_rings()
11706 ring->dev = &pf->pdev->dev; in i40e_alloc_rings()
11707 ring->count = vsi->num_rx_desc; in i40e_alloc_rings()
11708 ring->size = 0; in i40e_alloc_rings()
11709 ring->dcb_tc = 0; in i40e_alloc_rings()
11710 ring->itr_setting = pf->rx_itr_default; in i40e_alloc_rings()
11711 WRITE_ONCE(vsi->rx_rings[i], ring); in i40e_alloc_rings()
11718 return -ENOMEM; in i40e_alloc_rings()
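/* Editor's note: the elided top of the loop presumably allocates one
 * contiguous block per queue pair that the ring++ walk above then carves
 * up, along the lines of (sketch; assumptions inferred from the visible
 * walk, not from the elided lines):
 *
 *	int qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
 *
 *	ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
 *	if (!ring)
 *		goto err_out;
 *
 * One block yields the Tx ring, the optional XDP Tx ring, and the Rx
 * ring; WRITE_ONCE() publishes each carved pointer so lockless readers
 * (e.g. the stats path) never observe a half-initialized ring.
 */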
11722 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
11724 * @vectors: the number of MSI-X vectors to request
11730 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, in i40e_reserve_msix_vectors()
11733 dev_info(&pf->pdev->dev, in i40e_reserve_msix_vectors()
11734 "MSI-X vector reservation failed: %d\n", vectors); in i40e_reserve_msix_vectors()
11742 * i40e_init_msix - Setup the MSIX capability
11751 struct i40e_hw *hw = &pf->hw; in i40e_init_msix()
11758 if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_init_msix()
11759 return -ENODEV; in i40e_init_msix()
11762 * - Add 1 for "other" cause for Admin Queue events, etc. in i40e_init_msix()
11763 * - The number of LAN queue pairs in i40e_init_msix()
11764 * - Queues being used for RSS. in i40e_init_msix()
11768 * - assumes symmetric Tx/Rx pairing in i40e_init_msix()
11769 * - The number of VMDq pairs in i40e_init_msix()
11770 * - The CPU count within the NUMA node if iWARP is enabled in i40e_init_msix()
11776 vectors_left = hw->func_caps.num_msix_vectors; in i40e_init_msix()
11782 vectors_left--; in i40e_init_msix()
11793 pf->num_lan_msix = min_t(int, cpus, vectors_left / 2); in i40e_init_msix()
11794 vectors_left -= pf->num_lan_msix; in i40e_init_msix()
11797 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { in i40e_init_msix()
11799 pf->num_fdsb_msix = 1; in i40e_init_msix()
11801 vectors_left--; in i40e_init_msix()
11803 pf->num_fdsb_msix = 0; in i40e_init_msix()
11808 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { in i40e_init_msix()
11809 iwarp_requested = pf->num_iwarp_msix; in i40e_init_msix()
11812 pf->num_iwarp_msix = 0; in i40e_init_msix()
11813 else if (vectors_left < pf->num_iwarp_msix) in i40e_init_msix()
11814 pf->num_iwarp_msix = 1; in i40e_init_msix()
11815 v_budget += pf->num_iwarp_msix; in i40e_init_msix()
11816 vectors_left -= pf->num_iwarp_msix; in i40e_init_msix()
11820 if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags)) { in i40e_init_msix()
11822 pf->num_vmdq_msix = 0; in i40e_init_msix()
11823 pf->num_vmdq_qps = 0; in i40e_init_msix()
11826 pf->num_vmdq_vsis * pf->num_vmdq_qps; in i40e_init_msix()
11837 pf->num_vmdq_qps = 1; in i40e_init_msix()
11838 vmdq_vecs_wanted = pf->num_vmdq_vsis; in i40e_init_msix()
11843 pf->num_vmdq_msix = pf->num_vmdq_qps; in i40e_init_msix()
11846 vectors_left -= vmdq_vecs; in i40e_init_msix()
11859 extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left); in i40e_init_msix()
11860 pf->num_lan_msix += extra_vectors; in i40e_init_msix()
11861 vectors_left -= extra_vectors; in i40e_init_msix()
 11864 "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n"); in i40e_init_msix()
11866 v_budget += pf->num_lan_msix; in i40e_init_msix()
11867 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), in i40e_init_msix()
11869 if (!pf->msix_entries) in i40e_init_msix()
11870 return -ENOMEM; in i40e_init_msix()
11873 pf->msix_entries[i].entry = i; in i40e_init_msix()
11877 clear_bit(I40E_FLAG_MSIX_ENA, pf->flags); in i40e_init_msix()
11878 kfree(pf->msix_entries); in i40e_init_msix()
11879 pf->msix_entries = NULL; in i40e_init_msix()
11880 pci_disable_msix(pf->pdev); in i40e_init_msix()
11881 return -ENODEV; in i40e_init_msix()
11885 pf->num_vmdq_vsis = 0; in i40e_init_msix()
11886 pf->num_vmdq_qps = 0; in i40e_init_msix()
11887 pf->num_lan_qps = 1; in i40e_init_msix()
11888 pf->num_lan_msix = 1; in i40e_init_msix()
11898 dev_info(&pf->pdev->dev, in i40e_init_msix()
11899 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n", in i40e_init_msix()
11902 vec = v_actual - 1; in i40e_init_msix()
11905 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ in i40e_init_msix()
11906 pf->num_vmdq_vsis = 1; in i40e_init_msix()
11907 pf->num_vmdq_qps = 1; in i40e_init_msix()
11912 pf->num_lan_msix = 1; in i40e_init_msix()
11915 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { in i40e_init_msix()
11916 pf->num_lan_msix = 1; in i40e_init_msix()
11917 pf->num_iwarp_msix = 1; in i40e_init_msix()
11919 pf->num_lan_msix = 2; in i40e_init_msix()
11923 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { in i40e_init_msix()
11924 pf->num_iwarp_msix = min_t(int, (vec / 3), in i40e_init_msix()
11926 pf->num_vmdq_vsis = min_t(int, (vec / 3), in i40e_init_msix()
11929 pf->num_vmdq_vsis = min_t(int, (vec / 2), in i40e_init_msix()
11932 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { in i40e_init_msix()
11933 pf->num_fdsb_msix = 1; in i40e_init_msix()
11934 vec--; in i40e_init_msix()
11936 pf->num_lan_msix = min_t(int, in i40e_init_msix()
11937 (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)), in i40e_init_msix()
11938 pf->num_lan_msix); in i40e_init_msix()
11939 pf->num_lan_qps = pf->num_lan_msix; in i40e_init_msix()
11944 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && pf->num_fdsb_msix == 0) { in i40e_init_msix()
11945 dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n"); in i40e_init_msix()
11946 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); in i40e_init_msix()
11947 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); in i40e_init_msix()
11949 if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags) && pf->num_vmdq_msix == 0) { in i40e_init_msix()
11950 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); in i40e_init_msix()
11951 clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags); in i40e_init_msix()
11954 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags) && in i40e_init_msix()
11955 pf->num_iwarp_msix == 0) { in i40e_init_msix()
11956 dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n"); in i40e_init_msix()
11957 clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); in i40e_init_msix()
11959 i40e_debug(&pf->hw, I40E_DEBUG_INIT, in i40e_init_msix()
11960 "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n", in i40e_init_msix()
11961 pf->num_lan_msix, in i40e_init_msix()
11962 pf->num_vmdq_msix * pf->num_vmdq_vsis, in i40e_init_msix()
11963 pf->num_fdsb_msix, in i40e_init_msix()
11964 pf->num_iwarp_msix); in i40e_init_msix()
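/* Worked example of the budgeting above (editor's illustration, not
 * taken from the source): with 8 online CPUs and
 * num_msix_vectors = 129:
 *
 *	misc/"other" cause:            1 vector -> 128 left
 *	LAN:   min(8, 128 / 2)       = 8        -> 120 left
 *	FD sideband:                   1        -> 119 left
 *	iWARP: num_online_cpus() + 1 = 9        -> 110 left
 *	VMDq:  num_vmdq_vsis * num_vmdq_qps, capped to the remainder
 *
 * extra_vectors = min(cpus - num_lan_msix, vectors_left) then returns
 * any slack to LAN without ever exceeding the CPU count, and v_budget is
 * what finally goes to pci_enable_msix_range().
 */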
11970 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
11974 * We allocate one q_vector. If allocation fails we return -ENOMEM.
11983 return -ENOMEM; in i40e_vsi_alloc_q_vector()
11985 q_vector->vsi = vsi; in i40e_vsi_alloc_q_vector()
11986 q_vector->v_idx = v_idx; in i40e_vsi_alloc_q_vector()
11987 cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask); in i40e_vsi_alloc_q_vector()
11989 if (vsi->netdev) in i40e_vsi_alloc_q_vector()
11990 netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll); in i40e_vsi_alloc_q_vector()
11993 vsi->q_vectors[v_idx] = q_vector; in i40e_vsi_alloc_q_vector()
11999 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
12003 * return -ENOMEM.
12007 struct i40e_pf *pf = vsi->back; in i40e_vsi_alloc_q_vectors()
12011 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_vsi_alloc_q_vectors()
12012 num_q_vectors = vsi->num_q_vectors; in i40e_vsi_alloc_q_vectors()
12013 else if (vsi == pf->vsi[pf->lan_vsi]) in i40e_vsi_alloc_q_vectors()
12016 return -EINVAL; in i40e_vsi_alloc_q_vectors()
12027 while (v_idx--) in i40e_vsi_alloc_q_vectors()
12034 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
12042 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_init_interrupt_scheme()
12045 clear_bit(I40E_FLAG_MSIX_ENA, pf->flags); in i40e_init_interrupt_scheme()
12046 clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); in i40e_init_interrupt_scheme()
12047 clear_bit(I40E_FLAG_RSS_ENA, pf->flags); in i40e_init_interrupt_scheme()
12048 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); in i40e_init_interrupt_scheme()
12049 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_init_interrupt_scheme()
12050 clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags); in i40e_init_interrupt_scheme()
12051 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); in i40e_init_interrupt_scheme()
12052 clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags); in i40e_init_interrupt_scheme()
12053 clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags); in i40e_init_interrupt_scheme()
12054 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); in i40e_init_interrupt_scheme()
12061 if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && in i40e_init_interrupt_scheme()
12062 test_bit(I40E_FLAG_MSI_ENA, pf->flags)) { in i40e_init_interrupt_scheme()
12063 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); in i40e_init_interrupt_scheme()
12064 vectors = pci_enable_msi(pf->pdev); in i40e_init_interrupt_scheme()
12066 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", in i40e_init_interrupt_scheme()
12068 clear_bit(I40E_FLAG_MSI_ENA, pf->flags); in i40e_init_interrupt_scheme()
12073 if (!test_bit(I40E_FLAG_MSI_ENA, pf->flags) && in i40e_init_interrupt_scheme()
12074 !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_init_interrupt_scheme()
12075 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); in i40e_init_interrupt_scheme()
12079 pf->irq_pile = kzalloc(size, GFP_KERNEL); in i40e_init_interrupt_scheme()
12080 if (!pf->irq_pile) in i40e_init_interrupt_scheme()
12081 return -ENOMEM; in i40e_init_interrupt_scheme()
12083 pf->irq_pile->num_entries = vectors; in i40e_init_interrupt_scheme()
12086 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); in i40e_init_interrupt_scheme()
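/* Selection thus degrades gracefully: MSI-X -> MSI -> legacy INTx, with
 * every feature that needs multiple vectors (RSS, SR-IOV, VMDq, iWARP,
 * DCB, sideband flow director) switched off along the way. The
 * i40e_get_lump() call above permanently claims entry 0 of irq_pile for
 * the misc/AdminQ cause, so VSI queue vectors are always handed out from
 * index 1 upward.
 */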
12092 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
12096 * device. This should be called during resume to re-allocate the q_vectors
12103 /* We cleared the MSI and MSI-X flags when disabling the old interrupt in i40e_restore_interrupt_scheme()
 12104 * scheme. We need to re-enable them here in order to attempt to in i40e_restore_interrupt_scheme()
12105 * re-acquire the MSI or MSI-X vectors in i40e_restore_interrupt_scheme()
12107 set_bit(I40E_FLAG_MSI_ENA, pf->flags); in i40e_restore_interrupt_scheme()
12108 set_bit(I40E_FLAG_MSIX_ENA, pf->flags); in i40e_restore_interrupt_scheme()
12114 /* Now that we've re-acquired IRQs, we need to remap the vectors and in i40e_restore_interrupt_scheme()
12117 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_restore_interrupt_scheme()
12118 if (pf->vsi[i]) { in i40e_restore_interrupt_scheme()
12119 err = i40e_vsi_alloc_q_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
12122 i40e_vsi_map_rings_to_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
12130 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) in i40e_restore_interrupt_scheme()
12136 while (i--) { in i40e_restore_interrupt_scheme()
12137 if (pf->vsi[i]) in i40e_restore_interrupt_scheme()
12138 i40e_vsi_free_q_vectors(pf->vsi[i]); in i40e_restore_interrupt_scheme()
12145 * i40e_setup_misc_vector_for_recovery_mode - Setup the misc vector to handle
12150 * the non-queue interrupts, e.g. AdminQ and errors in recovery mode.
12158 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_setup_misc_vector_for_recovery_mode()
12162 dev_info(&pf->pdev->dev, in i40e_setup_misc_vector_for_recovery_mode()
12163 "MSI-X misc vector request failed, error %d\n", in i40e_setup_misc_vector_for_recovery_mode()
12168 u32 flags = test_bit(I40E_FLAG_MSI_ENA, pf->flags) ? 0 : IRQF_SHARED; in i40e_setup_misc_vector_for_recovery_mode()
12170 err = request_irq(pf->pdev->irq, i40e_intr, flags, in i40e_setup_misc_vector_for_recovery_mode()
12171 pf->int_name, pf); in i40e_setup_misc_vector_for_recovery_mode()
12174 dev_info(&pf->pdev->dev, in i40e_setup_misc_vector_for_recovery_mode()
 12187 * i40e_setup_misc_vector - Setup the misc vector to handle non-queue events
12191 * non-queue interrupts, e.g. AdminQ and errors. This is not used
12196 struct i40e_hw *hw = &pf->hw; in i40e_setup_misc_vector()
12200 if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) { in i40e_setup_misc_vector()
12201 err = request_irq(pf->msix_entries[0].vector, in i40e_setup_misc_vector()
12202 i40e_intr, 0, pf->int_name, pf); in i40e_setup_misc_vector()
12204 clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state); in i40e_setup_misc_vector()
12205 dev_info(&pf->pdev->dev, in i40e_setup_misc_vector()
12207 pf->int_name, err); in i40e_setup_misc_vector()
12208 return -EFAULT; in i40e_setup_misc_vector()
12226 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
12237 struct i40e_pf *pf = vsi->back; in i40e_get_rss_aq()
12238 struct i40e_hw *hw = &pf->hw; in i40e_get_rss_aq()
12242 ret = i40e_aq_get_rss_key(hw, vsi->id, in i40e_get_rss_aq()
12245 dev_info(&pf->pdev->dev, in i40e_get_rss_aq()
12248 i40e_aq_str(&pf->hw, in i40e_get_rss_aq()
12249 pf->hw.aq.asq_last_status)); in i40e_get_rss_aq()
12255 bool pf_lut = vsi->type == I40E_VSI_MAIN; in i40e_get_rss_aq()
12257 ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); in i40e_get_rss_aq()
12259 dev_info(&pf->pdev->dev, in i40e_get_rss_aq()
12262 i40e_aq_str(&pf->hw, in i40e_get_rss_aq()
12263 pf->hw.aq.asq_last_status)); in i40e_get_rss_aq()
12272 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
12283 struct i40e_pf *pf = vsi->back; in i40e_config_rss_reg()
12284 struct i40e_hw *hw = &pf->hw; in i40e_config_rss_reg()
12285 u16 vf_id = vsi->vf_id; in i40e_config_rss_reg()
12292 if (vsi->type == I40E_VSI_MAIN) { in i40e_config_rss_reg()
12295 } else if (vsi->type == I40E_VSI_SRIOV) { in i40e_config_rss_reg()
12299 dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n"); in i40e_config_rss_reg()
12306 if (vsi->type == I40E_VSI_MAIN) { in i40e_config_rss_reg()
12308 return -EINVAL; in i40e_config_rss_reg()
12311 } else if (vsi->type == I40E_VSI_SRIOV) { in i40e_config_rss_reg()
12313 return -EINVAL; in i40e_config_rss_reg()
12317 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); in i40e_config_rss_reg()
12326 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
12337 struct i40e_pf *pf = vsi->back; in i40e_get_rss_reg()
12338 struct i40e_hw *hw = &pf->hw; in i40e_get_rss_reg()
12351 return -EINVAL; in i40e_get_rss_reg()
12360 * i40e_config_rss - Configure RSS keys and lut
12370 struct i40e_pf *pf = vsi->back; in i40e_config_rss()
12372 if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps)) in i40e_config_rss()
12379 * i40e_get_rss - Get RSS keys and lut
12389 struct i40e_pf *pf = vsi->back; in i40e_get_rss()
12391 if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps)) in i40e_get_rss()
12398 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
12414 * i40e_pf_config_rss - Prepare for RSS if used
12419 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_pf_config_rss()
12422 struct i40e_hw *hw = &pf->hw; in i40e_pf_config_rss()
12437 reg_val = (pf->rss_table_size == 512) ? in i40e_pf_config_rss()
12443 if (!vsi->rss_size) { in i40e_pf_config_rss()
12446 * could end up with zero TCs. Check for that to avoid in i40e_pf_config_rss()
12447 * divide-by-zero. It probably won't pass traffic, but it also in i40e_pf_config_rss()
12450 qcount = vsi->num_queue_pairs / in i40e_pf_config_rss()
12451 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1); in i40e_pf_config_rss()
12452 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); in i40e_pf_config_rss()
12454 if (!vsi->rss_size) in i40e_pf_config_rss()
12455 return -EINVAL; in i40e_pf_config_rss()
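/* Example of the guard above (editor's illustration): with 16 queue
 * pairs and numtc = 4, qcount = 4 and rss_size = min(alloc_rss_size, 4).
 * With a (buggy) numtc of 0, the ?: falls back to 1 so qcount stays 16
 * rather than faulting on a divide-by-zero.
 */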
12457 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL); in i40e_pf_config_rss()
12459 return -ENOMEM; in i40e_pf_config_rss()
12462 if (vsi->rss_lut_user) in i40e_pf_config_rss()
12463 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size); in i40e_pf_config_rss()
12465 i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size); in i40e_pf_config_rss()
12470 if (vsi->rss_hkey_user) in i40e_pf_config_rss()
12471 memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE); in i40e_pf_config_rss()
12474 ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size); in i40e_pf_config_rss()
12481 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
12491 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; in i40e_reconfig_rss_queues()
12494 if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags)) in i40e_reconfig_rss_queues()
12498 new_rss_size = min_t(int, queue_count, pf->rss_size_max); in i40e_reconfig_rss_queues()
12500 if (queue_count != vsi->num_queue_pairs) { in i40e_reconfig_rss_queues()
12503 vsi->req_queue_pairs = queue_count; in i40e_reconfig_rss_queues()
12505 if (test_bit(__I40E_IN_REMOVE, pf->state)) in i40e_reconfig_rss_queues()
12506 return pf->alloc_rss_size; in i40e_reconfig_rss_queues()
12508 pf->alloc_rss_size = new_rss_size; in i40e_reconfig_rss_queues()
12515 if (queue_count < vsi->rss_size) { in i40e_reconfig_rss_queues()
12517 dev_dbg(&pf->pdev->dev, in i40e_reconfig_rss_queues()
12521 /* Reset vsi->rss_size, as number of enabled queues changed */ in i40e_reconfig_rss_queues()
12522 qcount = vsi->num_queue_pairs / vsi->tc_config.numtc; in i40e_reconfig_rss_queues()
12523 vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount); in i40e_reconfig_rss_queues()
12527 dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n", in i40e_reconfig_rss_queues()
12528 vsi->req_queue_pairs, pf->rss_size_max); in i40e_reconfig_rss_queues()
12529 return pf->alloc_rss_size; in i40e_reconfig_rss_queues()
12533 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
12542 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, in i40e_get_partition_bw_setting()
12547 pf->min_bw = min_bw; in i40e_get_partition_bw_setting()
12549 pf->max_bw = max_bw; in i40e_get_partition_bw_setting()
12556 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
12567 bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id)); in i40e_set_partition_bw_setting()
12568 bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK; in i40e_set_partition_bw_setting()
12569 bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK; in i40e_set_partition_bw_setting()
12572 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); in i40e_set_partition_bw_setting()
12578 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
12588 if (pf->hw.partition_id != 1) { in i40e_commit_partition_bw_setting()
12589 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
12591 pf->hw.partition_id); in i40e_commit_partition_bw_setting()
12592 ret = -EOPNOTSUPP; in i40e_commit_partition_bw_setting()
12597 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); in i40e_commit_partition_bw_setting()
12598 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
12600 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
12603 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
12607 /* Read word 0x10 of NVM - SW compatibility word 1 */ in i40e_commit_partition_bw_setting()
12608 ret = i40e_aq_read_nvm(&pf->hw, in i40e_commit_partition_bw_setting()
12615 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
12616 i40e_release_nvm(&pf->hw); in i40e_commit_partition_bw_setting()
12618 dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n", in i40e_commit_partition_bw_setting()
12620 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
12624 /* Wait a bit for NVM release to complete */ in i40e_commit_partition_bw_setting()
12628 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); in i40e_commit_partition_bw_setting()
12629 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
12631 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
12634 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
12639 * the NVM - thus storing the bandwidth values permanently. in i40e_commit_partition_bw_setting()
12641 ret = i40e_aq_update_nvm(&pf->hw, in i40e_commit_partition_bw_setting()
12648 last_aq_status = pf->hw.aq.asq_last_status; in i40e_commit_partition_bw_setting()
12649 i40e_release_nvm(&pf->hw); in i40e_commit_partition_bw_setting()
12651 dev_info(&pf->pdev->dev, in i40e_commit_partition_bw_setting()
12654 i40e_aq_str(&pf->hw, last_aq_status)); in i40e_commit_partition_bw_setting()
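/* The commit above follows the driver's usual NVM ownership protocol:
 * acquire the NVM semaphore for READ, fetch SW compatibility word 0x10,
 * release; re-acquire for WRITE, write the word back via the AdminQ
 * update so the shadow RAM is flushed to flash, then release again.
 * Only after the final release are the min/max bandwidth values
 * persistent across reboots.
 */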
12661 * i40e_is_total_port_shutdown_enabled - read NVM and return value
12680 read_status = i40e_read_nvm_word(&pf->hw, in i40e_is_total_port_shutdown_enabled()
12685 read_status = i40e_read_nvm_word(&pf->hw, in i40e_is_total_port_shutdown_enabled()
12692 read_status = i40e_read_nvm_module_data(&pf->hw, in i40e_is_total_port_shutdown_enabled()
12700 link_behavior >>= (pf->hw.port * I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH); in i40e_is_total_port_shutdown_enabled()
12706 dev_warn(&pf->pdev->dev, in i40e_is_total_port_shutdown_enabled()
12707 "total-port-shutdown feature is off due to read nvm error: %pe\n", in i40e_is_total_port_shutdown_enabled()
12713 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
12727 bitmap_zero(pf->flags, I40E_PF_FLAGS_NBITS); in i40e_sw_init()
12728 set_bit(I40E_FLAG_MSI_ENA, pf->flags); in i40e_sw_init()
12729 set_bit(I40E_FLAG_MSIX_ENA, pf->flags); in i40e_sw_init()
12732 pf->rx_itr_default = I40E_ITR_RX_DEF; in i40e_sw_init()
12733 pf->tx_itr_default = I40E_ITR_TX_DEF; in i40e_sw_init()
12738 pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width); in i40e_sw_init()
12739 pf->alloc_rss_size = 1; in i40e_sw_init()
12740 pf->rss_table_size = pf->hw.func_caps.rss_table_size; in i40e_sw_init()
12741 pf->rss_size_max = min_t(int, pf->rss_size_max, in i40e_sw_init()
12742 pf->hw.func_caps.num_tx_qp); in i40e_sw_init()
12744 /* find the next higher power-of-2 of num cpus */ in i40e_sw_init()
12746 pf->rss_size_max = min_t(int, pf->rss_size_max, pow); in i40e_sw_init()
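/* e.g. with 6 online CPUs the next power of two is 8, so rss_size_max is
 * capped at min(previous cap, 8); the elided line presumably computes
 * pow = roundup_pow_of_two(num_online_cpus()) or an equivalent.
 */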
12748 if (pf->hw.func_caps.rss) { in i40e_sw_init()
12749 set_bit(I40E_FLAG_RSS_ENA, pf->flags); in i40e_sw_init()
12750 pf->alloc_rss_size = min_t(int, pf->rss_size_max, in i40e_sw_init()
12755 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) { in i40e_sw_init()
12756 set_bit(I40E_FLAG_MFP_ENA, pf->flags); in i40e_sw_init()
12757 dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); in i40e_sw_init()
12759 dev_warn(&pf->pdev->dev, in i40e_sw_init()
12762 dev_info(&pf->pdev->dev, in i40e_sw_init()
12764 pf->min_bw, pf->max_bw); in i40e_sw_init()
12771 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || in i40e_sw_init()
12772 (pf->hw.func_caps.fd_filters_best_effort > 0)) { in i40e_sw_init()
12773 set_bit(I40E_FLAG_FD_ATR_ENA, pf->flags); in i40e_sw_init()
12774 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) && in i40e_sw_init()
12775 pf->hw.num_partitions > 1) in i40e_sw_init()
12776 dev_info(&pf->pdev->dev, in i40e_sw_init()
12779 set_bit(I40E_FLAG_FD_SB_ENA, pf->flags); in i40e_sw_init()
12780 pf->fdir_pf_filter_count = in i40e_sw_init()
12781 pf->hw.func_caps.fd_filters_guaranteed; in i40e_sw_init()
12782 pf->hw.fdir_shared_filter_count = in i40e_sw_init()
12783 pf->hw.func_caps.fd_filters_best_effort; in i40e_sw_init()
12787 if (test_bit(I40E_HW_CAP_ATR_EVICT, pf->hw.caps)) in i40e_sw_init()
12788 set_bit(I40E_FLAG_HW_ATR_EVICT_ENA, pf->flags); in i40e_sw_init()
12790 if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) { in i40e_sw_init()
12791 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; in i40e_sw_init()
12792 set_bit(I40E_FLAG_VMDQ_ENA, pf->flags); in i40e_sw_init()
12793 pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf); in i40e_sw_init()
12796 if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) { in i40e_sw_init()
12797 set_bit(I40E_FLAG_IWARP_ENA, pf->flags); in i40e_sw_init()
12799 pf->num_iwarp_msix = (int)num_online_cpus() + 1; in i40e_sw_init()
12806 if (pf->hw.mac.type == I40E_MAC_XL710 && in i40e_sw_init()
12807 pf->hw.func_caps.npar_enable) in i40e_sw_init()
12808 clear_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, pf->hw.caps); in i40e_sw_init()
12811 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { in i40e_sw_init()
12812 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; in i40e_sw_init()
12813 set_bit(I40E_FLAG_SRIOV_ENA, pf->flags); in i40e_sw_init()
12814 pf->num_req_vfs = min_t(int, in i40e_sw_init()
12815 pf->hw.func_caps.num_vfs, in i40e_sw_init()
12819 pf->lan_veb = I40E_NO_VEB; in i40e_sw_init()
12820 pf->lan_vsi = I40E_NO_VSI; in i40e_sw_init()
12823 clear_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags); in i40e_sw_init()
12827 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); in i40e_sw_init()
12828 pf->qp_pile = kzalloc(size, GFP_KERNEL); in i40e_sw_init()
12829 if (!pf->qp_pile) { in i40e_sw_init()
12830 err = -ENOMEM; in i40e_sw_init()
12833 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; in i40e_sw_init()
12835 pf->tx_timeout_recovery_level = 1; in i40e_sw_init()
12837 if (pf->hw.mac.type != I40E_MAC_X722 && in i40e_sw_init()
12842 set_bit(I40E_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags); in i40e_sw_init()
12843 set_bit(I40E_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags); in i40e_sw_init()
12844 dev_info(&pf->pdev->dev, in i40e_sw_init()
12845 "total-port-shutdown was enabled, link-down-on-close is forced on\n"); in i40e_sw_init()
12847 mutex_init(&pf->switch_mutex); in i40e_sw_init()
12854 * i40e_set_ntuple - set the ntuple feature flag and take action
12864 /* Check if Flow Director n-tuple support was enabled or disabled. If in i40e_set_ntuple()
12869 if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) in i40e_set_ntuple()
12871 /* enable FD_SB only if there is MSI-X vector and no cloud in i40e_set_ntuple()
12874 if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) { in i40e_set_ntuple()
12875 set_bit(I40E_FLAG_FD_SB_ENA, pf->flags); in i40e_set_ntuple()
12876 clear_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); in i40e_set_ntuple()
12880 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { in i40e_set_ntuple()
12884 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); in i40e_set_ntuple()
12885 clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state); in i40e_set_ntuple()
12886 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); in i40e_set_ntuple()
12889 pf->fd_add_err = 0; in i40e_set_ntuple()
12890 pf->fd_atr_cnt = 0; in i40e_set_ntuple()
12891 /* if ATR was auto disabled it can be re-enabled. */ in i40e_set_ntuple()
12892 if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) in i40e_set_ntuple()
12893 if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && in i40e_set_ntuple()
12894 (I40E_DEBUG_FD & pf->hw.debug_mask)) in i40e_set_ntuple()
12895 dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); in i40e_set_ntuple()
12901 * i40e_clear_rss_lut - clear the rx hash lookup table
12906 struct i40e_pf *pf = vsi->back; in i40e_clear_rss_lut()
12907 struct i40e_hw *hw = &pf->hw; in i40e_clear_rss_lut()
12908 u16 vf_id = vsi->vf_id; in i40e_clear_rss_lut()
12911 if (vsi->type == I40E_VSI_MAIN) { in i40e_clear_rss_lut()
12914 } else if (vsi->type == I40E_VSI_SRIOV) { in i40e_clear_rss_lut()
12918 dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n"); in i40e_clear_rss_lut()
12923 * i40e_set_loopback - turn on/off loopback mode on underlying PF
12929 bool if_running = netif_running(vsi->netdev) && in i40e_set_loopback()
12930 !test_and_set_bit(__I40E_VSI_DOWN, vsi->state); in i40e_set_loopback()
12936 ret = i40e_aq_set_mac_loopback(&vsi->back->hw, ena, NULL); in i40e_set_loopback()
12938 netdev_err(vsi->netdev, "Failed to toggle loopback state\n"); in i40e_set_loopback()
12946 * i40e_set_features - set the netdev feature flags
12955 struct i40e_vsi *vsi = np->vsi; in i40e_set_features()
12956 struct i40e_pf *pf = vsi->back; in i40e_set_features()
12959 if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH)) in i40e_set_features()
12962 netdev->features & NETIF_F_RXHASH) in i40e_set_features()
12971 (netdev->features & NETIF_F_HW_TC) && pf->num_cloud_filters) { in i40e_set_features()
12972 dev_err(&pf->pdev->dev, in i40e_set_features()
12974 return -EINVAL; in i40e_set_features()
12977 if (!(features & NETIF_F_HW_L2FW_DOFFLOAD) && vsi->macvlan_cnt) in i40e_set_features()
12985 if ((features ^ netdev->features) & NETIF_F_LOOPBACK) in i40e_set_features()
12996 struct i40e_hw *hw = &np->vsi->back->hw; in i40e_udp_tunnel_set_port()
13000 type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN : in i40e_udp_tunnel_set_port()
13003 ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index, in i40e_udp_tunnel_set_port()
13008 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_udp_tunnel_set_port()
13009 return -EIO; in i40e_udp_tunnel_set_port()
13021 struct i40e_hw *hw = &np->vsi->back->hw; in i40e_udp_tunnel_unset_port()
13024 ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL); in i40e_udp_tunnel_unset_port()
13028 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_udp_tunnel_unset_port()
13029 return -EIO; in i40e_udp_tunnel_unset_port()
13039 struct i40e_pf *pf = np->vsi->back; in i40e_get_phys_port_id()
13040 struct i40e_hw *hw = &pf->hw; in i40e_get_phys_port_id()
13042 if (!test_bit(I40E_HW_CAP_PORT_ID_VALID, pf->hw.caps)) in i40e_get_phys_port_id()
13043 return -EOPNOTSUPP; in i40e_get_phys_port_id()
13045 ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id)); in i40e_get_phys_port_id()
13046 memcpy(ppid->id, hw->mac.port_addr, ppid->id_len); in i40e_get_phys_port_id()
13052 * i40e_ndo_fdb_add - add an entry to the hardware database
13068 struct i40e_pf *pf = np->vsi->back; in i40e_ndo_fdb_add()
13071 if (!test_bit(I40E_FLAG_SRIOV_ENA, pf->flags)) in i40e_ndo_fdb_add()
13072 return -EOPNOTSUPP; in i40e_ndo_fdb_add()
13075 pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name); in i40e_ndo_fdb_add()
13076 return -EINVAL; in i40e_ndo_fdb_add()
13082 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { in i40e_ndo_fdb_add()
13084 return -EINVAL; in i40e_ndo_fdb_add()
13092 err = -EINVAL; in i40e_ndo_fdb_add()
13095 if (err == -EEXIST && !(flags & NLM_F_EXCL)) in i40e_ndo_fdb_add()
13102 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
13123 struct i40e_vsi *vsi = np->vsi; in i40e_ndo_bridge_setlink()
13124 struct i40e_pf *pf = vsi->back; in i40e_ndo_bridge_setlink()
13130 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) in i40e_ndo_bridge_setlink()
13131 return -EOPNOTSUPP; in i40e_ndo_bridge_setlink()
13135 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_ndo_bridge_setlink()
13136 veb = pf->veb[i]; in i40e_ndo_bridge_setlink()
13141 return -EINVAL; in i40e_ndo_bridge_setlink()
13152 return -EINVAL; in i40e_ndo_bridge_setlink()
13156 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, in i40e_ndo_bridge_setlink()
13157 vsi->tc_config.enabled_tc); in i40e_ndo_bridge_setlink()
13159 veb->bridge_mode = mode; in i40e_ndo_bridge_setlink()
13163 return -ENOENT; in i40e_ndo_bridge_setlink()
13166 } else if (mode != veb->bridge_mode) { in i40e_ndo_bridge_setlink()
13168 veb->bridge_mode = mode; in i40e_ndo_bridge_setlink()
13171 set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); in i40e_ndo_bridge_setlink()
13173 clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); in i40e_ndo_bridge_setlink()
13183 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
13200 struct i40e_vsi *vsi = np->vsi; in i40e_ndo_bridge_getlink()
13201 struct i40e_pf *pf = vsi->back; in i40e_ndo_bridge_getlink()
13206 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) in i40e_ndo_bridge_getlink()
13207 return -EOPNOTSUPP; in i40e_ndo_bridge_getlink()
13211 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_ndo_bridge_getlink()
13212 veb = pf->veb[i]; in i40e_ndo_bridge_getlink()
13218 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode, in i40e_ndo_bridge_getlink()
13223 * i40e_features_check - Validate encapsulated packet conforms to limits
13238 if (skb->ip_summed != CHECKSUM_PARTIAL) in i40e_features_check()
13244 if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) in i40e_features_check()
13248 len = skb_network_header(skb) - skb->data; in i40e_features_check()
13253 len = skb_transport_header(skb) - skb_network_header(skb); in i40e_features_check()
13257 if (skb->encapsulation) { in i40e_features_check()
13259 len = skb_inner_network_header(skb) - skb_transport_header(skb); in i40e_features_check()
13264 len = skb_inner_transport_header(skb) - in i40e_features_check()
13281 * i40e_xdp_setup - add/remove an XDP program
13290 struct i40e_pf *pf = vsi->back; in i40e_xdp_setup()
13296 if (vsi->netdev->mtu > frame_size - I40E_PACKET_HDR_PAD) { in i40e_xdp_setup()
13298 return -EINVAL; in i40e_xdp_setup()
13301 /* When turning XDP on->off/off->on we reset and rebuild the rings. */ in i40e_xdp_setup()
13308 if (test_bit(__I40E_IN_REMOVE, pf->state)) in i40e_xdp_setup()
13309 return -EINVAL; in i40e_xdp_setup()
13311 old_prog = xchg(&vsi->xdp_prog, prog); in i40e_xdp_setup()
13315 xdp_features_clear_redirect_target(vsi->netdev); in i40e_xdp_setup()
13316 /* Wait until ndo_xsk_wakeup completes. */ in i40e_xdp_setup()
13324 return -ENOMEM; in i40e_xdp_setup()
13327 return -ENOMEM; in i40e_xdp_setup()
13330 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_xdp_setup()
13331 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); in i40e_xdp_setup()
13340 for (i = 0; i < vsi->num_queue_pairs; i++) in i40e_xdp_setup()
13341 if (vsi->xdp_rings[i]->xsk_pool) in i40e_xdp_setup()
13342 (void)i40e_xsk_wakeup(vsi->netdev, i, in i40e_xdp_setup()
13344 xdp_features_set_redirect_target(vsi->netdev, true); in i40e_xdp_setup()
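/* Design note: the program pointer is swapped atomically with xchg() and
 * republished per Rx ring with WRITE_ONCE(), so each ring observes either
 * the old or the new program, never a torn pointer. The bare pattern
 * (editor's sketch, error handling omitted):
 *
 *	old_prog = xchg(&vsi->xdp_prog, prog);
 *	for (i = 0; i < vsi->num_queue_pairs; i++)
 *		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
 *	if (old_prog)
 *		bpf_prog_put(old_prog);
 */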
13351 * i40e_enter_busy_conf - Enters busy config state
13358 struct i40e_pf *pf = vsi->back; in i40e_enter_busy_conf()
13361 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { in i40e_enter_busy_conf()
13362 timeout--; in i40e_enter_busy_conf()
13364 return -EBUSY; in i40e_enter_busy_conf()
13372 * i40e_exit_busy_conf - Exits busy config state
13377 struct i40e_pf *pf = vsi->back; in i40e_exit_busy_conf()
13379 clear_bit(__I40E_CONFIG_BUSY, pf->state); in i40e_exit_busy_conf()
13383 * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
13389 memset(&vsi->rx_rings[queue_pair]->rx_stats, 0, in i40e_queue_pair_reset_stats()
13390 sizeof(vsi->rx_rings[queue_pair]->rx_stats)); in i40e_queue_pair_reset_stats()
13391 memset(&vsi->tx_rings[queue_pair]->stats, 0, in i40e_queue_pair_reset_stats()
13392 sizeof(vsi->tx_rings[queue_pair]->stats)); in i40e_queue_pair_reset_stats()
13394 memset(&vsi->xdp_rings[queue_pair]->stats, 0, in i40e_queue_pair_reset_stats()
13395 sizeof(vsi->xdp_rings[queue_pair]->stats)); in i40e_queue_pair_reset_stats()
13400 * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
13406 i40e_clean_tx_ring(vsi->tx_rings[queue_pair]); in i40e_queue_pair_clean_rings()
13408 /* Make sure that in-progress ndo_xdp_xmit calls are in i40e_queue_pair_clean_rings()
13412 i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]); in i40e_queue_pair_clean_rings()
13414 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); in i40e_queue_pair_clean_rings()
13418 * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
13426 struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; in i40e_queue_pair_toggle_napi()
13427 struct i40e_q_vector *q_vector = rxr->q_vector; in i40e_queue_pair_toggle_napi()
13429 if (!vsi->netdev) in i40e_queue_pair_toggle_napi()
13433 if (q_vector->rx.ring || q_vector->tx.ring) { in i40e_queue_pair_toggle_napi()
13435 napi_enable(&q_vector->napi); in i40e_queue_pair_toggle_napi()
13437 napi_disable(&q_vector->napi); in i40e_queue_pair_toggle_napi()
13442 * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
13452 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_toggle_rings()
13455 pf_q = vsi->base_queue + queue_pair; in i40e_queue_pair_toggle_rings()
13456 ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q, in i40e_queue_pair_toggle_rings()
13459 dev_info(&pf->pdev->dev, in i40e_queue_pair_toggle_rings()
13461 vsi->seid, pf_q, (enable ? "en" : "dis")); in i40e_queue_pair_toggle_rings()
13468 dev_info(&pf->pdev->dev, in i40e_queue_pair_toggle_rings()
13470 vsi->seid, pf_q, (enable ? "en" : "dis")); in i40e_queue_pair_toggle_rings()
13483 ret = i40e_control_wait_tx_q(vsi->seid, pf, in i40e_queue_pair_toggle_rings()
13484 pf_q + vsi->alloc_queue_pairs, in i40e_queue_pair_toggle_rings()
13487 dev_info(&pf->pdev->dev, in i40e_queue_pair_toggle_rings()
13489 vsi->seid, pf_q, (enable ? "en" : "dis")); in i40e_queue_pair_toggle_rings()
13496 * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
13502 struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; in i40e_queue_pair_enable_irq()
13503 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_enable_irq()
13504 struct i40e_hw *hw = &pf->hw; in i40e_queue_pair_enable_irq()
13507 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_queue_pair_enable_irq()
13508 i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx); in i40e_queue_pair_enable_irq()
13516 * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
13522 struct i40e_ring *rxr = vsi->rx_rings[queue_pair]; in i40e_queue_pair_disable_irq()
13523 struct i40e_pf *pf = vsi->back; in i40e_queue_pair_disable_irq()
13524 struct i40e_hw *hw = &pf->hw; in i40e_queue_pair_disable_irq()
13532 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_queue_pair_disable_irq()
13533 u32 intpf = vsi->base_vector + rxr->q_vector->v_idx; in i40e_queue_pair_disable_irq()
13535 wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0); in i40e_queue_pair_disable_irq()
13537 synchronize_irq(pf->msix_entries[intpf].vector); in i40e_queue_pair_disable_irq()
13539 /* Legacy and MSI mode - this stops all interrupt handling */ in i40e_queue_pair_disable_irq()
13543 synchronize_irq(pf->pdev->irq); in i40e_queue_pair_disable_irq()
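/* Masking the vector (or ICR0 for MSI/legacy) only stops new interrupts;
 * the synchronize_irq() calls above additionally wait for a handler
 * already running on another CPU to return, so the caller may safely
 * tear down rings and NAPI afterwards.
 */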
13548 * i40e_queue_pair_disable - Disables a queue pair
13565 i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); in i40e_queue_pair_disable()
13573 * i40e_queue_pair_enable - Enables a queue pair
13583 err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]); in i40e_queue_pair_enable()
13588 err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]); in i40e_queue_pair_enable()
13593 err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]); in i40e_queue_pair_enable()
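/* The enable path mirrors the disable path in reverse: configure the Tx
 * ring, the XDP Tx ring when present, then the Rx ring; the elided
 * remainder then re-enables the rings, NAPI, and finally the queue
 * pair's interrupt.
 */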
13607 * i40e_xdp - implements ndo_bpf for i40e
13615 struct i40e_vsi *vsi = np->vsi; in i40e_xdp()
13617 if (vsi->type != I40E_VSI_MAIN) in i40e_xdp()
13618 return -EINVAL; in i40e_xdp()
13620 switch (xdp->command) { in i40e_xdp()
13622 return i40e_xdp_setup(vsi, xdp->prog, xdp->extack); in i40e_xdp()
13624 return i40e_xsk_pool_setup(vsi, xdp->xsk.pool, in i40e_xdp()
13625 xdp->xsk.queue_id); in i40e_xdp()
13627 return -EINVAL; in i40e_xdp()
13671 * i40e_config_netdev - Setup the netdev flags
13678 struct i40e_pf *pf = vsi->back; in i40e_config_netdev()
13679 struct i40e_hw *hw = &pf->hw; in i40e_config_netdev()
13689 netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs); in i40e_config_netdev()
13691 return -ENOMEM; in i40e_config_netdev()
13693 vsi->netdev = netdev; in i40e_config_netdev()
13695 np->vsi = vsi; in i40e_config_netdev()
13717 if (!test_bit(I40E_HW_CAP_OUTER_UDP_CSUM, pf->hw.caps)) in i40e_config_netdev()
13718 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; in i40e_config_netdev()
13720 netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic; in i40e_config_netdev()
13722 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; in i40e_config_netdev()
13724 netdev->hw_enc_features |= hw_enc_features; in i40e_config_netdev()
13727 netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID; in i40e_config_netdev()
13736 netdev->gso_partial_features = I40E_GSO_PARTIAL_FEATURES; in i40e_config_netdev()
13737 netdev->features |= NETIF_F_GSO_PARTIAL | in i40e_config_netdev()
13740 netdev->mpls_features |= NETIF_F_SG; in i40e_config_netdev()
13741 netdev->mpls_features |= NETIF_F_HW_CSUM; in i40e_config_netdev()
13742 netdev->mpls_features |= NETIF_F_TSO; in i40e_config_netdev()
13743 netdev->mpls_features |= NETIF_F_TSO6; in i40e_config_netdev()
13744 netdev->mpls_features |= I40E_GSO_PARTIAL_FEATURES; in i40e_config_netdev()
13747 netdev->hw_features |= NETIF_F_HW_L2FW_DOFFLOAD; in i40e_config_netdev()
13753 if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags)) in i40e_config_netdev()
13756 netdev->hw_features |= hw_features | NETIF_F_LOOPBACK; in i40e_config_netdev()
13758 netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; in i40e_config_netdev()
13759 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID; in i40e_config_netdev()
13761 netdev->features &= ~NETIF_F_HW_TC; in i40e_config_netdev()
13763 if (vsi->type == I40E_VSI_MAIN) { in i40e_config_netdev()
13764 SET_NETDEV_DEV(netdev, &pf->pdev->dev); in i40e_config_netdev()
13765 ether_addr_copy(mac_addr, hw->mac.perm_addr); in i40e_config_netdev()
13767 * some older NVM configurations load a default MAC-VLAN in i40e_config_netdev()
13777 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13779 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13781 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | in i40e_config_netdev()
13785 netdev->xdp_zc_max_segs = I40E_MAX_BUFFER_TXD; in i40e_config_netdev()
13790 * original name by IFNAMSIZ - 4 in i40e_config_netdev()
13792 snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", in i40e_config_netdev()
13793 IFNAMSIZ - 4, in i40e_config_netdev()
13794 pf->vsi[pf->lan_vsi]->netdev->name); in i40e_config_netdev()
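/* Worked example (editor's illustration): if the LAN netdev is "eth0",
 * the template written here is "eth0v%d"; registration later expands the
 * literal %d to the first free unit, yielding eth0v0, eth0v1, ... The
 * %.*s precision of IFNAMSIZ - 4 keeps the template itself (base name +
 * "v%d" + NUL) within IFNAMSIZ.
 */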
13797 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13799 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13816 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13818 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_config_netdev()
13821 ether_addr_copy(netdev->perm_addr, mac_addr); in i40e_config_netdev()
13823 /* i40iw_net_event() reads 16 bytes from neigh->primary_key */ in i40e_config_netdev()
13824 netdev->neigh_priv_len = sizeof(u32) * 4; in i40e_config_netdev()
13826 netdev->priv_flags |= IFF_UNICAST_FLT; in i40e_config_netdev()
13827 netdev->priv_flags |= IFF_SUPP_NOFCS; in i40e_config_netdev()
13829 i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc); in i40e_config_netdev()
13831 netdev->netdev_ops = &i40e_netdev_ops; in i40e_config_netdev()
13832 netdev->watchdog_timeo = 5 * HZ; in i40e_config_netdev()
13835 /* MTU range: 68 - 9706 */ in i40e_config_netdev()
13836 netdev->min_mtu = ETH_MIN_MTU; in i40e_config_netdev()
13837 netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD; in i40e_config_netdev()
13843 * i40e_vsi_delete - Delete a VSI from the switch
13851 if (vsi == vsi->back->vsi[vsi->back->lan_vsi]) in i40e_vsi_delete()
13854 i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL); in i40e_vsi_delete()
13858 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
13866 struct i40e_pf *pf = vsi->back; in i40e_is_vsi_uplink_mode_veb()
13869 if (vsi->veb_idx >= I40E_MAX_VEB) in i40e_is_vsi_uplink_mode_veb()
13872 veb = pf->veb[vsi->veb_idx]; in i40e_is_vsi_uplink_mode_veb()
13874 dev_info(&pf->pdev->dev, in i40e_is_vsi_uplink_mode_veb()
13876 return -ENOENT; in i40e_is_vsi_uplink_mode_veb()
13880 if (veb->bridge_mode & BRIDGE_MODE_VEPA) { in i40e_is_vsi_uplink_mode_veb()
13892 * i40e_add_vsi - Add a VSI to the switch
13900 int ret = -ENODEV; in i40e_add_vsi()
13901 struct i40e_pf *pf = vsi->back; in i40e_add_vsi()
13902 struct i40e_hw *hw = &pf->hw; in i40e_add_vsi()
13912 switch (vsi->type) { in i40e_add_vsi()
13919 ctxt.seid = pf->main_vsi_seid; in i40e_add_vsi()
13920 ctxt.pf_num = pf->hw.pf_id; in i40e_add_vsi()
13922 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); in i40e_add_vsi()
13925 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13928 i40e_aq_str(&pf->hw, in i40e_add_vsi()
13929 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13930 return -ENOENT; in i40e_add_vsi()
13932 vsi->info = ctxt.info; in i40e_add_vsi()
13933 vsi->info.valid_sections = 0; in i40e_add_vsi()
13935 vsi->seid = ctxt.seid; in i40e_add_vsi()
13936 vsi->id = ctxt.vsi_number; in i40e_add_vsi()
13941 * negative logic - if it's set, we need to fiddle with in i40e_add_vsi()
13944 if (test_bit(I40E_FLAG_SOURCE_PRUNING_DIS, pf->flags)) { in i40e_add_vsi()
13946 ctxt.seid = pf->main_vsi_seid; in i40e_add_vsi()
13947 ctxt.pf_num = pf->hw.pf_id; in i40e_add_vsi()
13955 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13958 i40e_aq_str(&pf->hw, in i40e_add_vsi()
13959 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13960 ret = -ENOENT; in i40e_add_vsi()
13966 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags) && in i40e_add_vsi()
13967 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ in i40e_add_vsi()
13969 ctxt.seid = pf->main_vsi_seid; in i40e_add_vsi()
13970 ctxt.pf_num = pf->hw.pf_id; in i40e_add_vsi()
13975 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13978 i40e_aq_str(&pf->hw, in i40e_add_vsi()
13979 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
13980 ret = -ENOENT; in i40e_add_vsi()
13985 vsi->info.valid_sections = 0; in i40e_add_vsi()
13988 * reconfigure it to enable all TCs that are in i40e_add_vsi()
13998 dev_info(&pf->pdev->dev, in i40e_add_vsi()
13999 "failed to configure TCs for main VSI tc_map 0x%08x, err %pe aq_err %s\n", in i40e_add_vsi()
14002 i40e_aq_str(&pf->hw, in i40e_add_vsi()
14003 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
14009 ctxt.pf_num = hw->pf_id; in i40e_add_vsi()
14011 ctxt.uplink_seid = vsi->uplink_seid; in i40e_add_vsi()
14014 if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags) && in i40e_add_vsi()
14025 ctxt.pf_num = hw->pf_id; in i40e_add_vsi()
14027 ctxt.uplink_seid = vsi->uplink_seid; in i40e_add_vsi()
14046 ctxt.pf_num = hw->pf_id; in i40e_add_vsi()
14047 ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id; in i40e_add_vsi()
14048 ctxt.uplink_seid = vsi->uplink_seid; in i40e_add_vsi()
14062 if (test_bit(I40E_FLAG_IWARP_ENA, vsi->back->flags)) { in i40e_add_vsi()
14072 if (pf->vf[vsi->vf_id].spoofchk) { in i40e_add_vsi()
14088 return -ENODEV; in i40e_add_vsi()
14091 if (vsi->type != I40E_VSI_MAIN) { in i40e_add_vsi()
14094 dev_info(&vsi->back->pdev->dev, in i40e_add_vsi()
14097 i40e_aq_str(&pf->hw, in i40e_add_vsi()
14098 pf->hw.aq.asq_last_status)); in i40e_add_vsi()
14099 ret = -ENOENT; in i40e_add_vsi()
14102 vsi->info = ctxt.info; in i40e_add_vsi()
14103 vsi->info.valid_sections = 0; in i40e_add_vsi()
14104 vsi->seid = ctxt.seid; in i40e_add_vsi()
14105 vsi->id = ctxt.vsi_number; in i40e_add_vsi()
14108 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_add_vsi()
14109 vsi->active_filters = 0; in i40e_add_vsi()
14111 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { in i40e_add_vsi()
14112 f->state = I40E_FILTER_NEW; in i40e_add_vsi()
14115 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_add_vsi()
14116 clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); in i40e_add_vsi()
14119 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; in i40e_add_vsi()
14120 set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state); in i40e_add_vsi()
14126 dev_info(&pf->pdev->dev, in i40e_add_vsi()
14129 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_vsi()
14139 * i40e_vsi_release - Delete a VSI and free its resources
14153 pf = vsi->back; in i40e_vsi_release()
14155 /* release of a VEB-owner or last VSI is not allowed */ in i40e_vsi_release()
14156 if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) { in i40e_vsi_release()
14157 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", in i40e_vsi_release()
14158 vsi->seid, vsi->uplink_seid); in i40e_vsi_release()
14159 return -ENODEV; in i40e_vsi_release()
14161 if (vsi == pf->vsi[pf->lan_vsi] && in i40e_vsi_release()
14162 !test_bit(__I40E_DOWN, pf->state)) { in i40e_vsi_release()
14163 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); in i40e_vsi_release()
14164 return -ENODEV; in i40e_vsi_release()
14166 set_bit(__I40E_VSI_RELEASING, vsi->state); in i40e_vsi_release()
14167 uplink_seid = vsi->uplink_seid; in i40e_vsi_release()
14169 if (vsi->type != I40E_VSI_SRIOV) { in i40e_vsi_release()
14170 if (vsi->netdev_registered) { in i40e_vsi_release()
14171 vsi->netdev_registered = false; in i40e_vsi_release()
14172 if (vsi->netdev) { in i40e_vsi_release()
14174 unregister_netdev(vsi->netdev); in i40e_vsi_release()
14182 if (vsi->type == I40E_VSI_MAIN) in i40e_vsi_release()
14185 spin_lock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_release()
14188 if (vsi->netdev) { in i40e_vsi_release()
14189 __dev_uc_unsync(vsi->netdev, NULL); in i40e_vsi_release()
14190 __dev_mc_unsync(vsi->netdev, NULL); in i40e_vsi_release()
14194 hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) in i40e_vsi_release()
14197 spin_unlock_bh(&vsi->mac_filter_hash_lock); in i40e_vsi_release()
14203 if (vsi->netdev) { in i40e_vsi_release()
14204 free_netdev(vsi->netdev); in i40e_vsi_release()
14205 vsi->netdev = NULL; in i40e_vsi_release()
14215 * the orphan VEBs yet. We'll wait for an explicit remove request in i40e_vsi_release()
14218 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { in i40e_vsi_release()
14219 if (pf->vsi[i] && in i40e_vsi_release()
14220 pf->vsi[i]->uplink_seid == uplink_seid && in i40e_vsi_release()
14221 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { in i40e_vsi_release()
14226 if (!pf->veb[i]) in i40e_vsi_release()
14228 if (pf->veb[i]->uplink_seid == uplink_seid) in i40e_vsi_release()
14230 if (pf->veb[i]->seid == uplink_seid) in i40e_vsi_release()
14231 veb = pf->veb[i]; in i40e_vsi_release()
14233 if (n == 0 && veb && veb->uplink_seid != 0) in i40e_vsi_release()
14240 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
14251 int ret = -ENOENT; in i40e_vsi_setup_vectors()
14252 struct i40e_pf *pf = vsi->back; in i40e_vsi_setup_vectors()
14254 if (vsi->q_vectors[0]) { in i40e_vsi_setup_vectors()
14255 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", in i40e_vsi_setup_vectors()
14256 vsi->seid); in i40e_vsi_setup_vectors()
14257 return -EEXIST; in i40e_vsi_setup_vectors()
14260 if (vsi->base_vector) { in i40e_vsi_setup_vectors()
14261 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", in i40e_vsi_setup_vectors()
14262 vsi->seid, vsi->base_vector); in i40e_vsi_setup_vectors()
14263 return -EEXIST; in i40e_vsi_setup_vectors()
14268 dev_info(&pf->pdev->dev, in i40e_vsi_setup_vectors()
14270 vsi->num_q_vectors, vsi->seid, ret); in i40e_vsi_setup_vectors()
14271 vsi->num_q_vectors = 0; in i40e_vsi_setup_vectors()
14278 if (!test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_vsi_setup_vectors()
14280 if (vsi->num_q_vectors) in i40e_vsi_setup_vectors()
14281 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, in i40e_vsi_setup_vectors()
14282 vsi->num_q_vectors, vsi->idx); in i40e_vsi_setup_vectors()
14283 if (vsi->base_vector < 0) { in i40e_vsi_setup_vectors()
14284 dev_info(&pf->pdev->dev, in i40e_vsi_setup_vectors()
14286 vsi->num_q_vectors, vsi->seid, vsi->base_vector); in i40e_vsi_setup_vectors()
14288 ret = -ENOENT; in i40e_vsi_setup_vectors()
14297 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
14300 * This re-allocates a vsi's queue resources.
14315 pf = vsi->back; in i40e_vsi_reinit_setup()
14317 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); in i40e_vsi_reinit_setup()
14326 alloc_queue_pairs = vsi->alloc_queue_pairs * in i40e_vsi_reinit_setup()
14329 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); in i40e_vsi_reinit_setup()
14331 dev_info(&pf->pdev->dev, in i40e_vsi_reinit_setup()
14333 alloc_queue_pairs, vsi->seid, ret); in i40e_vsi_reinit_setup()
14336 vsi->base_queue = ret; in i40e_vsi_reinit_setup()
14341 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; in i40e_vsi_reinit_setup()
14342 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; in i40e_vsi_reinit_setup()
14343 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; in i40e_vsi_reinit_setup()
14344 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); in i40e_vsi_reinit_setup()
14345 if (vsi->type == I40E_VSI_MAIN) in i40e_vsi_reinit_setup()
14346 i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr); in i40e_vsi_reinit_setup()
14359 if (vsi->netdev_registered) { in i40e_vsi_reinit_setup()
14360 vsi->netdev_registered = false; in i40e_vsi_reinit_setup()
14361 unregister_netdev(vsi->netdev); in i40e_vsi_reinit_setup()
14362 free_netdev(vsi->netdev); in i40e_vsi_reinit_setup()
14363 vsi->netdev = NULL; in i40e_vsi_reinit_setup()
14365 if (vsi->type == I40E_VSI_MAIN) in i40e_vsi_reinit_setup()
14367 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); in i40e_vsi_reinit_setup()
14374 * i40e_vsi_setup - Set up a VSI by a given type
14396 * - the PF's port seid in i40e_vsi_setup()
14399 * - seid of an existing VEB in i40e_vsi_setup()
14400 * - seid of a VSI that owns an existing VEB in i40e_vsi_setup()
14401 * - seid of a VSI that doesn't own a VEB in i40e_vsi_setup()
14403 * - seid of the PF VSI, which is what creates the first VEB in i40e_vsi_setup()
14409 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { in i40e_vsi_setup()
14410 veb = pf->veb[i]; in i40e_vsi_setup()
14415 if (!veb && uplink_seid != pf->mac_seid) { in i40e_vsi_setup()
14417 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_vsi_setup()
14418 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { in i40e_vsi_setup()
14419 vsi = pf->vsi[i]; in i40e_vsi_setup()
14424 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", in i40e_vsi_setup()
14429 if (vsi->uplink_seid == pf->mac_seid) in i40e_vsi_setup()
14430 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, in i40e_vsi_setup()
14431 vsi->tc_config.enabled_tc); in i40e_vsi_setup()
14432 else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) in i40e_vsi_setup()
14433 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, in i40e_vsi_setup()
14434 vsi->tc_config.enabled_tc); in i40e_vsi_setup()
14436 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { in i40e_vsi_setup()
14437 dev_info(&vsi->back->pdev->dev, in i40e_vsi_setup()
14445 if (!test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) { in i40e_vsi_setup()
14446 veb->bridge_mode = BRIDGE_MODE_VEPA; in i40e_vsi_setup()
14447 clear_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); in i40e_vsi_setup()
14452 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) in i40e_vsi_setup()
14453 veb = pf->veb[i]; in i40e_vsi_setup()
14456 dev_info(&pf->pdev->dev, "couldn't add VEB\n"); in i40e_vsi_setup()
14460 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; in i40e_vsi_setup()
14461 uplink_seid = veb->seid; in i40e_vsi_setup()
14468 vsi = pf->vsi[v_idx]; in i40e_vsi_setup()
14471 vsi->type = type; in i40e_vsi_setup()
14472 vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB); in i40e_vsi_setup()
14475 pf->lan_vsi = v_idx; in i40e_vsi_setup()
14477 vsi->vf_id = param1; in i40e_vsi_setup()
14479 alloc_queue_pairs = vsi->alloc_queue_pairs * in i40e_vsi_setup()
14482 ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx); in i40e_vsi_setup()
14484 dev_info(&pf->pdev->dev, in i40e_vsi_setup()
14486 alloc_queue_pairs, vsi->seid, ret); in i40e_vsi_setup()
14489 vsi->base_queue = ret; in i40e_vsi_setup()
14492 vsi->uplink_seid = uplink_seid; in i40e_vsi_setup()
14497 switch (vsi->type) { in i40e_vsi_setup()
14507 if (vsi->type == I40E_VSI_MAIN) { in i40e_vsi_setup()
14511 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); in i40e_vsi_setup()
14513 ret = register_netdev(vsi->netdev); in i40e_vsi_setup()
14516 vsi->netdev_registered = true; in i40e_vsi_setup()
14517 netif_carrier_off(vsi->netdev); in i40e_vsi_setup()
14543 if (test_bit(I40E_HW_CAP_RSS_AQ, pf->hw.caps) && in i40e_vsi_setup()
14544 vsi->type == I40E_VSI_VMDQ2) { in i40e_vsi_setup()
14556 if (vsi->netdev_registered) { in i40e_vsi_setup()
14557 vsi->netdev_registered = false; in i40e_vsi_setup()
14558 unregister_netdev(vsi->netdev); in i40e_vsi_setup()
14559 free_netdev(vsi->netdev); in i40e_vsi_setup()
14560 vsi->netdev = NULL; in i40e_vsi_setup()
14563 if (vsi->type == I40E_VSI_MAIN) in i40e_vsi_setup()
14566 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); in i40e_vsi_setup()
14574 * i40e_veb_get_bw_info - Query VEB BW information
14583 struct i40e_pf *pf = veb->pf; in i40e_veb_get_bw_info()
14584 struct i40e_hw *hw = &pf->hw; in i40e_veb_get_bw_info()
14589 ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, in i40e_veb_get_bw_info()
14592 dev_info(&pf->pdev->dev, in i40e_veb_get_bw_info()
14595 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); in i40e_veb_get_bw_info()
14599 ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, in i40e_veb_get_bw_info()
14602 dev_info(&pf->pdev->dev, in i40e_veb_get_bw_info()
14605 i40e_aq_str(&pf->hw, hw->aq.asq_last_status)); in i40e_veb_get_bw_info()
14609 veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit); in i40e_veb_get_bw_info()
14610 veb->bw_max_quanta = ets_data.tc_bw_max; in i40e_veb_get_bw_info()
14611 veb->is_abs_credits = bw_data.absolute_credits_enable; in i40e_veb_get_bw_info()
14612 veb->enabled_tc = ets_data.tc_valid_bits; in i40e_veb_get_bw_info()
14616 veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i]; in i40e_veb_get_bw_info()
14617 veb->bw_tc_limit_credits[i] = in i40e_veb_get_bw_info()
14619 veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7); in i40e_veb_get_bw_info()
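The tail of i40e_veb_get_bw_info() pulls one 4-bit field per traffic class out of a single packed word and keeps only the low three bits. A minimal standalone sketch of that extraction, assuming the nibble packing implied by the shift above (the example value is made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t tc_bw_max = 0x00214365;    /* assumed example packing */
    int i;

    for (i = 0; i < 8; i++)             /* one nibble per TC */
        printf("TC%d max quanta: %u\n", i,
               (unsigned)((tc_bw_max >> (i * 4)) & 0x7));
    return 0;
}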
14627 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
14635 int ret = -ENOENT; in i40e_veb_mem_alloc()
14640 mutex_lock(&pf->switch_mutex); in i40e_veb_mem_alloc()
14649 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL)) in i40e_veb_mem_alloc()
14652 ret = -ENOMEM; in i40e_veb_mem_alloc()
14658 ret = -ENOMEM; in i40e_veb_mem_alloc()
14661 veb->pf = pf; in i40e_veb_mem_alloc()
14662 veb->idx = i; in i40e_veb_mem_alloc()
14663 veb->enabled_tc = 1; in i40e_veb_mem_alloc()
14665 pf->veb[i] = veb; in i40e_veb_mem_alloc()
14668 mutex_unlock(&pf->switch_mutex); in i40e_veb_mem_alloc()
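i40e_veb_mem_alloc() is the classic fixed-table allocator: find the first free slot while holding a mutex, allocate, and publish the object at that index, which doubles as its handle. A userspace sketch of the same pattern (names illustrative):

#include <pthread.h>
#include <stdlib.h>

#define MAX_VEB 16

static void *veb_table[MAX_VEB];
static pthread_mutex_t switch_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Returns the allocated slot index, or -1 if the table is full or
 * the allocation fails. */
int veb_mem_alloc(size_t size, void **out)
{
    int i = 0, ret = -1;

    pthread_mutex_lock(&switch_mutex);
    while (i < MAX_VEB && veb_table[i])
        i++;
    if (i < MAX_VEB) {
        veb_table[i] = calloc(1, size);
        if (veb_table[i]) {
            *out = veb_table[i];
            ret = i;
        }
    }
    pthread_mutex_unlock(&switch_mutex);
    return ret;
}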
14673 * i40e_switch_branch_release - Delete a branch of the switch tree
14681 struct i40e_pf *pf = branch->pf; in i40e_switch_branch_release()
14682 u16 branch_seid = branch->seid; in i40e_switch_branch_release()
14683 u16 veb_idx = branch->idx; in i40e_switch_branch_release()
14686 /* release any VEBs on this VEB - RECURSION */ in i40e_switch_branch_release()
14688 if (!pf->veb[i]) in i40e_switch_branch_release()
14690 if (pf->veb[i]->uplink_seid == branch->seid) in i40e_switch_branch_release()
14691 i40e_switch_branch_release(pf->veb[i]); in i40e_switch_branch_release()
14699 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_switch_branch_release()
14700 if (!pf->vsi[i]) in i40e_switch_branch_release()
14702 if (pf->vsi[i]->uplink_seid == branch_seid && in i40e_switch_branch_release()
14703 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { in i40e_switch_branch_release()
14704 i40e_vsi_release(pf->vsi[i]); in i40e_switch_branch_release()
14713 if (pf->veb[veb_idx]) in i40e_switch_branch_release()
14714 i40e_veb_release(pf->veb[veb_idx]); in i40e_switch_branch_release()
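i40e_switch_branch_release() walks the switch tree depth-first without any parent-to-child pointers: a node's children are simply the table entries whose uplink SEID matches it, hence the self-recursion flagged in the comment above. A reduced sketch of that walk (the per-branch VSI cleanup is elided):

#include <stdlib.h>

#define MAX_NODES 16

struct node {
    int seid;           /* this element's id */
    int uplink_seid;    /* id of the element above it */
};

static struct node *nodes[MAX_NODES];

static void branch_release(struct node *branch)
{
    int idx = -1;

    for (int i = 0; i < MAX_NODES; i++) {
        if (!nodes[i])
            continue;
        if (nodes[i] == branch)
            idx = i;                      /* remember our own slot */
        else if (nodes[i]->uplink_seid == branch->seid)
            branch_release(nodes[i]);     /* recurse into children */
    }
    if (idx >= 0)
        nodes[idx] = NULL;
    free(branch);
}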
14718 * i40e_veb_clear - remove veb struct
14726 if (veb->pf) { in i40e_veb_clear()
14727 struct i40e_pf *pf = veb->pf; in i40e_veb_clear()
14729 mutex_lock(&pf->switch_mutex); in i40e_veb_clear()
14730 if (pf->veb[veb->idx] == veb) in i40e_veb_clear()
14731 pf->veb[veb->idx] = NULL; in i40e_veb_clear()
14732 mutex_unlock(&pf->switch_mutex); in i40e_veb_clear()
14739 * i40e_veb_release - Delete a VEB and free its resources
14748 pf = veb->pf; in i40e_veb_release()
14751 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_veb_release()
14752 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { in i40e_veb_release()
14754 vsi = pf->vsi[i]; in i40e_veb_release()
14758 dev_info(&pf->pdev->dev, in i40e_veb_release()
14760 veb->seid, n); in i40e_veb_release()
14765 vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER; in i40e_veb_release()
14766 if (veb->uplink_seid) { in i40e_veb_release()
14767 vsi->uplink_seid = veb->uplink_seid; in i40e_veb_release()
14768 if (veb->uplink_seid == pf->mac_seid) in i40e_veb_release()
14769 vsi->veb_idx = I40E_NO_VEB; in i40e_veb_release()
14771 vsi->veb_idx = veb->veb_idx; in i40e_veb_release()
14774 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; in i40e_veb_release()
14775 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; in i40e_veb_release()
14778 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); in i40e_veb_release()
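The re-parenting in i40e_veb_release() keeps the tree connected: the surviving owner VSI inherits the dying VEB's own uplink, and if that uplink is the MAC the VSI ends up with no VEB index at all. Just that hand-off, sketched with reduced types (NO_VEB stands in for I40E_NO_VEB):

#define NO_VEB (-1)

struct sw_vsi { int uplink_seid; int veb_idx; };
struct sw_veb { int seid; int uplink_seid; int veb_idx; };

void reparent_vsi(struct sw_vsi *vsi, const struct sw_veb *veb,
                  int mac_seid)
{
    vsi->uplink_seid = veb->uplink_seid;
    vsi->veb_idx = (veb->uplink_seid == mac_seid) ? NO_VEB
                                                  : veb->veb_idx;
}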
14783 * i40e_add_veb - create the VEB in the switch
14789 struct i40e_pf *pf = veb->pf; in i40e_add_veb()
14790 bool enable_stats = !!test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags); in i40e_add_veb()
14793 ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, in i40e_add_veb()
14794 veb->enabled_tc, false, in i40e_add_veb()
14795 &veb->seid, enable_stats, NULL); in i40e_add_veb()
14799 dev_info(&pf->pdev->dev, in i40e_add_veb()
14802 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_veb()
14803 return -EPERM; in i40e_add_veb()
14807 ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL, in i40e_add_veb()
14808 &veb->stats_idx, NULL, NULL, NULL); in i40e_add_veb()
14810 dev_info(&pf->pdev->dev, in i40e_add_veb()
14813 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_veb()
14814 return -EPERM; in i40e_add_veb()
14818 dev_info(&pf->pdev->dev, in i40e_add_veb()
14821 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_add_veb()
14822 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); in i40e_add_veb()
14823 return -ENOENT; in i40e_add_veb()
14826 vsi->uplink_seid = veb->seid; in i40e_add_veb()
14827 vsi->veb_idx = veb->idx; in i40e_add_veb()
14828 vsi->flags |= I40E_VSI_FLAG_VEB_OWNER; in i40e_add_veb()
14834 * i40e_veb_setup - Set up a VEB
14839 * @enabled_tc: Enabled TC bit-map
14860 dev_info(&pf->pdev->dev, in i40e_veb_setup()
14867 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) in i40e_veb_setup()
14868 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) in i40e_veb_setup()
14870 if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) { in i40e_veb_setup()
14871 dev_info(&pf->pdev->dev, "vsi seid %d not found\n", in i40e_veb_setup()
14876 if (uplink_seid && uplink_seid != pf->mac_seid) { in i40e_veb_setup()
14878 if (pf->veb[veb_idx] && in i40e_veb_setup()
14879 pf->veb[veb_idx]->seid == uplink_seid) { in i40e_veb_setup()
14880 uplink_veb = pf->veb[veb_idx]; in i40e_veb_setup()
14885 dev_info(&pf->pdev->dev, in i40e_veb_setup()
14895 veb = pf->veb[veb_idx]; in i40e_veb_setup()
14896 veb->flags = flags; in i40e_veb_setup()
14897 veb->uplink_seid = uplink_seid; in i40e_veb_setup()
14898 veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB); in i40e_veb_setup()
14899 veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1); in i40e_veb_setup()
14902 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); in i40e_veb_setup()
14905 if (vsi_idx == pf->lan_vsi) in i40e_veb_setup()
14906 pf->lan_veb = veb->idx; in i40e_veb_setup()
14917 * i40e_setup_pf_switch_element - set PF vars based on switch type
14929 u16 downlink_seid = le16_to_cpu(ele->downlink_seid); in i40e_setup_pf_switch_element()
14930 u16 uplink_seid = le16_to_cpu(ele->uplink_seid); in i40e_setup_pf_switch_element()
14931 u8 element_type = ele->element_type; in i40e_setup_pf_switch_element()
14932 u16 seid = le16_to_cpu(ele->seid); in i40e_setup_pf_switch_element()
14935 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch_element()
14941 pf->mac_seid = seid; in i40e_setup_pf_switch_element()
14945 if (uplink_seid != pf->mac_seid) in i40e_setup_pf_switch_element()
14947 if (pf->lan_veb >= I40E_MAX_VEB) { in i40e_setup_pf_switch_element()
14952 if (pf->veb[v] && (pf->veb[v]->seid == seid)) { in i40e_setup_pf_switch_element()
14953 pf->lan_veb = v; in i40e_setup_pf_switch_element()
14957 if (pf->lan_veb >= I40E_MAX_VEB) { in i40e_setup_pf_switch_element()
14961 pf->lan_veb = v; in i40e_setup_pf_switch_element()
14964 if (pf->lan_veb >= I40E_MAX_VEB) in i40e_setup_pf_switch_element()
14967 pf->veb[pf->lan_veb]->seid = seid; in i40e_setup_pf_switch_element()
14968 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; in i40e_setup_pf_switch_element()
14969 pf->veb[pf->lan_veb]->pf = pf; in i40e_setup_pf_switch_element()
14970 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; in i40e_setup_pf_switch_element()
14978 pf->mac_seid = uplink_seid; in i40e_setup_pf_switch_element()
14979 pf->main_vsi_seid = seid; in i40e_setup_pf_switch_element()
14981 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch_element()
14983 downlink_seid, pf->main_vsi_seid); in i40e_setup_pf_switch_element()
14994 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", in i40e_setup_pf_switch_element()
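i40e_setup_pf_switch_element() is a dispatch on the element type firmware reports, recording the few SEIDs the PF needs later. A stripped-down sketch of that shape (the enum values and the VSI rule are simplifications of the driver's actual cases, not a faithful copy):

enum elem_type { ELEM_MAC, ELEM_VEB, ELEM_VSI, ELEM_OTHER };

struct elem { enum elem_type type; int seid; int uplink_seid; };
struct pf_ids { int mac_seid; int main_vsi_seid; };

void record_element(struct pf_ids *pf, const struct elem *e)
{
    switch (e->type) {
    case ELEM_MAC:
        pf->mac_seid = e->seid;           /* the physical uplink */
        break;
    case ELEM_VSI:
        pf->mac_seid = e->uplink_seid;    /* main VSI hangs off the MAC */
        pf->main_vsi_seid = e->seid;
        break;
    default:
        break;                            /* other types ignored here */
    }
}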
15001 * i40e_fetch_switch_configuration - Get switch config from firmware
15018 return -ENOMEM; in i40e_fetch_switch_configuration()
15024 ret = i40e_aq_get_switch_config(&pf->hw, sw_config, in i40e_fetch_switch_configuration()
15028 dev_info(&pf->pdev->dev, in i40e_fetch_switch_configuration()
15031 i40e_aq_str(&pf->hw, in i40e_fetch_switch_configuration()
15032 pf->hw.aq.asq_last_status)); in i40e_fetch_switch_configuration()
15034 return -ENOENT; in i40e_fetch_switch_configuration()
15037 num_reported = le16_to_cpu(sw_config->header.num_reported); in i40e_fetch_switch_configuration()
15038 num_total = le16_to_cpu(sw_config->header.num_total); in i40e_fetch_switch_configuration()
15041 dev_info(&pf->pdev->dev, in i40e_fetch_switch_configuration()
15047 &sw_config->element[i]; in i40e_fetch_switch_configuration()
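Firmware hands back the switch configuration in chunks, which is why the function compares num_reported against num_total: the real driver loops on a resume cookie until everything has arrived. A sketch of that paged-fetch contract (the callback stands in for the AQ call and is not driver API):

#include <stddef.h>

/* get_chunk() fills buf and sets *next to the resume cookie, or to 0
 * once the final chunk has been delivered. */
int fetch_all(int (*get_chunk)(void *buf, size_t len, int *next),
              void *buf, size_t len)
{
    int next = 0;

    do {
        int ret = get_chunk(buf, len, &next);

        if (ret)
            return ret;    /* surface the AQ error to the caller */
        /* ...walk the elements reported in this chunk... */
    } while (next);
    return 0;
}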
15059 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
15061 * @reinit: if the Main VSI needs to re-initialized.
15074 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch()
15077 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_setup_pf_switch()
15088 if ((pf->hw.pf_id == 0) && in i40e_setup_pf_switch()
15089 !test_bit(I40E_FLAG_TRUE_PROMISC_ENA, pf->flags)) { in i40e_setup_pf_switch()
15091 pf->last_sw_conf_flags = flags; in i40e_setup_pf_switch()
15094 if (pf->hw.pf_id == 0) { in i40e_setup_pf_switch()
15098 ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0, in i40e_setup_pf_switch()
15100 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { in i40e_setup_pf_switch()
15101 dev_info(&pf->pdev->dev, in i40e_setup_pf_switch()
15104 i40e_aq_str(&pf->hw, in i40e_setup_pf_switch()
15105 pf->hw.aq.asq_last_status)); in i40e_setup_pf_switch()
15108 pf->last_sw_conf_valid_flags = valid_flags; in i40e_setup_pf_switch()
15112 if (pf->lan_vsi == I40E_NO_VSI || reinit) { in i40e_setup_pf_switch()
15119 if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) in i40e_setup_pf_switch()
15120 uplink_seid = pf->veb[pf->lan_veb]->seid; in i40e_setup_pf_switch()
15122 uplink_seid = pf->mac_seid; in i40e_setup_pf_switch()
15123 if (pf->lan_vsi == I40E_NO_VSI) in i40e_setup_pf_switch()
15126 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); in i40e_setup_pf_switch()
15128 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); in i40e_setup_pf_switch()
15131 return -EAGAIN; in i40e_setup_pf_switch()
15135 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; in i40e_setup_pf_switch()
15137 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; in i40e_setup_pf_switch()
15138 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; in i40e_setup_pf_switch()
15139 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); in i40e_setup_pf_switch()
15141 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); in i40e_setup_pf_switch()
15148 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n", in i40e_setup_pf_switch()
15156 if (test_bit(I40E_FLAG_RSS_ENA, pf->flags)) in i40e_setup_pf_switch()
15168 udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev); in i40e_setup_pf_switch()
15177 * i40e_determine_queue_usage - Work out queue distribution
15185 pf->num_lan_qps = 0; in i40e_determine_queue_usage()
15191 queues_left = pf->hw.func_caps.num_tx_qp; in i40e_determine_queue_usage()
15194 !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_determine_queue_usage()
15197 pf->alloc_rss_size = pf->num_lan_qps = 1; in i40e_determine_queue_usage()
15200 clear_bit(I40E_FLAG_RSS_ENA, pf->flags); in i40e_determine_queue_usage()
15201 clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); in i40e_determine_queue_usage()
15202 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); in i40e_determine_queue_usage()
15203 clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags); in i40e_determine_queue_usage()
15204 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); in i40e_determine_queue_usage()
15205 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_determine_queue_usage()
15206 clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags); in i40e_determine_queue_usage()
15207 clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags); in i40e_determine_queue_usage()
15208 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); in i40e_determine_queue_usage()
15209 } else if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags) && in i40e_determine_queue_usage()
15210 !test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) && in i40e_determine_queue_usage()
15211 !test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags) && in i40e_determine_queue_usage()
15212 !test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) { in i40e_determine_queue_usage()
15214 pf->alloc_rss_size = pf->num_lan_qps = 1; in i40e_determine_queue_usage()
15215 queues_left -= pf->num_lan_qps; in i40e_determine_queue_usage()
15217 clear_bit(I40E_FLAG_RSS_ENA, pf->flags); in i40e_determine_queue_usage()
15218 clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); in i40e_determine_queue_usage()
15219 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); in i40e_determine_queue_usage()
15220 clear_bit(I40E_FLAG_FD_ATR_ENA, pf->flags); in i40e_determine_queue_usage()
15221 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_determine_queue_usage()
15222 clear_bit(I40E_FLAG_VMDQ_ENA, pf->flags); in i40e_determine_queue_usage()
15223 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); in i40e_determine_queue_usage()
15225 /* Not enough queues for all TCs */ in i40e_determine_queue_usage()
15226 if (test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags) && in i40e_determine_queue_usage()
15228 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); in i40e_determine_queue_usage()
15229 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_determine_queue_usage()
15230 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); in i40e_determine_queue_usage()
15234 q_max = max_t(int, pf->rss_size_max, num_online_cpus()); in i40e_determine_queue_usage()
15235 q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp); in i40e_determine_queue_usage()
15236 q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors); in i40e_determine_queue_usage()
15237 pf->num_lan_qps = q_max; in i40e_determine_queue_usage()
15239 queues_left -= pf->num_lan_qps; in i40e_determine_queue_usage()
15242 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { in i40e_determine_queue_usage()
15244 queues_left -= 1; /* save 1 queue for FD */ in i40e_determine_queue_usage()
15246 clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); in i40e_determine_queue_usage()
15247 set_bit(I40E_FLAG_FD_SB_INACTIVE, pf->flags); in i40e_determine_queue_usage()
15248 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n"); in i40e_determine_queue_usage()
15252 if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) && in i40e_determine_queue_usage()
15253 pf->num_vf_qps && pf->num_req_vfs && queues_left) { in i40e_determine_queue_usage()
15254 pf->num_req_vfs = min_t(int, pf->num_req_vfs, in i40e_determine_queue_usage()
15255 (queues_left / pf->num_vf_qps)); in i40e_determine_queue_usage()
15256 queues_left -= (pf->num_req_vfs * pf->num_vf_qps); in i40e_determine_queue_usage()
15259 if (test_bit(I40E_FLAG_VMDQ_ENA, pf->flags) && in i40e_determine_queue_usage()
15260 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { in i40e_determine_queue_usage()
15261 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, in i40e_determine_queue_usage()
15262 (queues_left / pf->num_vmdq_qps)); in i40e_determine_queue_usage()
15263 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps); in i40e_determine_queue_usage()
15266 pf->queues_left = queues_left; in i40e_determine_queue_usage()
15267 dev_dbg(&pf->pdev->dev, in i40e_determine_queue_usage()
15269 pf->hw.func_caps.num_tx_qp, in i40e_determine_queue_usage()
15270 !!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags), in i40e_determine_queue_usage()
15271 pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs, in i40e_determine_queue_usage()
15272 pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps, in i40e_determine_queue_usage()
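The arithmetic in i40e_determine_queue_usage() is a straight budget: clamp the LAN queue count to CPUs and MSI-X vectors, set one queue aside for Flow Director, then carve whole per-function units out of the remainder for VFs and VMDq. A standalone sketch with made-up capacities:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
    int num_tx_qp = 128, rss_size_max = 64, cpus = 16, msix = 64;
    int num_vf_qps = 4, num_req_vfs = 8;

    int lan_qps = max_int(rss_size_max, cpus);

    lan_qps = min_int(lan_qps, num_tx_qp);
    lan_qps = min_int(lan_qps, msix);

    int queues_left = num_tx_qp - lan_qps;

    queues_left -= 1;                     /* one queue for FDIR */
    num_req_vfs = min_int(num_req_vfs, queues_left / num_vf_qps);
    queues_left -= num_req_vfs * num_vf_qps;

    printf("lan=%d vfs=%d left=%d\n", lan_qps, num_req_vfs, queues_left);
    return 0;
}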
15277 * i40e_setup_pf_filter_control - Setup PF static filter control
15289 struct i40e_filter_control_settings *settings = &pf->filter_settings; in i40e_setup_pf_filter_control()
15291 settings->hash_lut_size = I40E_HASH_LUT_SIZE_128; in i40e_setup_pf_filter_control()
15294 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) || in i40e_setup_pf_filter_control()
15295 test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags)) in i40e_setup_pf_filter_control()
15296 settings->enable_fdir = true; in i40e_setup_pf_filter_control()
15299 settings->enable_ethtype = true; in i40e_setup_pf_filter_control()
15300 settings->enable_macvlan = true; in i40e_setup_pf_filter_control()
15302 if (i40e_set_filter_control(&pf->hw, settings)) in i40e_setup_pf_filter_control()
15303 return -ENOENT; in i40e_setup_pf_filter_control()
15309 #define REMAIN(__x) (INFO_STRING_LEN - (__x))
15312 struct i40e_hw *hw = &pf->hw; in i40e_print_features()
15320 i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id); in i40e_print_features()
15322 i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); in i40e_print_features()
15325 pf->hw.func_caps.num_vsis, in i40e_print_features()
15326 pf->vsi[pf->lan_vsi]->num_queue_pairs); in i40e_print_features()
15327 if (test_bit(I40E_FLAG_RSS_ENA, pf->flags)) in i40e_print_features()
15329 if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags)) in i40e_print_features()
15331 if (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) { in i40e_print_features()
15335 if (test_bit(I40E_FLAG_DCB_CAPABLE, pf->flags)) in i40e_print_features()
15339 if (test_bit(I40E_FLAG_PTP_ENA, pf->flags)) in i40e_print_features()
15341 if (test_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags)) in i40e_print_features()
15346 dev_info(&pf->pdev->dev, "%s\n", buf); in i40e_print_features()
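The feature banner is built with the cursor-plus-remainder idiom: each append returns how much it wrote, i advances by that, and REMAIN(i) caps the next write at the space left. One caveat for the userspace sketch below: the kernel's scnprintf returns the bytes actually stored, while plain snprintf returns the would-be length, so the two only agree while the buffer still has room (it does here):

#include <stdio.h>
#include <stdbool.h>

#define INFO_STRING_LEN 255
#define REMAIN(__x) (INFO_STRING_LEN - (__x))

int main(void)
{
    char buf[INFO_STRING_LEN + 1];
    bool rss = true, fd_atr = false;
    int i;

    i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", 0);
    if (rss)
        i += snprintf(&buf[i], REMAIN(i), " RSS");
    if (fd_atr)
        i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
    printf("%s\n", buf);
    return 0;
}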
15352 * i40e_get_platform_mac_addr - get platform-specific MAC address
15363 if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr)) in i40e_get_platform_mac_addr()
15364 i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr); in i40e_get_platform_mac_addr()
15368 * i40e_set_fec_in_flags - helper function for setting FEC options in flags
15395 * i40e_check_recovery_mode - check if we are running transition firmware
15405 u32 val = rd32(&pf->hw, I40E_GL_FWSTS); in i40e_check_recovery_mode()
15408 dev_crit(&pf->pdev->dev, "Firmware recovery mode detected. Limiting functionality.\n"); in i40e_check_recovery_mode()
15409 dev_crit(&pf->pdev->dev, "Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n"); in i40e_check_recovery_mode()
15410 set_bit(__I40E_RECOVERY_MODE, pf->state); in i40e_check_recovery_mode()
15414 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_check_recovery_mode()
15415 dev_info(&pf->pdev->dev, "Please do Power-On Reset to initialize adapter in normal mode with full functionality.\n"); in i40e_check_recovery_mode()
15421 * i40e_pf_loop_reset - perform reset in a loop.
15431 * state is to issue a series of pf-resets and check a return value.
15443 /* wait max 10 seconds for PF reset to succeed */ in i40e_pf_loop_reset()
15445 struct i40e_hw *hw = &pf->hw; in i40e_pf_loop_reset()
15455 pf->pfr_count++; in i40e_pf_loop_reset()
15457 dev_info(&pf->pdev->dev, "PF reset failed: %d\n", ret); in i40e_pf_loop_reset()
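i40e_pf_loop_reset() is a bounded retry: reissue the reset until it succeeds or the 10-second budget from the comment above runs out, pausing between attempts. The same skeleton in userspace terms (the 10 ms back-off is an assumption; the driver's actual sleep interval is not shown in this excerpt):

#include <time.h>
#include <unistd.h>

int loop_reset(int (*do_reset)(void))
{
    time_t deadline = time(NULL) + 10;    /* 10-second budget */
    int ret = do_reset();

    while (ret && time(NULL) < deadline) {
        usleep(10000);                    /* assumed 10 ms back-off */
        ret = do_reset();
    }
    return ret;                           /* 0 on success */
}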
15463 * i40e_check_fw_empr - check if FW issued unexpected EMP Reset
15475 const u32 fw_sts = rd32(&pf->hw, I40E_GL_FWSTS) & in i40e_check_fw_empr()
15482 * i40e_handle_resets - handle EMP resets and PF resets
15498 dev_crit(&pf->pdev->dev, "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n"); in i40e_handle_resets()
15500 return is_empr ? -EIO : pfr; in i40e_handle_resets()
15504 * i40e_init_recovery_mode - initialize subsystems needed in recovery mode
15519 pci_set_drvdata(pf->pdev, pf); in i40e_init_recovery_mode()
15520 pci_save_state(pf->pdev); in i40e_init_recovery_mode()
15523 timer_setup(&pf->service_timer, i40e_service_timer, 0); in i40e_init_recovery_mode()
15524 pf->service_timer_period = HZ; in i40e_init_recovery_mode()
15526 INIT_WORK(&pf->service_task, i40e_service_task); in i40e_init_recovery_mode()
15527 clear_bit(__I40E_SERVICE_SCHED, pf->state); in i40e_init_recovery_mode()
15538 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) in i40e_init_recovery_mode()
15539 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; in i40e_init_recovery_mode()
15541 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; in i40e_init_recovery_mode()
15544 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), in i40e_init_recovery_mode()
15546 if (!pf->vsi) { in i40e_init_recovery_mode()
15547 err = -ENOMEM; in i40e_init_recovery_mode()
15559 pf->lan_vsi = v_idx; in i40e_init_recovery_mode()
15560 vsi = pf->vsi[v_idx]; in i40e_init_recovery_mode()
15562 err = -EFAULT; in i40e_init_recovery_mode()
15565 vsi->alloc_queue_pairs = 1; in i40e_init_recovery_mode()
15569 err = register_netdev(vsi->netdev); in i40e_init_recovery_mode()
15572 vsi->netdev_registered = true; in i40e_init_recovery_mode()
15583 mod_timer(&pf->service_timer, in i40e_init_recovery_mode()
15584 round_jiffies(jiffies + pf->service_timer_period)); in i40e_init_recovery_mode()
15590 timer_shutdown_sync(&pf->service_timer); in i40e_init_recovery_mode()
15592 iounmap(hw->hw_addr); in i40e_init_recovery_mode()
15593 pci_release_mem_regions(pf->pdev); in i40e_init_recovery_mode()
15594 pci_disable_device(pf->pdev); in i40e_init_recovery_mode()
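The labels at the bottom of i40e_init_recovery_mode() (timer shutdown, iounmap, region release, device disable) form the kernel's standard unwind ladder: every setup step that can fail jumps to a label that undoes only the steps already completed, in reverse order. The idiom in miniature:

#include <stdlib.h>

int bring_up(void)
{
    void *a, *b;
    int err;

    a = malloc(64);
    if (!a) { err = -1; goto err_a; }
    b = malloc(64);
    if (!b) { err = -1; goto err_b; }
    /* ...fully up; both resources now owned by the caller... */
    return 0;

err_b:
    free(a);        /* undo only what succeeded */
err_a:
    return err;
}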
15601 * i40e_set_subsystem_device_id - set subsystem device id
15611 hw->subsystem_device_id = pf->pdev->subsystem_device ? in i40e_set_subsystem_device_id()
15612 pf->pdev->subsystem_device : in i40e_set_subsystem_device_id()
15617 * i40e_probe - Device initialization routine
15650 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in i40e_probe()
15652 dev_err(&pdev->dev, in i40e_probe()
15660 dev_info(&pdev->dev, in i40e_probe()
15672 pf = i40e_alloc_pf(&pdev->dev); in i40e_probe()
15674 err = -ENOMEM; in i40e_probe()
15677 pf->next_vsi = 0; in i40e_probe()
15678 pf->pdev = pdev; in i40e_probe()
15679 set_bit(__I40E_DOWN, pf->state); in i40e_probe()
15681 hw = &pf->hw; in i40e_probe()
15683 pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0), in i40e_probe()
15690 if (pf->ioremap_len < I40E_GLGEN_STAT_CLEAR) { in i40e_probe()
15691 dev_err(&pdev->dev, "Cannot map registers, bar size 0x%X too small, aborting\n", in i40e_probe()
15692 pf->ioremap_len); in i40e_probe()
15693 err = -ENOMEM; in i40e_probe()
15696 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len); in i40e_probe()
15697 if (!hw->hw_addr) { in i40e_probe()
15698 err = -EIO; in i40e_probe()
15699 dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n", in i40e_probe()
15701 pf->ioremap_len, err); in i40e_probe()
15704 hw->vendor_id = pdev->vendor; in i40e_probe()
15705 hw->device_id = pdev->device; in i40e_probe()
15706 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); in i40e_probe()
15707 hw->subsystem_vendor_id = pdev->subsystem_vendor; in i40e_probe()
15709 hw->bus.device = PCI_SLOT(pdev->devfn); in i40e_probe()
15710 hw->bus.func = PCI_FUNC(pdev->devfn); in i40e_probe()
15711 hw->bus.bus_id = pdev->bus->number; in i40e_probe()
15716 hw->switch_tag = 0xffff; in i40e_probe()
15717 hw->first_tag = ETH_P_8021AD; in i40e_probe()
15718 hw->second_tag = ETH_P_8021Q; in i40e_probe()
15720 INIT_LIST_HEAD(&pf->l3_flex_pit_list); in i40e_probe()
15721 INIT_LIST_HEAD(&pf->l4_flex_pit_list); in i40e_probe()
15722 INIT_LIST_HEAD(&pf->ddp_old_prof); in i40e_probe()
15727 mutex_init(&hw->aq.asq_mutex); in i40e_probe()
15728 mutex_init(&hw->aq.arq_mutex); in i40e_probe()
15730 pf->msg_enable = netif_msg_init(debug, in i40e_probe()
15734 if (debug < -1) in i40e_probe()
15735 pf->hw.debug_mask = debug; in i40e_probe()
15738 if (hw->revision_id == 0 && in i40e_probe()
15743 pf->corer_count++; in i40e_probe()
15753 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", in i40e_probe()
15765 hw->aq.num_arq_entries = I40E_MIN_ARQ_LEN; in i40e_probe()
15766 hw->aq.num_asq_entries = I40E_MIN_ASQ_LEN; in i40e_probe()
15768 hw->aq.num_arq_entries = I40E_AQ_LEN; in i40e_probe()
15769 hw->aq.num_asq_entries = I40E_AQ_LEN; in i40e_probe()
15771 hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE; in i40e_probe()
15772 hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE; in i40e_probe()
15774 snprintf(pf->int_name, sizeof(pf->int_name) - 1, in i40e_probe()
15775 "%s-%s:misc", in i40e_probe()
15776 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); in i40e_probe()
15780 dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n", in i40e_probe()
15786 pf->hw.fc.requested_mode = I40E_FC_NONE; in i40e_probe()
15790 if (err == -EIO) in i40e_probe()
15791 dev_info(&pdev->dev, in i40e_probe()
15793 hw->aq.api_maj_ver, in i40e_probe()
15794 hw->aq.api_min_ver, in i40e_probe()
15798 dev_info(&pdev->dev, in i40e_probe()
15808 dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s [%04x:%04x] [%04x:%04x]\n", in i40e_probe()
15809 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build, in i40e_probe()
15810 hw->aq.api_maj_ver, hw->aq.api_min_ver, nvm_ver, in i40e_probe()
15811 hw->vendor_id, hw->device_id, hw->subsystem_vendor_id, in i40e_probe()
15812 hw->subsystem_device_id); in i40e_probe()
15816 dev_dbg(&pdev->dev, in i40e_probe()
15818 hw->aq.api_maj_ver, in i40e_probe()
15819 hw->aq.api_min_ver, in i40e_probe()
15823 dev_info(&pdev->dev, in i40e_probe()
15825 hw->aq.api_maj_ver, in i40e_probe()
15826 hw->aq.api_min_ver, in i40e_probe()
15833 if (hw->revision_id < 1) in i40e_probe()
15834 dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n"); in i40e_probe()
15844 dev_info(&pdev->dev, "sw_init failed: %d\n", err); in i40e_probe()
15848 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_probe()
15851 err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, in i40e_probe()
15852 hw->func_caps.num_rx_qp, 0, 0); in i40e_probe()
15854 dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err); in i40e_probe()
15860 dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err); in i40e_probe()
15861 err = -ENOENT; in i40e_probe()
15869 if (test_bit(I40E_HW_CAP_STOP_FW_LLDP, pf->hw.caps)) { in i40e_probe()
15870 dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); in i40e_probe()
15877 if (!is_valid_ether_addr(hw->mac.addr)) { in i40e_probe()
15878 dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr); in i40e_probe()
15879 err = -EIO; in i40e_probe()
15882 dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr); in i40e_probe()
15883 ether_addr_copy(hw->mac.perm_addr, hw->mac.addr); in i40e_probe()
15884 i40e_get_port_mac_addr(hw, hw->mac.port_addr); in i40e_probe()
15885 if (is_valid_ether_addr(hw->mac.port_addr)) in i40e_probe()
15886 set_bit(I40E_HW_CAP_PORT_ID_VALID, pf->hw.caps); in i40e_probe()
15893 status = i40e_get_fw_lldp_status(&pf->hw, &lldp_status); in i40e_probe()
15896 (clear_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)) : in i40e_probe()
15897 (set_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags)); in i40e_probe()
15898 dev_info(&pdev->dev, in i40e_probe()
15899 test_bit(I40E_FLAG_FW_LLDP_DIS, pf->flags) ? in i40e_probe()
15903 /* Enable FW to write default DCB config on link-up */ in i40e_probe()
15908 dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err); in i40e_probe()
15909 clear_bit(I40E_FLAG_DCB_CAPABLE, pf->flags); in i40e_probe()
15910 clear_bit(I40E_FLAG_DCB_ENA, pf->flags); in i40e_probe()
15916 timer_setup(&pf->service_timer, i40e_service_timer, 0); in i40e_probe()
15917 pf->service_timer_period = HZ; in i40e_probe()
15919 INIT_WORK(&pf->service_task, i40e_service_task); in i40e_probe()
15920 clear_bit(__I40E_SERVICE_SCHED, pf->state); in i40e_probe()
15924 if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1) in i40e_probe()
15925 pf->wol_en = false; in i40e_probe()
15927 pf->wol_en = true; in i40e_probe()
15928 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); in i40e_probe()
15937 * When MSI-X is enabled, it's not allowed to use more TC queue in i40e_probe()
15938 * pairs than MSI-X vectors (pf->num_lan_msix) exist. Thus in i40e_probe()
15939 * vsi->num_queue_pairs will be equal to pf->num_lan_msix, i.e., 1. in i40e_probe()
15942 pf->num_lan_msix = 1; in i40e_probe()
15944 pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port; in i40e_probe()
15945 pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port; in i40e_probe()
15946 pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; in i40e_probe()
15947 pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared; in i40e_probe()
15948 pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS; in i40e_probe()
15949 pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN | in i40e_probe()
15957 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) in i40e_probe()
15958 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; in i40e_probe()
15960 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; in i40e_probe()
15961 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { in i40e_probe()
15962 dev_warn(&pf->pdev->dev, in i40e_probe()
15964 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); in i40e_probe()
15965 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; in i40e_probe()
15969 pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), in i40e_probe()
15971 if (!pf->vsi) { in i40e_probe()
15972 err = -ENOMEM; in i40e_probe()
15978 if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) && in i40e_probe()
15979 test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && in i40e_probe()
15980 !test_bit(__I40E_BAD_EEPROM, pf->state)) { in i40e_probe()
15982 set_bit(I40E_FLAG_VEB_MODE_ENA, pf->flags); in i40e_probe()
15987 dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); in i40e_probe()
15990 INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list); in i40e_probe()
15993 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_probe()
15994 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { in i40e_probe()
15995 i40e_vsi_open(pf->vsi[i]); in i40e_probe()
16003 err = i40e_aq_set_phy_int_mask(&pf->hw, in i40e_probe()
16008 dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n", in i40e_probe()
16010 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_probe()
16023 if (test_bit(I40E_HW_CAP_RESTART_AUTONEG, pf->hw.caps)) { in i40e_probe()
16025 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); in i40e_probe()
16027 dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n", in i40e_probe()
16029 i40e_aq_str(&pf->hw, in i40e_probe()
16030 pf->hw.aq.asq_last_status)); in i40e_probe()
16036 clear_bit(__I40E_DOWN, pf->state); in i40e_probe()
16043 if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) { in i40e_probe()
16046 dev_info(&pdev->dev, in i40e_probe()
16056 if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags) && in i40e_probe()
16057 test_bit(I40E_FLAG_MSIX_ENA, pf->flags) && in i40e_probe()
16058 !test_bit(__I40E_BAD_EEPROM, pf->state)) { in i40e_probe()
16066 dev_info(&pdev->dev, in i40e_probe()
16070 dev_info(&pdev->dev, in i40e_probe()
16077 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { in i40e_probe()
16078 pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile, in i40e_probe()
16079 pf->num_iwarp_msix, in i40e_probe()
16081 if (pf->iwarp_base_vector < 0) { in i40e_probe()
16082 dev_info(&pdev->dev, in i40e_probe()
16084 pf->num_iwarp_msix, pf->iwarp_base_vector); in i40e_probe()
16085 clear_bit(I40E_FLAG_IWARP_ENA, pf->flags); in i40e_probe()
16095 mod_timer(&pf->service_timer, in i40e_probe()
16096 round_jiffies(jiffies + pf->service_timer_period)); in i40e_probe()
16099 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { in i40e_probe()
16102 dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n", in i40e_probe()
16112 if (!test_bit(I40E_HW_CAP_NO_PCI_LINK_CHECK, pf->hw.caps)) { in i40e_probe()
16119 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, in i40e_probe()
16124 switch (hw->bus.speed) { in i40e_probe()
16134 switch (hw->bus.width) { in i40e_probe()
16147 dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n", in i40e_probe()
16150 if (hw->bus.width < i40e_bus_width_pcie_x8 || in i40e_probe()
16151 hw->bus.speed < i40e_bus_speed_8000) { in i40e_probe()
16152 dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n"); in i40e_probe()
16153 dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n"); in i40e_probe()
16160 dev_dbg(&pf->pdev->dev, "get requested speeds ret = %pe last_status = %s\n", in i40e_probe()
16162 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_probe()
16163 pf->hw.phy.link_info.requested_speeds = abilities.link_speed; in i40e_probe()
16166 i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, pf->flags); in i40e_probe()
16171 dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n", in i40e_probe()
16173 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); in i40e_probe()
16178 rd32(&pf->hw, I40E_PRTGL_SAH)); in i40e_probe()
16180 dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n", in i40e_probe()
16181 pf->hw.port, val); in i40e_probe()
16189 i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, in i40e_probe()
16190 pf->main_vsi_seid); in i40e_probe()
16192 if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) || in i40e_probe()
16193 (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4)) in i40e_probe()
16194 set_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps); in i40e_probe()
16195 if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722) in i40e_probe()
16196 set_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps); in i40e_probe()
16206 set_bit(__I40E_DOWN, pf->state); in i40e_probe()
16208 kfree(pf->vsi); in i40e_probe()
16211 timer_shutdown_sync(&pf->service_timer); in i40e_probe()
16216 kfree(pf->qp_pile); in i40e_probe()
16220 iounmap(hw->hw_addr); in i40e_probe()
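Earlier in the probe path the PCIe link is sanity-checked by reading PCI_EXP_LNKSTA and warning when the slot offers less than x8 at 8 GT/s. Decoding that register is two masks from the PCIe spec, as in this standalone sketch (the example value encodes 8 GT/s at x8; the mask values match the Linux uapi definitions):

#include <stdio.h>
#include <stdint.h>

#define PCI_EXP_LNKSTA_CLS 0x000f    /* current link speed */
#define PCI_EXP_LNKSTA_NLW 0x03f0    /* negotiated link width */
#define PCI_EXP_LNKSTA_NLW_SHIFT 4

int main(void)
{
    uint16_t lnksta = 0x0083;        /* example: 8 GT/s, x8 */
    unsigned speed = lnksta & PCI_EXP_LNKSTA_CLS;
    unsigned width = (lnksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;

    printf("speed code %u, width x%u\n", speed, width);
    return 0;
}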
16232 * i40e_remove - Device removal routine
16237 * Hot-Plug event, or because the driver is going to be removed from
16243 struct i40e_hw *hw = &pf->hw; in i40e_remove()
16261 while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) in i40e_remove()
16263 set_bit(__I40E_IN_REMOVE, pf->state); in i40e_remove()
16265 if (test_bit(I40E_FLAG_SRIOV_ENA, pf->flags)) { in i40e_remove()
16266 set_bit(__I40E_VF_RESETS_DISABLED, pf->state); in i40e_remove()
16268 clear_bit(I40E_FLAG_SRIOV_ENA, pf->flags); in i40e_remove()
16271 set_bit(__I40E_SUSPENDED, pf->state); in i40e_remove()
16272 set_bit(__I40E_DOWN, pf->state); in i40e_remove()
16273 if (pf->service_timer.function) in i40e_remove()
16274 timer_shutdown_sync(&pf->service_timer); in i40e_remove()
16275 if (pf->service_task.func) in i40e_remove()
16276 cancel_work_sync(&pf->service_task); in i40e_remove()
16278 if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { in i40e_remove()
16279 struct i40e_vsi *vsi = pf->vsi[0]; in i40e_remove()
16285 unregister_netdev(vsi->netdev); in i40e_remove()
16286 free_netdev(vsi->netdev); in i40e_remove()
16294 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_remove()
16302 if (!pf->veb[i]) in i40e_remove()
16305 if (pf->veb[i]->uplink_seid == pf->mac_seid || in i40e_remove()
16306 pf->veb[i]->uplink_seid == 0) in i40e_remove()
16307 i40e_switch_branch_release(pf->veb[i]); in i40e_remove()
16313 for (i = pf->num_alloc_vsi; i--;) in i40e_remove()
16314 if (pf->vsi[i]) { in i40e_remove()
16315 i40e_vsi_close(pf->vsi[i]); in i40e_remove()
16316 i40e_vsi_release(pf->vsi[i]); in i40e_remove()
16317 pf->vsi[i] = NULL; in i40e_remove()
16323 if (test_bit(I40E_FLAG_IWARP_ENA, pf->flags)) { in i40e_remove()
16326 dev_warn(&pdev->dev, "Failed to delete client device: %d\n", in i40e_remove()
16331 if (hw->hmc.hmc_obj) { in i40e_remove()
16334 dev_warn(&pdev->dev, in i40e_remove()
16341 if (test_bit(__I40E_RECOVERY_MODE, pf->state) && in i40e_remove()
16342 !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_remove()
16343 free_irq(pf->pdev->irq, pf); in i40e_remove()
16349 mutex_destroy(&hw->aq.arq_mutex); in i40e_remove()
16350 mutex_destroy(&hw->aq.asq_mutex); in i40e_remove()
16355 for (i = 0; i < pf->num_alloc_vsi; i++) { in i40e_remove()
16356 if (pf->vsi[i]) { in i40e_remove()
16357 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_remove()
16358 i40e_vsi_clear_rings(pf->vsi[i]); in i40e_remove()
16359 i40e_vsi_clear(pf->vsi[i]); in i40e_remove()
16360 pf->vsi[i] = NULL; in i40e_remove()
16366 kfree(pf->veb[i]); in i40e_remove()
16367 pf->veb[i] = NULL; in i40e_remove()
16370 kfree(pf->qp_pile); in i40e_remove()
16371 kfree(pf->vsi); in i40e_remove()
16373 iounmap(hw->hw_addr); in i40e_remove()
16381 * i40e_pci_error_detected - warning that something funky happened in PCI land
16394 dev_info(&pdev->dev, "%s: error %d\n", __func__, error); in i40e_pci_error_detected()
16397 dev_info(&pdev->dev, in i40e_pci_error_detected()
16398 "Cannot recover - error happened during device probe\n"); in i40e_pci_error_detected()
16403 if (!test_bit(__I40E_SUSPENDED, pf->state)) in i40e_pci_error_detected()
16411 * i40e_pci_error_slot_reset - a PCI slot reset just happened
16425 dev_dbg(&pdev->dev, "%s\n", __func__); in i40e_pci_error_slot_reset()
16427 dev_info(&pdev->dev, in i40e_pci_error_slot_reset()
16428 "Cannot re-enable PCI device after reset.\n"); in i40e_pci_error_slot_reset()
16436 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG); in i40e_pci_error_slot_reset()
16447 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
16458 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
16465 if (test_bit(__I40E_IN_REMOVE, pf->state)) in i40e_pci_error_reset_done()
16475 * i40e_pci_error_resume - restart operations after PCI error recovery
16485 dev_dbg(&pdev->dev, "%s\n", __func__); in i40e_pci_error_resume()
16486 if (test_bit(__I40E_SUSPENDED, pf->state)) in i40e_pci_error_resume()
16493 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
16499 struct i40e_hw *hw = &pf->hw; in i40e_enable_mc_magic_wake()
16505 if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) { in i40e_enable_mc_magic_wake()
16507 pf->vsi[pf->lan_vsi]->netdev->dev_addr); in i40e_enable_mc_magic_wake()
16509 dev_err(&pf->pdev->dev, in i40e_enable_mc_magic_wake()
16511 ether_addr_copy(mac_addr, hw->mac.addr); in i40e_enable_mc_magic_wake()
16520 if (hw->func_caps.flex10_enable && hw->partition_id != 1) in i40e_enable_mc_magic_wake()
16525 dev_err(&pf->pdev->dev, in i40e_enable_mc_magic_wake()
16535 dev_err(&pf->pdev->dev, in i40e_enable_mc_magic_wake()
16540 * i40e_shutdown - PCI callback for shutting down
16546 struct i40e_hw *hw = &pf->hw; in i40e_shutdown()
16548 set_bit(__I40E_SUSPENDED, pf->state); in i40e_shutdown()
16549 set_bit(__I40E_DOWN, pf->state); in i40e_shutdown()
16551 del_timer_sync(&pf->service_timer); in i40e_shutdown()
16552 cancel_work_sync(&pf->service_task); in i40e_shutdown()
16559 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_shutdown()
16561 if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) && in i40e_shutdown()
16562 pf->wol_en) in i40e_shutdown()
16568 (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); in i40e_shutdown()
16570 (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); in i40e_shutdown()
16573 if (test_bit(__I40E_RECOVERY_MODE, pf->state) && in i40e_shutdown()
16574 !test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) in i40e_shutdown()
16575 free_irq(pf->pdev->irq, pf); in i40e_shutdown()
16586 pci_wake_from_d3(pdev, pf->wol_en); in i40e_shutdown()
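The shutdown path arms wake-on-LAN with two register writes, the ternaries choosing between the wake mask and zero so the same code both arms and disarms. A sketch of that arm-or-clear pattern (the mask values below are placeholders, not the real I40E_PFPM_* definitions):

#include <stdint.h>

#define APME_MASK 0x1u    /* placeholder, not I40E_PFPM_APM_APME_MASK */
#define MAG_MASK  0x2u    /* placeholder, not I40E_PFPM_WUFC_MAG_MASK */

static void wr32(volatile uint32_t *reg, uint32_t val)
{
    *reg = val;           /* stand-in for the MMIO write helper */
}

void arm_wol(volatile uint32_t *apm, volatile uint32_t *wufc, int wol_en)
{
    wr32(apm,  wol_en ? APME_MASK : 0);    /* APM wake enable */
    wr32(wufc, wol_en ? MAG_MASK  : 0);    /* magic-packet filter */
}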
16592 * i40e_suspend - PM callback for moving to D3
16598 struct i40e_hw *hw = &pf->hw; in i40e_suspend()
16601 if (test_and_set_bit(__I40E_SUSPENDED, pf->state)) in i40e_suspend()
16604 set_bit(__I40E_DOWN, pf->state); in i40e_suspend()
16607 del_timer_sync(&pf->service_timer); in i40e_suspend()
16608 cancel_work_sync(&pf->service_task); in i40e_suspend()
16613 i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); in i40e_suspend()
16615 if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) && in i40e_suspend()
16616 pf->wol_en) in i40e_suspend()
16627 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); in i40e_suspend()
16628 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); in i40e_suspend()
16643 * i40e_resume - PM callback for waking up from D3
16652 if (!test_bit(__I40E_SUSPENDED, pf->state)) in i40e_resume()
16669 clear_bit(__I40E_DOWN, pf->state); in i40e_resume()
16675 clear_bit(__I40E_SUSPENDED, pf->state); in i40e_resume()
16678 mod_timer(&pf->service_timer, in i40e_resume()
16679 round_jiffies(jiffies + pf->service_timer_period)); in i40e_resume()
16708 * i40e_init_module - Driver registration routine
16730 return -ENOMEM; in i40e_init_module()
16746 * i40e_exit_module - Driver exit cleanup routine