Lines Matching +full:mss +full:- +full:supply

1 // SPDX-License-Identifier: GPL-2.0
32 static int debug = -1;
81 struct net_device *dev = adapter->netdev; in igc_reset()
82 struct igc_hw *hw = &adapter->hw; in igc_reset()
83 struct igc_fc_info *fc = &hw->fc; in igc_reset()
95 * - the full Rx FIFO size minus one full Tx plus one full Rx frame in igc_reset()
97 hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); in igc_reset()
99 fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ in igc_reset()
100 fc->low_water = fc->high_water - 16; in igc_reset()
101 fc->pause_time = 0xFFFF; in igc_reset()
102 fc->send_xon = 1; in igc_reset()
103 fc->current_mode = fc->requested_mode; in igc_reset()
105 hw->mac.ops.reset_hw(hw); in igc_reset()
107 if (hw->mac.ops.init_hw(hw)) in igc_reset()
110 /* Re-establish EEE setting */ in igc_reset()
113 if (!netif_running(adapter->netdev)) in igc_reset()
114 igc_power_down_phy_copper_base(&adapter->hw); in igc_reset()
119 /* Re-enable PTP, where applicable. */ in igc_reset()
122 /* Re-enable TSN offloading, where applicable. */ in igc_reset()
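A minimal standalone sketch of the flow-control watermark arithmetic shown above; the 34 KB packet buffer and the MAX_JUMBO_FRAME_SIZE value are illustrative assumptions rather than values read from hardware or igc_defines.h:

#include <stdint.h>
#include <stdio.h>

#define MAX_JUMBO_FRAME_SIZE 0x2600	/* assumed constant, verify in igc_defines.h */

int main(void)
{
	uint32_t pba = 34;			/* Rx packet buffer size in KB, example value */
	uint32_t max_frame_size = 1522;		/* 1500 MTU + Ethernet header + VLAN + FCS */
	uint32_t hwm, high_water, low_water;

	/* full Rx FIFO size minus one full Tx plus one full Rx frame */
	hwm = (pba << 10) - (max_frame_size + MAX_JUMBO_FRAME_SIZE);

	high_water = hwm & 0xFFFFFFF0;		/* round down to 16-byte granularity */
	low_water  = high_water - 16;		/* one granule below the high mark */

	printf("hwm=%u high_water=%u low_water=%u\n", hwm, high_water, low_water);
	return 0;
}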
129 * igc_power_up_link - Power up the phy link
134 igc_reset_phy(&adapter->hw); in igc_power_up_link()
136 igc_power_up_phy_copper(&adapter->hw); in igc_power_up_link()
138 igc_setup_link(&adapter->hw); in igc_power_up_link()
142 * igc_release_hw_control - release control of the h/w to f/w
151 struct igc_hw *hw = &adapter->hw; in igc_release_hw_control()
154 if (!pci_device_is_present(adapter->pdev)) in igc_release_hw_control()
164 * igc_get_hw_control - get control of the h/w from f/w
173 struct igc_hw *hw = &adapter->hw; in igc_get_hw_control()
191 * igc_clean_tx_ring - Free Tx Buffers
196 u16 i = tx_ring->next_to_clean; in igc_clean_tx_ring()
197 struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_clean_tx_ring()
200 while (i != tx_ring->next_to_use) { in igc_clean_tx_ring()
203 switch (tx_buffer->type) { in igc_clean_tx_ring()
208 xdp_return_frame(tx_buffer->xdpf); in igc_clean_tx_ring()
209 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_ring()
212 dev_kfree_skb_any(tx_buffer->skb); in igc_clean_tx_ring()
213 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_ring()
216 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); in igc_clean_tx_ring()
221 eop_desc = tx_buffer->next_to_watch; in igc_clean_tx_ring()
229 if (unlikely(i == tx_ring->count)) { in igc_clean_tx_ring()
231 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_ring()
237 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_ring()
240 tx_buffer->next_to_watch = NULL; in igc_clean_tx_ring()
245 if (unlikely(i == tx_ring->count)) { in igc_clean_tx_ring()
247 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_ring()
251 if (tx_ring->xsk_pool && xsk_frames) in igc_clean_tx_ring()
252 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); in igc_clean_tx_ring()
258 memset(tx_ring->tx_buffer_info, 0, in igc_clean_tx_ring()
259 sizeof(*tx_ring->tx_buffer_info) * tx_ring->count); in igc_clean_tx_ring()
262 memset(tx_ring->desc, 0, tx_ring->size); in igc_clean_tx_ring()
265 tx_ring->next_to_use = 0; in igc_clean_tx_ring()
266 tx_ring->next_to_clean = 0; in igc_clean_tx_ring()
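The cleanup loop above walks from next_to_clean towards next_to_use and wraps the ring index by hand; a tiny sketch of that wrap pattern:

/* Advance an index through a ring of 'count' entries, resetting to 0 when
 * it passes the end -- the same test as "unlikely(i == tx_ring->count)".
 */
static unsigned int ring_next(unsigned int i, unsigned int count)
{
	i++;
	if (i == count)
		i = 0;
	return i;
}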
270 * igc_free_tx_resources - Free Tx Resources per Queue
279 vfree(tx_ring->tx_buffer_info); in igc_free_tx_resources()
280 tx_ring->tx_buffer_info = NULL; in igc_free_tx_resources()
283 if (!tx_ring->desc) in igc_free_tx_resources()
286 dma_free_coherent(tx_ring->dev, tx_ring->size, in igc_free_tx_resources()
287 tx_ring->desc, tx_ring->dma); in igc_free_tx_resources()
289 tx_ring->desc = NULL; in igc_free_tx_resources()
293 * igc_free_all_tx_resources - Free Tx Resources for All Queues
302 for (i = 0; i < adapter->num_tx_queues; i++) in igc_free_all_tx_resources()
303 igc_free_tx_resources(adapter->tx_ring[i]); in igc_free_all_tx_resources()
307 * igc_clean_all_tx_rings - Free Tx Buffers for all queues
314 for (i = 0; i < adapter->num_tx_queues; i++) in igc_clean_all_tx_rings()
315 if (adapter->tx_ring[i]) in igc_clean_all_tx_rings()
316 igc_clean_tx_ring(adapter->tx_ring[i]); in igc_clean_all_tx_rings()
321 struct igc_hw *hw = &ring->q_vector->adapter->hw; in igc_disable_tx_ring_hw()
322 u8 idx = ring->reg_idx; in igc_disable_tx_ring_hw()
332 * igc_disable_all_tx_rings_hw - Disable all transmit queue operation
339 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_disable_all_tx_rings_hw()
340 struct igc_ring *tx_ring = adapter->tx_ring[i]; in igc_disable_all_tx_rings_hw()
347 * igc_setup_tx_resources - allocate Tx resources (Descriptors)
354 struct net_device *ndev = tx_ring->netdev; in igc_setup_tx_resources()
355 struct device *dev = tx_ring->dev; in igc_setup_tx_resources()
358 size = sizeof(struct igc_tx_buffer) * tx_ring->count; in igc_setup_tx_resources()
359 tx_ring->tx_buffer_info = vzalloc(size); in igc_setup_tx_resources()
360 if (!tx_ring->tx_buffer_info) in igc_setup_tx_resources()
364 tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); in igc_setup_tx_resources()
365 tx_ring->size = ALIGN(tx_ring->size, 4096); in igc_setup_tx_resources()
367 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in igc_setup_tx_resources()
368 &tx_ring->dma, GFP_KERNEL); in igc_setup_tx_resources()
370 if (!tx_ring->desc) in igc_setup_tx_resources()
373 tx_ring->next_to_use = 0; in igc_setup_tx_resources()
374 tx_ring->next_to_clean = 0; in igc_setup_tx_resources()
379 vfree(tx_ring->tx_buffer_info); in igc_setup_tx_resources()
381 return -ENOMEM; in igc_setup_tx_resources()
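A small sketch of the allocation sizing above: the descriptor ring is sized as count times the descriptor size and rounded up to a 4 KiB boundary. ALIGN_UP mirrors the kernel ALIGN() semantics for a power-of-two alignment.

#include <stddef.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

static size_t tx_ring_bytes(unsigned int count, size_t desc_size)
{
	/* desc_size stands in for sizeof(union igc_adv_tx_desc) */
	size_t size = (size_t)count * desc_size;

	return ALIGN_UP(size, 4096);	/* round up to a 4 KiB boundary */
}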
385 * igc_setup_all_tx_resources - wrapper to allocate Tx resources for all queues
392 struct net_device *dev = adapter->netdev; in igc_setup_all_tx_resources()
395 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_setup_all_tx_resources()
396 err = igc_setup_tx_resources(adapter->tx_ring[i]); in igc_setup_all_tx_resources()
399 for (i--; i >= 0; i--) in igc_setup_all_tx_resources()
400 igc_free_tx_resources(adapter->tx_ring[i]); in igc_setup_all_tx_resources()
410 u16 i = rx_ring->next_to_clean; in igc_clean_rx_ring_page_shared()
412 dev_kfree_skb(rx_ring->skb); in igc_clean_rx_ring_page_shared()
413 rx_ring->skb = NULL; in igc_clean_rx_ring_page_shared()
416 while (i != rx_ring->next_to_alloc) { in igc_clean_rx_ring_page_shared()
417 struct igc_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i]; in igc_clean_rx_ring_page_shared()
422 dma_sync_single_range_for_cpu(rx_ring->dev, in igc_clean_rx_ring_page_shared()
423 buffer_info->dma, in igc_clean_rx_ring_page_shared()
424 buffer_info->page_offset, in igc_clean_rx_ring_page_shared()
429 dma_unmap_page_attrs(rx_ring->dev, in igc_clean_rx_ring_page_shared()
430 buffer_info->dma, in igc_clean_rx_ring_page_shared()
434 __page_frag_cache_drain(buffer_info->page, in igc_clean_rx_ring_page_shared()
435 buffer_info->pagecnt_bias); in igc_clean_rx_ring_page_shared()
438 if (i == rx_ring->count) in igc_clean_rx_ring_page_shared()
448 for (i = 0; i < ring->count; i++) { in igc_clean_rx_ring_xsk_pool()
449 bi = &ring->rx_buffer_info[i]; in igc_clean_rx_ring_xsk_pool()
450 if (!bi->xdp) in igc_clean_rx_ring_xsk_pool()
453 xsk_buff_free(bi->xdp); in igc_clean_rx_ring_xsk_pool()
454 bi->xdp = NULL; in igc_clean_rx_ring_xsk_pool()
459 * igc_clean_rx_ring - Free Rx Buffers per Queue
464 if (ring->xsk_pool) in igc_clean_rx_ring()
471 ring->next_to_alloc = 0; in igc_clean_rx_ring()
472 ring->next_to_clean = 0; in igc_clean_rx_ring()
473 ring->next_to_use = 0; in igc_clean_rx_ring()
477 * igc_clean_all_rx_rings - Free Rx Buffers for all queues
484 for (i = 0; i < adapter->num_rx_queues; i++) in igc_clean_all_rx_rings()
485 if (adapter->rx_ring[i]) in igc_clean_all_rx_rings()
486 igc_clean_rx_ring(adapter->rx_ring[i]); in igc_clean_all_rx_rings()
490 * igc_free_rx_resources - Free Rx Resources
499 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in igc_free_rx_resources()
501 vfree(rx_ring->rx_buffer_info); in igc_free_rx_resources()
502 rx_ring->rx_buffer_info = NULL; in igc_free_rx_resources()
505 if (!rx_ring->desc) in igc_free_rx_resources()
508 dma_free_coherent(rx_ring->dev, rx_ring->size, in igc_free_rx_resources()
509 rx_ring->desc, rx_ring->dma); in igc_free_rx_resources()
511 rx_ring->desc = NULL; in igc_free_rx_resources()
515 * igc_free_all_rx_resources - Free Rx Resources for All Queues
524 for (i = 0; i < adapter->num_rx_queues; i++) in igc_free_all_rx_resources()
525 igc_free_rx_resources(adapter->rx_ring[i]); in igc_free_all_rx_resources()
529 * igc_setup_rx_resources - allocate Rx resources (Descriptors)
536 struct net_device *ndev = rx_ring->netdev; in igc_setup_rx_resources()
537 struct device *dev = rx_ring->dev; in igc_setup_rx_resources()
538 u8 index = rx_ring->queue_index; in igc_setup_rx_resources()
541 /* XDP RX-queue info */ in igc_setup_rx_resources()
542 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) in igc_setup_rx_resources()
543 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in igc_setup_rx_resources()
544 res = xdp_rxq_info_reg(&rx_ring->xdp_rxq, ndev, index, in igc_setup_rx_resources()
545 rx_ring->q_vector->napi.napi_id); in igc_setup_rx_resources()
552 size = sizeof(struct igc_rx_buffer) * rx_ring->count; in igc_setup_rx_resources()
553 rx_ring->rx_buffer_info = vzalloc(size); in igc_setup_rx_resources()
554 if (!rx_ring->rx_buffer_info) in igc_setup_rx_resources()
560 rx_ring->size = rx_ring->count * desc_len; in igc_setup_rx_resources()
561 rx_ring->size = ALIGN(rx_ring->size, 4096); in igc_setup_rx_resources()
563 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in igc_setup_rx_resources()
564 &rx_ring->dma, GFP_KERNEL); in igc_setup_rx_resources()
566 if (!rx_ring->desc) in igc_setup_rx_resources()
569 rx_ring->next_to_alloc = 0; in igc_setup_rx_resources()
570 rx_ring->next_to_clean = 0; in igc_setup_rx_resources()
571 rx_ring->next_to_use = 0; in igc_setup_rx_resources()
576 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in igc_setup_rx_resources()
577 vfree(rx_ring->rx_buffer_info); in igc_setup_rx_resources()
578 rx_ring->rx_buffer_info = NULL; in igc_setup_rx_resources()
580 return -ENOMEM; in igc_setup_rx_resources()
584 * igc_setup_all_rx_resources - wrapper to allocate Rx resources
592 struct net_device *dev = adapter->netdev; in igc_setup_all_rx_resources()
595 for (i = 0; i < adapter->num_rx_queues; i++) { in igc_setup_all_rx_resources()
596 err = igc_setup_rx_resources(adapter->rx_ring[i]); in igc_setup_all_rx_resources()
599 for (i--; i >= 0; i--) in igc_setup_all_rx_resources()
600 igc_free_rx_resources(adapter->rx_ring[i]); in igc_setup_all_rx_resources()
612 !test_bit(IGC_RING_FLAG_AF_XDP_ZC, &ring->flags)) in igc_get_xsk_pool()
615 return xsk_get_pool_from_qid(ring->netdev, ring->queue_index); in igc_get_xsk_pool()
619 * igc_configure_rx_ring - Configure a receive ring after Reset
628 struct igc_hw *hw = &adapter->hw; in igc_configure_rx_ring()
630 int reg_idx = ring->reg_idx; in igc_configure_rx_ring()
632 u64 rdba = ring->dma; in igc_configure_rx_ring()
635 xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); in igc_configure_rx_ring()
636 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); in igc_configure_rx_ring()
637 if (ring->xsk_pool) { in igc_configure_rx_ring()
638 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in igc_configure_rx_ring()
641 xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq); in igc_configure_rx_ring()
643 WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, in igc_configure_rx_ring()
659 ring->count * sizeof(union igc_adv_rx_desc)); in igc_configure_rx_ring()
662 ring->tail = adapter->io_addr + IGC_RDT(reg_idx); in igc_configure_rx_ring()
664 writel(0, ring->tail); in igc_configure_rx_ring()
666 /* reset next-to-use/clean to place SW in sync with hardware */ in igc_configure_rx_ring()
667 ring->next_to_clean = 0; in igc_configure_rx_ring()
668 ring->next_to_use = 0; in igc_configure_rx_ring()
670 if (ring->xsk_pool) in igc_configure_rx_ring()
671 buf_size = xsk_pool_get_rx_frame_size(ring->xsk_pool); in igc_configure_rx_ring()
691 memset(ring->rx_buffer_info, 0, in igc_configure_rx_ring()
692 sizeof(struct igc_rx_buffer) * ring->count); in igc_configure_rx_ring()
696 rx_desc->wb.upper.length = 0; in igc_configure_rx_ring()
705 * igc_configure_rx - Configure receive Unit after Reset
717 for (i = 0; i < adapter->num_rx_queues; i++) in igc_configure_rx()
718 igc_configure_rx_ring(adapter, adapter->rx_ring[i]); in igc_configure_rx()
722 * igc_configure_tx_ring - Configure transmit ring after Reset
731 struct igc_hw *hw = &adapter->hw; in igc_configure_tx_ring()
732 int reg_idx = ring->reg_idx; in igc_configure_tx_ring()
733 u64 tdba = ring->dma; in igc_configure_tx_ring()
736 ring->xsk_pool = igc_get_xsk_pool(adapter, ring); in igc_configure_tx_ring()
743 ring->count * sizeof(union igc_adv_tx_desc)); in igc_configure_tx_ring()
748 ring->tail = adapter->io_addr + IGC_TDT(reg_idx); in igc_configure_tx_ring()
750 writel(0, ring->tail); in igc_configure_tx_ring()
761 * igc_configure_tx - Configure transmit Unit after Reset
770 for (i = 0; i < adapter->num_tx_queues; i++) in igc_configure_tx()
771 igc_configure_tx_ring(adapter, adapter->tx_ring[i]); in igc_configure_tx()
775 * igc_setup_mrqc - configure the multiple receive queue control registers
780 struct igc_hw *hw = &adapter->hw; in igc_setup_mrqc()
789 num_rx_queues = adapter->rss_queues; in igc_setup_mrqc()
791 if (adapter->rss_indir_tbl_init != num_rx_queues) { in igc_setup_mrqc()
793 adapter->rss_indir_tbl[j] = in igc_setup_mrqc()
795 adapter->rss_indir_tbl_init = num_rx_queues; in igc_setup_mrqc()
821 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP) in igc_setup_mrqc()
823 if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP) in igc_setup_mrqc()
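A sketch of how the RSS indirection table is filled above, spreading entries evenly over the active Rx queues; the table size of 128 is an assumption to be checked against IGC_RETA_SIZE in igc.h:

#define IGC_RETA_SIZE 128	/* assumed table size */

static void fill_reta(unsigned char reta[IGC_RETA_SIZE], unsigned int num_rx_queues)
{
	unsigned int j;

	/* entry j maps to queue (j * num_rx_queues) / IGC_RETA_SIZE */
	for (j = 0; j < IGC_RETA_SIZE; j++)
		reta[j] = (j * num_rx_queues) / IGC_RETA_SIZE;
}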
832 * igc_setup_rctl - configure the receive control registers
837 struct igc_hw *hw = &adapter->hw; in igc_setup_rctl()
846 (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT); in igc_setup_rctl()
859 /* disable queue 0 to prevent tail write w/o re-config */ in igc_setup_rctl()
863 if (adapter->netdev->features & NETIF_F_RXALL) { in igc_setup_rctl()
879 * igc_setup_tctl - configure the transmit control registers
884 struct igc_hw *hw = &adapter->hw; in igc_setup_tctl()
903 * igc_set_mac_filter_hw() - Set MAC address filter in hardware
908 * @queue: If non-negative, queue assignment feature is enabled and frames
916 struct net_device *dev = adapter->netdev; in igc_set_mac_filter_hw()
917 struct igc_hw *hw = &adapter->hw; in igc_set_mac_filter_hw()
920 if (WARN_ON(index >= hw->mac.rar_entry_count)) in igc_set_mac_filter_hw()
946 * igc_clear_mac_filter_hw() - Clear MAC address filter in hardware
952 struct net_device *dev = adapter->netdev; in igc_clear_mac_filter_hw()
953 struct igc_hw *hw = &adapter->hw; in igc_clear_mac_filter_hw()
955 if (WARN_ON(index >= hw->mac.rar_entry_count)) in igc_clear_mac_filter_hw()
967 struct net_device *dev = adapter->netdev; in igc_set_default_mac_filter()
968 u8 *addr = adapter->hw.mac.addr; in igc_set_default_mac_filter()
972 igc_set_mac_filter_hw(adapter, 0, IGC_MAC_FILTER_TYPE_DST, addr, -1); in igc_set_default_mac_filter()
976 * igc_set_mac - Change the Ethernet Address of the NIC
985 struct igc_hw *hw = &adapter->hw; in igc_set_mac()
988 if (!is_valid_ether_addr(addr->sa_data)) in igc_set_mac()
989 return -EADDRNOTAVAIL; in igc_set_mac()
991 eth_hw_addr_set(netdev, addr->sa_data); in igc_set_mac()
992 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); in igc_set_mac()
1001 * igc_write_mc_addr_list - write multicast addresses to MTA
1005 * Returns: -ENOMEM on failure
1012 struct igc_hw *hw = &adapter->hw; in igc_write_mc_addr_list()
1025 return -ENOMEM; in igc_write_mc_addr_list()
1030 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); in igc_write_mc_addr_list()
1041 struct igc_adapter *adapter = netdev_priv(ring->netdev); in igc_tx_launchtime()
1042 ktime_t cycle_time = adapter->cycle_time; in igc_tx_launchtime()
1043 ktime_t base_time = adapter->base_time; in igc_tx_launchtime()
1055 if (baset_est != ring->last_ff_cycle) { in igc_tx_launchtime()
1057 ring->last_ff_cycle = baset_est; in igc_tx_launchtime()
1059 if (ktime_compare(end_of_cycle, ring->last_tx_cycle) > 0) in igc_tx_launchtime()
1070 netdev_warn(ring->netdev, "Packet with txtime=%llu may not be honoured\n", in igc_tx_launchtime()
1073 ring->last_tx_cycle = end_of_cycle; in igc_tx_launchtime()
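A rough sketch of the cycle arithmetic above using plain nanosecond integers (the driver itself works with ktime helpers and 64-bit division): find the start of the current Qbv cycle and express the requested txtime relative to it.

#include <stdint.h>

static int64_t launch_offset_ns(int64_t txtime, int64_t base_time,
				int64_t cycle_time, int64_t now)
{
	/* assumes now >= base_time and cycle_time > 0 */
	int64_t n = (now - base_time) / cycle_time;	/* completed cycles so far */
	int64_t cycle_start = base_time + n * cycle_time;

	/* launch time is programmed relative to the start of the current cycle */
	return txtime - cycle_start;
}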
1093 dma = dma_map_single(ring->dev, skb->data, size, DMA_TO_DEVICE); in igc_init_empty_frame()
1094 if (dma_mapping_error(ring->dev, dma)) { in igc_init_empty_frame()
1095 netdev_err_once(ring->netdev, "Failed to map DMA for TX\n"); in igc_init_empty_frame()
1096 return -ENOMEM; in igc_init_empty_frame()
1099 buffer->skb = skb; in igc_init_empty_frame()
1100 buffer->protocol = 0; in igc_init_empty_frame()
1101 buffer->bytecount = skb->len; in igc_init_empty_frame()
1102 buffer->gso_segs = 1; in igc_init_empty_frame()
1103 buffer->time_stamp = jiffies; in igc_init_empty_frame()
1104 dma_unmap_len_set(buffer, len, skb->len); in igc_init_empty_frame()
1119 return -EBUSY; in igc_init_tx_empty_descriptor()
1127 first->bytecount; in igc_init_tx_empty_descriptor()
1128 olinfo_status = first->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; in igc_init_tx_empty_descriptor()
1130 desc = IGC_TX_DESC(ring, ring->next_to_use); in igc_init_tx_empty_descriptor()
1131 desc->read.cmd_type_len = cpu_to_le32(cmd_type); in igc_init_tx_empty_descriptor()
1132 desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igc_init_tx_empty_descriptor()
1133 desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(first, dma)); in igc_init_tx_empty_descriptor()
1135 netdev_tx_sent_queue(txring_txq(ring), skb->len); in igc_init_tx_empty_descriptor()
1137 first->next_to_watch = desc; in igc_init_tx_empty_descriptor()
1139 ring->next_to_use++; in igc_init_tx_empty_descriptor()
1140 if (ring->next_to_use == ring->count) in igc_init_tx_empty_descriptor()
1141 ring->next_to_use = 0; in igc_init_tx_empty_descriptor()
1154 u16 i = tx_ring->next_to_use; in igc_tx_ctxtdesc()
1159 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in igc_tx_ctxtdesc()
1165 if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) in igc_tx_ctxtdesc()
1166 mss_l4len_idx |= tx_ring->reg_idx << 4; in igc_tx_ctxtdesc()
1171 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); in igc_tx_ctxtdesc()
1172 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); in igc_tx_ctxtdesc()
1173 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); in igc_tx_ctxtdesc()
1174 context_desc->launch_time = launch_time; in igc_tx_ctxtdesc()
1180 struct sk_buff *skb = first->skb; in igc_tx_csum()
1184 if (skb->ip_summed != CHECKSUM_PARTIAL) { in igc_tx_csum()
1186 if (!(first->tx_flags & IGC_TX_FLAGS_VLAN) && in igc_tx_csum()
1187 !tx_ring->launchtime_enable) in igc_tx_csum()
1192 switch (skb->csum_offset) { in igc_tx_csum()
1211 first->tx_flags |= IGC_TX_FLAGS_CSUM; in igc_tx_csum()
1212 vlan_macip_lens = skb_checksum_start_offset(skb) - in igc_tx_csum()
1216 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; in igc_tx_csum()
1224 struct net_device *netdev = tx_ring->netdev; in __igc_maybe_stop_tx()
1226 netif_stop_subqueue(netdev, tx_ring->queue_index); in __igc_maybe_stop_tx()
1235 return -EBUSY; in __igc_maybe_stop_tx()
1238 netif_wake_subqueue(netdev, tx_ring->queue_index); in __igc_maybe_stop_tx()
1240 u64_stats_update_begin(&tx_ring->tx_syncp2); in __igc_maybe_stop_tx()
1241 tx_ring->tx_stats.restart_queue2++; in __igc_maybe_stop_tx()
1242 u64_stats_update_end(&tx_ring->tx_syncp2); in __igc_maybe_stop_tx()
1290 cmd_type ^= IGC_SET_FLAG(skb->no_fcs, 1, IGC_ADVTXD_DCMD_IFCS); in igc_tx_cmd_type()
1313 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igc_tx_olinfo_status()
1320 struct sk_buff *skb = first->skb; in igc_tx_map()
1323 u32 tx_flags = first->tx_flags; in igc_tx_map()
1325 u16 i = tx_ring->next_to_use; in igc_tx_map()
1333 igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); in igc_tx_map()
1336 data_len = skb->data_len; in igc_tx_map()
1338 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in igc_tx_map()
1342 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in igc_tx_map()
1343 if (dma_mapping_error(tx_ring->dev, dma)) in igc_tx_map()
1350 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igc_tx_map()
1353 tx_desc->read.cmd_type_len = in igc_tx_map()
1358 if (i == tx_ring->count) { in igc_tx_map()
1362 tx_desc->read.olinfo_status = 0; in igc_tx_map()
1365 size -= IGC_MAX_DATA_PER_TXD; in igc_tx_map()
1367 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igc_tx_map()
1373 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size); in igc_tx_map()
1377 if (i == tx_ring->count) { in igc_tx_map()
1381 tx_desc->read.olinfo_status = 0; in igc_tx_map()
1384 data_len -= size; in igc_tx_map()
1386 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, in igc_tx_map()
1389 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_tx_map()
1394 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); in igc_tx_map()
1396 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in igc_tx_map()
1399 first->time_stamp = jiffies; in igc_tx_map()
1404 * are new descriptors to fetch. (Only applicable for weak-ordered in igc_tx_map()
1405 * memory model archs, such as IA-64). in igc_tx_map()
1413 first->next_to_watch = tx_desc; in igc_tx_map()
1416 if (i == tx_ring->count) in igc_tx_map()
1419 tx_ring->next_to_use = i; in igc_tx_map()
1425 writel(i, tx_ring->tail); in igc_tx_map()
1430 netdev_err(tx_ring->netdev, "TX DMA map failed\n"); in igc_tx_map()
1431 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_tx_map()
1436 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_tx_map()
1438 if (i-- == 0) in igc_tx_map()
1439 i += tx_ring->count; in igc_tx_map()
1440 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_tx_map()
1444 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_tx_map()
1446 dev_kfree_skb_any(tx_buffer->skb); in igc_tx_map()
1447 tx_buffer->skb = NULL; in igc_tx_map()
1449 tx_ring->next_to_use = i; in igc_tx_map()
1451 return -1; in igc_tx_map()
1460 struct sk_buff *skb = first->skb; in igc_tso()
1474 if (skb->ip_summed != CHECKSUM_PARTIAL) in igc_tso()
1491 if (ip.v4->version == 4) { in igc_tso()
1493 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); in igc_tso()
1498 ip.v4->check = csum_fold(csum_partial(trans_start, in igc_tso()
1499 csum_start - trans_start, in igc_tso()
1503 ip.v4->tot_len = 0; in igc_tso()
1504 first->tx_flags |= IGC_TX_FLAGS_TSO | in igc_tso()
1508 ip.v6->payload_len = 0; in igc_tso()
1509 first->tx_flags |= IGC_TX_FLAGS_TSO | in igc_tso()
1514 l4_offset = l4.hdr - skb->data; in igc_tso()
1517 paylen = skb->len - l4_offset; in igc_tso()
1520 *hdr_len = (l4.tcp->doff * 4) + l4_offset; in igc_tso()
1521 csum_replace_by_diff(&l4.tcp->check, in igc_tso()
1526 csum_replace_by_diff(&l4.udp->check, in igc_tso()
1531 first->gso_segs = skb_shinfo(skb)->gso_segs; in igc_tso()
1532 first->bytecount += (first->gso_segs - 1) * *hdr_len; in igc_tso()
1534 /* MSS L4LEN IDX */ in igc_tso()
1535 mss_l4len_idx = (*hdr_len - l4_offset) << IGC_ADVTXD_L4LEN_SHIFT; in igc_tso()
1536 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IGC_ADVTXD_MSS_SHIFT; in igc_tso()
1539 vlan_macip_lens = l4.hdr - ip.hdr; in igc_tso()
1540 vlan_macip_lens |= (ip.hdr - skb->data) << IGC_ADVTXD_MACLEN_SHIFT; in igc_tso()
1541 vlan_macip_lens |= first->tx_flags & IGC_TX_FLAGS_VLAN_MASK; in igc_tso()
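A sketch of how the MSS/L4LEN word above is packed into the context descriptor; the shift values are assumptions quoted from memory and should be verified against igc_defines.h:

#include <stdint.h>

#define IGC_ADVTXD_L4LEN_SHIFT	8	/* assumed */
#define IGC_ADVTXD_MSS_SHIFT	16	/* assumed */

static uint32_t pack_mss_l4len(uint32_t l4len, uint32_t mss)
{
	/* L4 header length and MSS share one 32-bit context descriptor field */
	return (l4len << IGC_ADVTXD_L4LEN_SHIFT) | (mss << IGC_ADVTXD_MSS_SHIFT);
}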
1554 struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i]; in igc_request_tx_tstamp()
1556 if (tstamp->skb) in igc_request_tx_tstamp()
1559 tstamp->skb = skb_get(skb); in igc_request_tx_tstamp()
1560 tstamp->start = jiffies; in igc_request_tx_tstamp()
1561 *flags = tstamp->flags; in igc_request_tx_tstamp()
1572 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); in igc_xmit_frame_ring()
1590 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) in igc_xmit_frame_ring()
1592 &skb_shinfo(skb)->frags[f])); in igc_xmit_frame_ring()
1599 if (!tx_ring->launchtime_enable) in igc_xmit_frame_ring()
1602 txtime = skb->tstamp; in igc_xmit_frame_ring()
1603 skb->tstamp = ktime_set(0, 0); in igc_xmit_frame_ring()
1611 empty_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in igc_xmit_frame_ring()
1629 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in igc_xmit_frame_ring()
1630 first->type = IGC_TX_BUFFER_TYPE_SKB; in igc_xmit_frame_ring()
1631 first->skb = skb; in igc_xmit_frame_ring()
1632 first->bytecount = skb->len; in igc_xmit_frame_ring()
1633 first->gso_segs = 1; in igc_xmit_frame_ring()
1635 if (adapter->qbv_transition || tx_ring->oper_gate_closed) in igc_xmit_frame_ring()
1638 if (tx_ring->max_sdu > 0 && first->bytecount > tx_ring->max_sdu) { in igc_xmit_frame_ring()
1639 adapter->stats.txdrop++; in igc_xmit_frame_ring()
1643 if (unlikely(test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags) && in igc_xmit_frame_ring()
1644 skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { in igc_xmit_frame_ring()
1652 spin_lock_irqsave(&adapter->ptp_tx_lock, flags); in igc_xmit_frame_ring()
1654 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in igc_xmit_frame_ring()
1656 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP_USE_CYCLES) in igc_xmit_frame_ring()
1659 adapter->tx_hwtstamp_skipped++; in igc_xmit_frame_ring()
1662 spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags); in igc_xmit_frame_ring()
1671 first->tx_flags = tx_flags; in igc_xmit_frame_ring()
1672 first->protocol = protocol; in igc_xmit_frame_ring()
1685 dev_kfree_skb_any(first->skb); in igc_xmit_frame_ring()
1686 first->skb = NULL; in igc_xmit_frame_ring()
1694 unsigned int r_idx = skb->queue_mapping; in igc_tx_queue_mapping()
1696 if (r_idx >= adapter->num_tx_queues) in igc_tx_queue_mapping()
1697 r_idx = r_idx % adapter->num_tx_queues; in igc_tx_queue_mapping()
1699 return adapter->tx_ring[r_idx]; in igc_tx_queue_mapping()
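The mapping above simply folds an out-of-range queue index from the stack back into the adapter's Tx queue count; as a standalone sketch:

static unsigned int map_tx_queue(unsigned int r_idx, unsigned int num_tx_queues)
{
	if (r_idx >= num_tx_queues)
		r_idx %= num_tx_queues;
	return r_idx;
}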
1710 if (skb->len < 17) { in igc_xmit_frame()
1713 skb->len = 17; in igc_xmit_frame()
1730 if (!(ring->netdev->features & NETIF_F_RXCSUM)) in igc_rx_checksum()
1741 if (!(skb->len == 60 && in igc_rx_checksum()
1742 test_bit(IGC_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) { in igc_rx_checksum()
1743 u64_stats_update_begin(&ring->rx_syncp); in igc_rx_checksum()
1744 ring->rx_stats.csum_err++; in igc_rx_checksum()
1745 u64_stats_update_end(&ring->rx_syncp); in igc_rx_checksum()
1753 skb->ip_summed = CHECKSUM_UNNECESSARY; in igc_rx_checksum()
1755 netdev_dbg(ring->netdev, "cksum success: bits %08X\n", in igc_rx_checksum()
1756 le32_to_cpu(rx_desc->wb.upper.status_error)); in igc_rx_checksum()
1772 [11] = PKT_HASH_TYPE_NONE, /* keep array sized for SW bit-mask */
1783 if (ring->netdev->features & NETIF_F_RXHASH) { in igc_rx_hash()
1784 u32 rss_hash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); in igc_rx_hash()
1795 struct net_device *dev = rx_ring->netdev; in igc_rx_vlan()
1798 if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && in igc_rx_vlan()
1801 test_bit(IGC_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags)) in igc_rx_vlan()
1802 vid = be16_to_cpu((__force __be16)rx_desc->wb.upper.vlan); in igc_rx_vlan()
1804 vid = le16_to_cpu(rx_desc->wb.upper.vlan); in igc_rx_vlan()
1811 * igc_process_skb_fields - Populate skb header fields from Rx descriptor
1830 skb_record_rx_queue(skb, rx_ring->queue_index); in igc_process_skb_fields()
1832 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in igc_process_skb_fields()
1839 struct igc_hw *hw = &adapter->hw; in igc_vlan_mode()
1856 igc_vlan_mode(adapter->netdev, adapter->netdev->features); in igc_restore_vlan()
1865 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in igc_get_rx_buffer()
1868 page_count(rx_buffer->page); in igc_get_rx_buffer()
1872 prefetchw(rx_buffer->page); in igc_get_rx_buffer()
1875 dma_sync_single_range_for_cpu(rx_ring->dev, in igc_get_rx_buffer()
1876 rx_buffer->dma, in igc_get_rx_buffer()
1877 rx_buffer->page_offset, in igc_get_rx_buffer()
1881 rx_buffer->pagecnt_bias--; in igc_get_rx_buffer()
1890 buffer->page_offset ^= truesize; in igc_rx_buffer_flip()
1892 buffer->page_offset += truesize; in igc_rx_buffer_flip()
1913 * igc_add_rx_frag - Add contents of Rx buffer to sk_buff
1919 * This function will add the data contained in rx_buffer->page to the skb.
1935 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, in igc_add_rx_frag()
1936 rx_buffer->page_offset, size, truesize); in igc_add_rx_frag()
1945 unsigned int size = xdp->data_end - xdp->data; in igc_build_skb()
1947 unsigned int metasize = xdp->data - xdp->data_meta; in igc_build_skb()
1951 net_prefetch(xdp->data_meta); in igc_build_skb()
1954 skb = napi_build_skb(xdp->data_hard_start, truesize); in igc_build_skb()
1959 skb_reserve(skb, xdp->data - xdp->data_hard_start); in igc_build_skb()
1972 struct xdp_buff *xdp = &ctx->xdp; in igc_construct_skb()
1973 unsigned int metasize = xdp->data - xdp->data_meta; in igc_construct_skb()
1974 unsigned int size = xdp->data_end - xdp->data; in igc_construct_skb()
1976 void *va = xdp->data; in igc_construct_skb()
1981 net_prefetch(xdp->data_meta); in igc_construct_skb()
1984 skb = napi_alloc_skb(&rx_ring->q_vector->napi, in igc_construct_skb()
1989 if (ctx->rx_ts) { in igc_construct_skb()
1990 skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV; in igc_construct_skb()
1991 skb_hwtstamps(skb)->netdev_data = ctx->rx_ts; in igc_construct_skb()
1997 headlen = eth_get_headlen(skb->dev, va, IGC_RX_HDR_LEN); in igc_construct_skb()
2000 memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta, in igc_construct_skb()
2009 size -= headlen; in igc_construct_skb()
2011 skb_add_rx_frag(skb, 0, rx_buffer->page, in igc_construct_skb()
2012 (va + headlen) - page_address(rx_buffer->page), in igc_construct_skb()
2016 rx_buffer->pagecnt_bias++; in igc_construct_skb()
2023 * igc_reuse_rx_page - page flip buffer and store it back on the ring
2032 u16 nta = rx_ring->next_to_alloc; in igc_reuse_rx_page()
2035 new_buff = &rx_ring->rx_buffer_info[nta]; in igc_reuse_rx_page()
2039 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in igc_reuse_rx_page()
2045 new_buff->dma = old_buff->dma; in igc_reuse_rx_page()
2046 new_buff->page = old_buff->page; in igc_reuse_rx_page()
2047 new_buff->page_offset = old_buff->page_offset; in igc_reuse_rx_page()
2048 new_buff->pagecnt_bias = old_buff->pagecnt_bias; in igc_reuse_rx_page()
2054 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; in igc_can_reuse_rx_page()
2055 struct page *page = rx_buffer->page; in igc_can_reuse_rx_page()
2057 /* avoid re-using remote and pfmemalloc pages */ in igc_can_reuse_rx_page()
2063 if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) in igc_can_reuse_rx_page()
2067 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGC_RXBUFFER_2048) in igc_can_reuse_rx_page()
2069 if (rx_buffer->page_offset > IGC_LAST_OFFSET) in igc_can_reuse_rx_page()
2078 page_ref_add(page, USHRT_MAX - 1); in igc_can_reuse_rx_page()
2079 rx_buffer->pagecnt_bias = USHRT_MAX; in igc_can_reuse_rx_page()
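A simplified sketch of the core reuse test above: a half-page Rx buffer may be page-flipped and reused only while the driver effectively holds the last reference, i.e. the page count minus the driver's bias is no greater than one. The pfmemalloc and page-offset checks are omitted here.

static int can_reuse_half_page(unsigned int page_count, unsigned int pagecnt_bias)
{
	return (page_count - pagecnt_bias) <= 1;
}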
2086 * igc_is_non_eop - process handling of non-EOP buffers
2093 * that this is in fact a non-EOP buffer.
2098 u32 ntc = rx_ring->next_to_clean + 1; in igc_is_non_eop()
2101 ntc = (ntc < rx_ring->count) ? ntc : 0; in igc_is_non_eop()
2102 rx_ring->next_to_clean = ntc; in igc_is_non_eop()
2113 * igc_cleanup_headers - Correct corrupted or empty headers
2135 struct net_device *netdev = rx_ring->netdev; in igc_cleanup_headers()
2137 if (!(netdev->features & NETIF_F_RXALL)) { in igc_cleanup_headers()
2161 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in igc_put_rx_buffer()
2164 __page_frag_cache_drain(rx_buffer->page, in igc_put_rx_buffer()
2165 rx_buffer->pagecnt_bias); in igc_put_rx_buffer()
2169 rx_buffer->page = NULL; in igc_put_rx_buffer()
2174 struct igc_adapter *adapter = rx_ring->q_vector->adapter; in igc_rx_offset()
2187 struct page *page = bi->page; in igc_alloc_mapped_page()
2197 rx_ring->rx_stats.alloc_failed++; in igc_alloc_mapped_page()
2202 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in igc_alloc_mapped_page()
2210 if (dma_mapping_error(rx_ring->dev, dma)) { in igc_alloc_mapped_page()
2213 rx_ring->rx_stats.alloc_failed++; in igc_alloc_mapped_page()
2217 bi->dma = dma; in igc_alloc_mapped_page()
2218 bi->page = page; in igc_alloc_mapped_page()
2219 bi->page_offset = igc_rx_offset(rx_ring); in igc_alloc_mapped_page()
2220 page_ref_add(page, USHRT_MAX - 1); in igc_alloc_mapped_page()
2221 bi->pagecnt_bias = USHRT_MAX; in igc_alloc_mapped_page()
2227 * igc_alloc_rx_buffers - Replace used receive buffers; packet split
2234 u16 i = rx_ring->next_to_use; in igc_alloc_rx_buffers()
2243 bi = &rx_ring->rx_buffer_info[i]; in igc_alloc_rx_buffers()
2244 i -= rx_ring->count; in igc_alloc_rx_buffers()
2253 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in igc_alloc_rx_buffers()
2254 bi->page_offset, bufsz, in igc_alloc_rx_buffers()
2258 * because each write-back erases this info. in igc_alloc_rx_buffers()
2260 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in igc_alloc_rx_buffers()
2267 bi = rx_ring->rx_buffer_info; in igc_alloc_rx_buffers()
2268 i -= rx_ring->count; in igc_alloc_rx_buffers()
2272 rx_desc->wb.upper.length = 0; in igc_alloc_rx_buffers()
2274 cleaned_count--; in igc_alloc_rx_buffers()
2277 i += rx_ring->count; in igc_alloc_rx_buffers()
2279 if (rx_ring->next_to_use != i) { in igc_alloc_rx_buffers()
2281 rx_ring->next_to_use = i; in igc_alloc_rx_buffers()
2284 rx_ring->next_to_alloc = i; in igc_alloc_rx_buffers()
2288 * applicable for weak-ordered memory model archs, in igc_alloc_rx_buffers()
2289 * such as IA-64). in igc_alloc_rx_buffers()
2292 writel(i, rx_ring->tail); in igc_alloc_rx_buffers()
2299 u16 i = ring->next_to_use; in igc_alloc_rx_buffers_zc()
2310 bi = &ring->rx_buffer_info[i]; in igc_alloc_rx_buffers_zc()
2311 i -= ring->count; in igc_alloc_rx_buffers_zc()
2314 bi->xdp = xsk_buff_alloc(ring->xsk_pool); in igc_alloc_rx_buffers_zc()
2315 if (!bi->xdp) { in igc_alloc_rx_buffers_zc()
2320 dma = xsk_buff_xdp_get_dma(bi->xdp); in igc_alloc_rx_buffers_zc()
2321 desc->read.pkt_addr = cpu_to_le64(dma); in igc_alloc_rx_buffers_zc()
2328 bi = ring->rx_buffer_info; in igc_alloc_rx_buffers_zc()
2329 i -= ring->count; in igc_alloc_rx_buffers_zc()
2333 desc->wb.upper.length = 0; in igc_alloc_rx_buffers_zc()
2335 count--; in igc_alloc_rx_buffers_zc()
2338 i += ring->count; in igc_alloc_rx_buffers_zc()
2340 if (ring->next_to_use != i) { in igc_alloc_rx_buffers_zc()
2341 ring->next_to_use = i; in igc_alloc_rx_buffers_zc()
2345 * applicable for weak-ordered memory model archs, in igc_alloc_rx_buffers_zc()
2346 * such as IA-64). in igc_alloc_rx_buffers_zc()
2349 writel(i, ring->tail); in igc_alloc_rx_buffers_zc()
2360 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0; in igc_xdp_init_tx_descriptor()
2361 u16 count, index = ring->next_to_use; in igc_xdp_init_tx_descriptor()
2362 struct igc_tx_buffer *head = &ring->tx_buffer_info[index]; in igc_xdp_init_tx_descriptor()
2365 u32 olinfo_status, len = xdpf->len, cmd_type; in igc_xdp_init_tx_descriptor()
2366 void *data = xdpf->data; in igc_xdp_init_tx_descriptor()
2371 count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i])); in igc_xdp_init_tx_descriptor()
2375 return -EBUSY; in igc_xdp_init_tx_descriptor()
2379 head->bytecount = xdp_get_frame_len(xdpf); in igc_xdp_init_tx_descriptor()
2380 head->type = IGC_TX_BUFFER_TYPE_XDP; in igc_xdp_init_tx_descriptor()
2381 head->gso_segs = 1; in igc_xdp_init_tx_descriptor()
2382 head->xdpf = xdpf; in igc_xdp_init_tx_descriptor()
2384 olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT; in igc_xdp_init_tx_descriptor()
2385 desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igc_xdp_init_tx_descriptor()
2390 dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE); in igc_xdp_init_tx_descriptor()
2391 if (dma_mapping_error(ring->dev, dma)) { in igc_xdp_init_tx_descriptor()
2392 netdev_err_once(ring->netdev, in igc_xdp_init_tx_descriptor()
2403 desc->read.cmd_type_len = cpu_to_le32(cmd_type); in igc_xdp_init_tx_descriptor()
2404 desc->read.buffer_addr = cpu_to_le64(dma); in igc_xdp_init_tx_descriptor()
2406 buffer->protocol = 0; in igc_xdp_init_tx_descriptor()
2408 if (++index == ring->count) in igc_xdp_init_tx_descriptor()
2414 buffer = &ring->tx_buffer_info[index]; in igc_xdp_init_tx_descriptor()
2416 desc->read.olinfo_status = 0; in igc_xdp_init_tx_descriptor()
2418 data = skb_frag_address(&sinfo->frags[i]); in igc_xdp_init_tx_descriptor()
2419 len = skb_frag_size(&sinfo->frags[i]); in igc_xdp_init_tx_descriptor()
2422 desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD); in igc_xdp_init_tx_descriptor()
2424 netdev_tx_sent_queue(txring_txq(ring), head->bytecount); in igc_xdp_init_tx_descriptor()
2426 head->time_stamp = jiffies; in igc_xdp_init_tx_descriptor()
2428 head->next_to_watch = desc; in igc_xdp_init_tx_descriptor()
2429 ring->next_to_use = index; in igc_xdp_init_tx_descriptor()
2435 buffer = &ring->tx_buffer_info[index]; in igc_xdp_init_tx_descriptor()
2437 dma_unmap_page(ring->dev, in igc_xdp_init_tx_descriptor()
2446 index += ring->count; in igc_xdp_init_tx_descriptor()
2447 index--; in igc_xdp_init_tx_descriptor()
2450 return -ENOMEM; in igc_xdp_init_tx_descriptor()
2461 while (index >= adapter->num_tx_queues) in igc_xdp_get_tx_ring()
2462 index -= adapter->num_tx_queues; in igc_xdp_get_tx_ring()
2464 return adapter->tx_ring[index]; in igc_xdp_get_tx_ring()
2476 return -EFAULT; in igc_xdp_xmit_back()
2504 if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) in __igc_xdp_run_prog()
2509 bpf_warn_invalid_xdp_action(adapter->netdev, prog, act); in __igc_xdp_run_prog()
2513 trace_xdp_exception(adapter->netdev, prog, act); in __igc_xdp_run_prog()
2526 prog = READ_ONCE(adapter->xdp_prog); in igc_xdp_run_prog()
2535 return ERR_PTR(-res); in igc_xdp_run_prog()
2546 writel(ring->next_to_use, ring->tail); in igc_flush_tx_descriptors()
2571 struct igc_ring *ring = q_vector->rx.ring; in igc_update_rx_stats()
2573 u64_stats_update_begin(&ring->rx_syncp); in igc_update_rx_stats()
2574 ring->rx_stats.packets += packets; in igc_update_rx_stats()
2575 ring->rx_stats.bytes += bytes; in igc_update_rx_stats()
2576 u64_stats_update_end(&ring->rx_syncp); in igc_update_rx_stats()
2578 q_vector->rx.total_packets += packets; in igc_update_rx_stats()
2579 q_vector->rx.total_bytes += bytes; in igc_update_rx_stats()
2585 struct igc_adapter *adapter = q_vector->adapter; in igc_clean_rx_irq()
2586 struct igc_ring *rx_ring = q_vector->rx.ring; in igc_clean_rx_irq()
2587 struct sk_buff *skb = rx_ring->skb; in igc_clean_rx_irq()
2605 rx_desc = IGC_RX_DESC(rx_ring, rx_ring->next_to_clean); in igc_clean_rx_irq()
2606 size = le16_to_cpu(rx_desc->wb.upper.length); in igc_clean_rx_irq()
2619 pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; in igc_clean_rx_irq()
2624 size -= IGC_TS_HDR_LEN; in igc_clean_rx_irq()
2628 xdp_init_buff(&ctx.xdp, truesize, &rx_ring->xdp_rxq); in igc_clean_rx_irq()
2629 xdp_prepare_buff(&ctx.xdp, pktbuf - igc_rx_offset(rx_ring), in igc_clean_rx_irq()
2639 unsigned int xdp_res = -PTR_ERR(skb); in igc_clean_rx_irq()
2643 rx_buffer->pagecnt_bias++; in igc_clean_rx_irq()
2663 rx_ring->rx_stats.alloc_failed++; in igc_clean_rx_irq()
2664 rx_buffer->pagecnt_bias++; in igc_clean_rx_irq()
2671 /* fetch next buffer in frame if non-eop */ in igc_clean_rx_irq()
2682 total_bytes += skb->len; in igc_clean_rx_irq()
2687 napi_gro_receive(&q_vector->napi, skb); in igc_clean_rx_irq()
2700 rx_ring->skb = skb; in igc_clean_rx_irq()
2713 unsigned int totalsize = xdp->data_end - xdp->data_meta; in igc_construct_skb_zc()
2714 unsigned int metasize = xdp->data - xdp->data_meta; in igc_construct_skb_zc()
2717 net_prefetch(xdp->data_meta); in igc_construct_skb_zc()
2719 skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize, in igc_construct_skb_zc()
2724 memcpy(__skb_put(skb, totalsize), xdp->data_meta, in igc_construct_skb_zc()
2740 struct igc_ring *ring = q_vector->rx.ring; in igc_dispatch_skb_zc()
2745 ring->rx_stats.alloc_failed++; in igc_dispatch_skb_zc()
2750 skb_hwtstamps(skb)->hwtstamp = timestamp; in igc_dispatch_skb_zc()
2756 napi_gro_receive(&q_vector->napi, skb); in igc_dispatch_skb_zc()
2763 * igc_xdp_buff fields fall into xdp_buff_xsk->cb in xsk_buff_to_igc_ctx()
2770 struct igc_adapter *adapter = q_vector->adapter; in igc_clean_rx_irq_zc()
2771 struct igc_ring *ring = q_vector->rx.ring; in igc_clean_rx_irq_zc()
2774 u16 ntc = ring->next_to_clean; in igc_clean_rx_irq_zc()
2781 prog = READ_ONCE(adapter->xdp_prog); in igc_clean_rx_irq_zc()
2792 size = le16_to_cpu(desc->wb.upper.length); in igc_clean_rx_irq_zc()
2802 bi = &ring->rx_buffer_info[ntc]; in igc_clean_rx_irq_zc()
2804 ctx = xsk_buff_to_igc_ctx(bi->xdp); in igc_clean_rx_irq_zc()
2805 ctx->rx_desc = desc; in igc_clean_rx_irq_zc()
2808 ctx->rx_ts = bi->xdp->data; in igc_clean_rx_irq_zc()
2810 bi->xdp->data += IGC_TS_HDR_LEN; in igc_clean_rx_irq_zc()
2815 bi->xdp->data_meta += IGC_TS_HDR_LEN; in igc_clean_rx_irq_zc()
2816 size -= IGC_TS_HDR_LEN; in igc_clean_rx_irq_zc()
2819 bi->xdp->data_end = bi->xdp->data + size; in igc_clean_rx_irq_zc()
2820 xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool); in igc_clean_rx_irq_zc()
2822 res = __igc_xdp_run_prog(adapter, prog, bi->xdp); in igc_clean_rx_irq_zc()
2825 igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); in igc_clean_rx_irq_zc()
2828 xsk_buff_free(bi->xdp); in igc_clean_rx_irq_zc()
2836 bi->xdp = NULL; in igc_clean_rx_irq_zc()
2841 if (ntc == ring->count) in igc_clean_rx_irq_zc()
2845 ring->next_to_clean = ntc; in igc_clean_rx_irq_zc()
2856 if (xsk_uses_need_wakeup(ring->xsk_pool)) { in igc_clean_rx_irq_zc()
2857 if (failure || ring->next_to_clean == ring->next_to_use) in igc_clean_rx_irq_zc()
2858 xsk_set_rx_need_wakeup(ring->xsk_pool); in igc_clean_rx_irq_zc()
2860 xsk_clear_rx_need_wakeup(ring->xsk_pool); in igc_clean_rx_irq_zc()
2870 struct igc_ring *ring = q_vector->tx.ring; in igc_update_tx_stats()
2872 u64_stats_update_begin(&ring->tx_syncp); in igc_update_tx_stats()
2873 ring->tx_stats.bytes += bytes; in igc_update_tx_stats()
2874 ring->tx_stats.packets += packets; in igc_update_tx_stats()
2875 u64_stats_update_end(&ring->tx_syncp); in igc_update_tx_stats()
2877 q_vector->tx.total_bytes += bytes; in igc_update_tx_stats()
2878 q_vector->tx.total_packets += packets; in igc_update_tx_stats()
2883 struct xsk_buff_pool *pool = ring->xsk_pool; in igc_xdp_xmit_zc()
2890 if (!netif_carrier_ok(ring->netdev)) in igc_xdp_xmit_zc()
2898 ntu = ring->next_to_use; in igc_xdp_xmit_zc()
2901 while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { in igc_xdp_xmit_zc()
2915 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); in igc_xdp_xmit_zc()
2916 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); in igc_xdp_xmit_zc()
2917 tx_desc->read.buffer_addr = cpu_to_le64(dma); in igc_xdp_xmit_zc()
2919 bi = &ring->tx_buffer_info[ntu]; in igc_xdp_xmit_zc()
2920 bi->type = IGC_TX_BUFFER_TYPE_XSK; in igc_xdp_xmit_zc()
2921 bi->protocol = 0; in igc_xdp_xmit_zc()
2922 bi->bytecount = xdp_desc.len; in igc_xdp_xmit_zc()
2923 bi->gso_segs = 1; in igc_xdp_xmit_zc()
2924 bi->time_stamp = jiffies; in igc_xdp_xmit_zc()
2925 bi->next_to_watch = tx_desc; in igc_xdp_xmit_zc()
2930 if (ntu == ring->count) in igc_xdp_xmit_zc()
2934 ring->next_to_use = ntu; in igc_xdp_xmit_zc()
2944 * igc_clean_tx_irq - Reclaim resources after transmit completes
2952 struct igc_adapter *adapter = q_vector->adapter; in igc_clean_tx_irq()
2954 unsigned int budget = q_vector->tx.work_limit; in igc_clean_tx_irq()
2955 struct igc_ring *tx_ring = q_vector->tx.ring; in igc_clean_tx_irq()
2956 unsigned int i = tx_ring->next_to_clean; in igc_clean_tx_irq()
2961 if (test_bit(__IGC_DOWN, &adapter->state)) in igc_clean_tx_irq()
2964 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_clean_tx_irq()
2966 i -= tx_ring->count; in igc_clean_tx_irq()
2969 union igc_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; in igc_clean_tx_irq()
2979 if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD))) in igc_clean_tx_irq()
2983 tx_buffer->next_to_watch = NULL; in igc_clean_tx_irq()
2986 total_bytes += tx_buffer->bytecount; in igc_clean_tx_irq()
2987 total_packets += tx_buffer->gso_segs; in igc_clean_tx_irq()
2989 switch (tx_buffer->type) { in igc_clean_tx_irq()
2994 xdp_return_frame(tx_buffer->xdpf); in igc_clean_tx_irq()
2995 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_irq()
2998 napi_consume_skb(tx_buffer->skb, napi_budget); in igc_clean_tx_irq()
2999 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_irq()
3002 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); in igc_clean_tx_irq()
3012 i -= tx_ring->count; in igc_clean_tx_irq()
3013 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_irq()
3019 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_irq()
3027 i -= tx_ring->count; in igc_clean_tx_irq()
3028 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_irq()
3036 budget--; in igc_clean_tx_irq()
3042 i += tx_ring->count; in igc_clean_tx_irq()
3043 tx_ring->next_to_clean = i; in igc_clean_tx_irq()
3047 if (tx_ring->xsk_pool) { in igc_clean_tx_irq()
3049 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); in igc_clean_tx_irq()
3050 if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) in igc_clean_tx_irq()
3051 xsk_set_tx_need_wakeup(tx_ring->xsk_pool); in igc_clean_tx_irq()
3055 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { in igc_clean_tx_irq()
3056 struct igc_hw *hw = &adapter->hw; in igc_clean_tx_irq()
3061 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igc_clean_tx_irq()
3062 if (tx_buffer->next_to_watch && in igc_clean_tx_irq()
3063 time_after(jiffies, tx_buffer->time_stamp + in igc_clean_tx_irq()
3064 (adapter->tx_timeout_factor * HZ)) && in igc_clean_tx_irq()
3066 (rd32(IGC_TDH(tx_ring->reg_idx)) != readl(tx_ring->tail)) && in igc_clean_tx_irq()
3067 !tx_ring->oper_gate_closed) { in igc_clean_tx_irq()
3069 netdev_err(tx_ring->netdev, in igc_clean_tx_irq()
3081 tx_ring->queue_index, in igc_clean_tx_irq()
3082 rd32(IGC_TDH(tx_ring->reg_idx)), in igc_clean_tx_irq()
3083 readl(tx_ring->tail), in igc_clean_tx_irq()
3084 tx_ring->next_to_use, in igc_clean_tx_irq()
3085 tx_ring->next_to_clean, in igc_clean_tx_irq()
3086 tx_buffer->time_stamp, in igc_clean_tx_irq()
3087 tx_buffer->next_to_watch, in igc_clean_tx_irq()
3089 tx_buffer->next_to_watch->wb.status); in igc_clean_tx_irq()
3090 netif_stop_subqueue(tx_ring->netdev, in igc_clean_tx_irq()
3091 tx_ring->queue_index); in igc_clean_tx_irq()
3100 netif_carrier_ok(tx_ring->netdev) && in igc_clean_tx_irq()
3106 if (__netif_subqueue_stopped(tx_ring->netdev, in igc_clean_tx_irq()
3107 tx_ring->queue_index) && in igc_clean_tx_irq()
3108 !(test_bit(__IGC_DOWN, &adapter->state))) { in igc_clean_tx_irq()
3109 netif_wake_subqueue(tx_ring->netdev, in igc_clean_tx_irq()
3110 tx_ring->queue_index); in igc_clean_tx_irq()
3112 u64_stats_update_begin(&tx_ring->tx_syncp); in igc_clean_tx_irq()
3113 tx_ring->tx_stats.restart_queue++; in igc_clean_tx_irq()
3114 u64_stats_update_end(&tx_ring->tx_syncp); in igc_clean_tx_irq()
3124 struct igc_hw *hw = &adapter->hw; in igc_find_mac_filter()
3125 int max_entries = hw->mac.rar_entry_count; in igc_find_mac_filter()
3146 return -1; in igc_find_mac_filter()
3151 struct igc_hw *hw = &adapter->hw; in igc_get_avail_mac_filter_slot()
3152 int max_entries = hw->mac.rar_entry_count; in igc_get_avail_mac_filter_slot()
3163 return -1; in igc_get_avail_mac_filter_slot()
3167 * igc_add_mac_filter() - Add MAC address filter
3171 * @queue: If non-negative, queue assignment feature is enabled and frames
3181 struct net_device *dev = adapter->netdev; in igc_add_mac_filter()
3190 return -ENOSPC; in igc_add_mac_filter()
3202 * igc_del_mac_filter() - Delete MAC address filter
3210 struct net_device *dev = adapter->netdev; in igc_del_mac_filter()
3224 igc_set_mac_filter_hw(adapter, 0, type, addr, -1); in igc_del_mac_filter()
3236 * igc_add_vlan_prio_filter() - Add VLAN priority filter
3246 struct net_device *dev = adapter->netdev; in igc_add_vlan_prio_filter()
3247 struct igc_hw *hw = &adapter->hw; in igc_add_vlan_prio_filter()
3254 return -EEXIST; in igc_add_vlan_prio_filter()
3268 * igc_del_vlan_prio_filter() - Delete VLAN priority filter
3274 struct igc_hw *hw = &adapter->hw; in igc_del_vlan_prio_filter()
3284 netdev_dbg(adapter->netdev, "Delete VLAN priority filter: prio %d\n", in igc_del_vlan_prio_filter()
3290 struct igc_hw *hw = &adapter->hw; in igc_get_avail_etype_filter_slot()
3300 return -1; in igc_get_avail_etype_filter_slot()
3304 * igc_add_etype_filter() - Add ethertype filter
3307 * @queue: If non-negative, queue assignment feature is enabled and frames
3316 struct igc_hw *hw = &adapter->hw; in igc_add_etype_filter()
3322 return -ENOSPC; in igc_add_etype_filter()
3339 netdev_dbg(adapter->netdev, "Add ethertype filter: etype %04x queue %d\n", in igc_add_etype_filter()
3346 struct igc_hw *hw = &adapter->hw; in igc_find_etype_filter()
3356 return -1; in igc_find_etype_filter()
3360 * igc_del_etype_filter() - Delete ethertype filter
3366 struct igc_hw *hw = &adapter->hw; in igc_del_etype_filter()
3375 netdev_dbg(adapter->netdev, "Delete ethertype filter: etype %04x\n", in igc_del_etype_filter()
3383 struct igc_hw *hw = &adapter->hw; in igc_flex_filter_select()
3387 if (input->index >= MAX_FLEX_FILTER) { in igc_flex_filter_select()
3388 dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n"); in igc_flex_filter_select()
3389 return -EINVAL; in igc_flex_filter_select()
3395 switch (input->index) { in igc_flex_filter_select()
3412 fhft_index = input->index % 8; in igc_flex_filter_select()
3415 IGC_FHFT_EXT(fhft_index - 4); in igc_flex_filter_select()
3423 struct device *dev = &adapter->pdev->dev; in igc_write_flex_filter_ll()
3424 struct igc_hw *hw = &adapter->hw; in igc_write_flex_filter_ll()
3425 u8 *data = input->data; in igc_write_flex_filter_ll()
3426 u8 *mask = input->mask; in igc_write_flex_filter_ll()
3436 if (input->length % 8 != 0) { in igc_write_flex_filter_ll()
3438 return -EINVAL; in igc_write_flex_filter_ll()
3454 queuing = input->length & IGC_FHFT_LENGTH_MASK; in igc_write_flex_filter_ll()
3455 queuing |= FIELD_PREP(IGC_FHFT_QUEUE_MASK, input->rx_queue); in igc_write_flex_filter_ll()
3456 queuing |= FIELD_PREP(IGC_FHFT_PRIO_MASK, input->prio); in igc_write_flex_filter_ll()
3458 if (input->immediate_irq) in igc_write_flex_filter_ll()
3461 if (input->drop) in igc_write_flex_filter_ll()
3495 if (input->index > 8) { in igc_write_flex_filter_ll()
3496 /* Filters 0-7 are enabled via WUFC. The other 24 filters are not. */ in igc_write_flex_filter_ll()
3499 wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); in igc_write_flex_filter_ll()
3503 wufc |= (IGC_WUFC_FLX0 << input->index); in igc_write_flex_filter_ll()
3507 dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n", in igc_write_flex_filter_ll()
3508 input->index); in igc_write_flex_filter_ll()
3520 memcpy(&flex->data[offset], src, len); in igc_flex_filter_add_field()
3529 flex->mask[idx / 8] |= BIT(idx % 8); in igc_flex_filter_add_field()
3534 flex->mask[idx / 8] |= BIT(idx % 8); in igc_flex_filter_add_field()
3540 struct igc_hw *hw = &adapter->hw; in igc_find_avail_flex_filter_slot()
3552 if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) in igc_find_avail_flex_filter_slot()
3557 return -ENOSPC; in igc_find_avail_flex_filter_slot()
3562 struct igc_hw *hw = &adapter->hw; in igc_flex_filter_in_use()
3581 struct igc_nfc_filter *filter = &rule->filter; in igc_add_flex_filter()
3588 return -ENOSPC; in igc_add_flex_filter()
3591 * -> dest_mac [6] in igc_add_flex_filter()
3592 * -> src_mac [6] in igc_add_flex_filter()
3593 * -> tpid [2] in igc_add_flex_filter()
3594 * -> vlan tci [2] in igc_add_flex_filter()
3595 * -> ether type [2] in igc_add_flex_filter()
3596 * -> user data [8] in igc_add_flex_filter()
3597 * -> = 26 bytes => 32 length in igc_add_flex_filter()
3601 flex.rx_queue = rule->action; in igc_add_flex_filter()
3603 vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; in igc_add_flex_filter()
3608 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) in igc_add_flex_filter()
3609 igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, in igc_add_flex_filter()
3613 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) in igc_add_flex_filter()
3614 igc_flex_filter_add_field(&flex, &filter->src_addr, 6, in igc_add_flex_filter()
3618 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) in igc_add_flex_filter()
3619 igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12, in igc_add_flex_filter()
3620 sizeof(filter->vlan_etype), in igc_add_flex_filter()
3624 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) in igc_add_flex_filter()
3625 igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, in igc_add_flex_filter()
3626 sizeof(filter->vlan_tci), NULL); in igc_add_flex_filter()
3629 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { in igc_add_flex_filter()
3630 __be16 etype = cpu_to_be16(filter->etype); in igc_add_flex_filter()
3637 if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) in igc_add_flex_filter()
3638 igc_flex_filter_add_field(&flex, &filter->user_data, in igc_add_flex_filter()
3640 sizeof(filter->user_data), in igc_add_flex_filter()
3641 filter->user_mask); in igc_add_flex_filter()
3648 filter->flex_index = index; in igc_add_flex_filter()
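The layout comment above adds up to 26 bytes, and a flex filter length must be a multiple of 8, hence the programmed length of 32. A sketch of that rounding rule (the driver itself simply uses the fixed value):

static unsigned int flex_filter_len(unsigned int bytes_used)
{
	/* round up to the next multiple of 8, e.g. 26 -> 32 */
	return (bytes_used + 7) & ~7U;
}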
3656 struct igc_hw *hw = &adapter->hw; in igc_del_flex_filter()
3666 wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); in igc_del_flex_filter()
3689 if (rule->flex) { in igc_enable_nfc_rule()
3693 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { in igc_enable_nfc_rule()
3694 err = igc_add_etype_filter(adapter, rule->filter.etype, in igc_enable_nfc_rule()
3695 rule->action); in igc_enable_nfc_rule()
3700 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) { in igc_enable_nfc_rule()
3702 rule->filter.src_addr, rule->action); in igc_enable_nfc_rule()
3707 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) { in igc_enable_nfc_rule()
3709 rule->filter.dst_addr, rule->action); in igc_enable_nfc_rule()
3714 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { in igc_enable_nfc_rule()
3715 int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci); in igc_enable_nfc_rule()
3717 err = igc_add_vlan_prio_filter(adapter, prio, rule->action); in igc_enable_nfc_rule()
3728 if (rule->flex) { in igc_disable_nfc_rule()
3729 igc_del_flex_filter(adapter, rule->filter.flex_index); in igc_disable_nfc_rule()
3733 if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) in igc_disable_nfc_rule()
3734 igc_del_etype_filter(adapter, rule->filter.etype); in igc_disable_nfc_rule()
3736 if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) { in igc_disable_nfc_rule()
3737 int prio = FIELD_GET(VLAN_PRIO_MASK, rule->filter.vlan_tci); in igc_disable_nfc_rule()
3742 if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) in igc_disable_nfc_rule()
3744 rule->filter.src_addr); in igc_disable_nfc_rule()
3746 if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) in igc_disable_nfc_rule()
3748 rule->filter.dst_addr); in igc_disable_nfc_rule()
3752 * igc_get_nfc_rule() - Get NFC rule
3756 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3765 list_for_each_entry(rule, &adapter->nfc_rule_list, list) { in igc_get_nfc_rule()
3766 if (rule->location == location) in igc_get_nfc_rule()
3768 if (rule->location > location) in igc_get_nfc_rule()
3776 * igc_del_nfc_rule() - Delete NFC rule
3782 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3788 list_del(&rule->list); in igc_del_nfc_rule()
3789 adapter->nfc_rule_count--; in igc_del_nfc_rule()
3798 mutex_lock(&adapter->nfc_rule_lock); in igc_flush_nfc_rules()
3800 list_for_each_entry_safe(rule, tmp, &adapter->nfc_rule_list, list) in igc_flush_nfc_rules()
3803 mutex_unlock(&adapter->nfc_rule_lock); in igc_flush_nfc_rules()
3807 * igc_add_nfc_rule() - Add NFC rule
3813 * Context: Expects adapter->nfc_rule_lock to be held by caller.
3827 list_for_each_entry(cur, &adapter->nfc_rule_list, list) { in igc_add_nfc_rule()
3828 if (cur->location >= rule->location) in igc_add_nfc_rule()
3833 list_add(&rule->list, pred ? &pred->list : &adapter->nfc_rule_list); in igc_add_nfc_rule()
3834 adapter->nfc_rule_count++; in igc_add_nfc_rule()
3842 mutex_lock(&adapter->nfc_rule_lock); in igc_restore_nfc_rules()
3844 list_for_each_entry_reverse(rule, &adapter->nfc_rule_list, list) in igc_restore_nfc_rules()
3847 mutex_unlock(&adapter->nfc_rule_lock); in igc_restore_nfc_rules()
3854 return igc_add_mac_filter(adapter, IGC_MAC_FILTER_TYPE_DST, addr, -1); in igc_uc_sync()
3866 * igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3872 * promiscuous mode, and all-multi behavior.
3877 struct igc_hw *hw = &adapter->hw; in igc_set_rx_mode()
3882 if (netdev->flags & IFF_PROMISC) { in igc_set_rx_mode()
3885 if (netdev->flags & IFF_ALLMULTI) { in igc_set_rx_mode()
3910 if (adapter->max_frame_size <= IGC_MAX_FRAME_BUILD_SKB) in igc_set_rx_mode()
3917 * igc_configure - configure the hardware for RX and TX
3922 struct net_device *netdev = adapter->netdev; in igc_configure()
3940 igc_rx_fifo_flush_base(&adapter->hw); in igc_configure()
3946 for (i = 0; i < adapter->num_rx_queues; i++) { in igc_configure()
3947 struct igc_ring *ring = adapter->rx_ring[i]; in igc_configure()
3949 if (ring->xsk_pool) in igc_configure()
3957 * igc_write_ivar - configure ivar for given MSI-X vector
3983 struct igc_adapter *adapter = q_vector->adapter; in igc_assign_vector()
3984 struct igc_hw *hw = &adapter->hw; in igc_assign_vector()
3988 if (q_vector->rx.ring) in igc_assign_vector()
3989 rx_queue = q_vector->rx.ring->reg_idx; in igc_assign_vector()
3990 if (q_vector->tx.ring) in igc_assign_vector()
3991 tx_queue = q_vector->tx.ring->reg_idx; in igc_assign_vector()
3993 switch (hw->mac.type) { in igc_assign_vector()
4003 q_vector->eims_value = BIT(msix_vector); in igc_assign_vector()
4006 WARN_ONCE(hw->mac.type != igc_i225, "Wrong MAC type\n"); in igc_assign_vector()
4011 adapter->eims_enable_mask |= q_vector->eims_value; in igc_assign_vector()
4014 q_vector->set_itr = 1; in igc_assign_vector()
4018 * igc_configure_msix - Configure MSI-X hardware
4022 * generate MSI-X interrupts.
4026 struct igc_hw *hw = &adapter->hw; in igc_configure_msix()
4030 adapter->eims_enable_mask = 0; in igc_configure_msix()
4033 switch (hw->mac.type) { in igc_configure_msix()
4035 /* Turn on MSI-X capability first, or our settings in igc_configure_msix()
4043 adapter->eims_other = BIT(vector); in igc_configure_msix()
4049 /* do nothing, since nothing else supports MSI-X */ in igc_configure_msix()
4051 } /* switch (hw->mac.type) */ in igc_configure_msix()
4053 adapter->eims_enable_mask |= adapter->eims_other; in igc_configure_msix()
4055 for (i = 0; i < adapter->num_q_vectors; i++) in igc_configure_msix()
4056 igc_assign_vector(adapter->q_vector[i], vector++); in igc_configure_msix()
4062 * igc_irq_enable - Enable default interrupt generation settings
4067 struct igc_hw *hw = &adapter->hw; in igc_irq_enable()
4069 if (adapter->msix_entries) { in igc_irq_enable()
4073 wr32(IGC_EIAC, regval | adapter->eims_enable_mask); in igc_irq_enable()
4075 wr32(IGC_EIAM, regval | adapter->eims_enable_mask); in igc_irq_enable()
4076 wr32(IGC_EIMS, adapter->eims_enable_mask); in igc_irq_enable()
4085 * igc_irq_disable - Mask off interrupt generation on the NIC
4090 struct igc_hw *hw = &adapter->hw; in igc_irq_disable()
4092 if (adapter->msix_entries) { in igc_irq_disable()
4095 wr32(IGC_EIAM, regval & ~adapter->eims_enable_mask); in igc_irq_disable()
4096 wr32(IGC_EIMC, adapter->eims_enable_mask); in igc_irq_disable()
4098 wr32(IGC_EIAC, regval & ~adapter->eims_enable_mask); in igc_irq_disable()
4105 if (adapter->msix_entries) { in igc_irq_disable()
4108 synchronize_irq(adapter->msix_entries[vector++].vector); in igc_irq_disable()
4110 for (i = 0; i < adapter->num_q_vectors; i++) in igc_irq_disable()
4111 synchronize_irq(adapter->msix_entries[vector++].vector); in igc_irq_disable()
4113 synchronize_irq(adapter->pdev->irq); in igc_irq_disable()
4122 * order to conserve interrupts due to limited supply. in igc_set_flag_queue_pairs()
4124 if (adapter->rss_queues > (max_rss_queues / 2)) in igc_set_flag_queue_pairs()
4125 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; in igc_set_flag_queue_pairs()
4127 adapter->flags &= ~IGC_FLAG_QUEUE_PAIRS; in igc_set_flag_queue_pairs()
4140 adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); in igc_init_queue_configuration()
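As a worked example of the pairing heuristic above (assuming a hypothetical 8-CPU host and a 4-queue maximum): rss_queues = min(4, 8) = 4, and since 4 > 4 / 2 the IGC_FLAG_QUEUE_PAIRS flag is set, so each Rx/Tx queue pair shares one interrupt vector instead of using a vector per ring.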
4146 * igc_reset_q_vector - Reset config for interrupt vector
4155 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; in igc_reset_q_vector()
4163 if (q_vector->tx.ring) in igc_reset_q_vector()
4164 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; in igc_reset_q_vector()
4166 if (q_vector->rx.ring) in igc_reset_q_vector()
4167 adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL; in igc_reset_q_vector()
4169 netif_napi_del(&q_vector->napi); in igc_reset_q_vector()
4173 * igc_free_q_vector - Free memory allocated for specific interrupt vector
4181 struct igc_q_vector *q_vector = adapter->q_vector[v_idx]; in igc_free_q_vector()
4183 adapter->q_vector[v_idx] = NULL; in igc_free_q_vector()
4193 * igc_free_q_vectors - Free memory allocated for interrupt vectors
4202 int v_idx = adapter->num_q_vectors; in igc_free_q_vectors()
4204 adapter->num_tx_queues = 0; in igc_free_q_vectors()
4205 adapter->num_rx_queues = 0; in igc_free_q_vectors()
4206 adapter->num_q_vectors = 0; in igc_free_q_vectors()
4208 while (v_idx--) { in igc_free_q_vectors()
4215 * igc_update_itr - update the dynamic ITR value based on statistics
4226 * NOTE: These calculations are only valid when operating in a single-
4232 unsigned int packets = ring_container->total_packets; in igc_update_itr()
4233 unsigned int bytes = ring_container->total_bytes; in igc_update_itr()
4234 u8 itrval = ring_container->itr; in igc_update_itr()
4274 ring_container->total_bytes = 0; in igc_update_itr()
4275 ring_container->total_packets = 0; in igc_update_itr()
4278 ring_container->itr = itrval; in igc_update_itr()
4283 struct igc_adapter *adapter = q_vector->adapter; in igc_set_itr()
4284 u32 new_itr = q_vector->itr_val; in igc_set_itr()
4287 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ in igc_set_itr()
4288 switch (adapter->link_speed) { in igc_set_itr()
4298 igc_update_itr(q_vector, &q_vector->tx); in igc_set_itr()
4299 igc_update_itr(q_vector, &q_vector->rx); in igc_set_itr()
4301 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); in igc_set_itr()
4305 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || in igc_set_itr()
4306 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) in igc_set_itr()
4325 if (new_itr != q_vector->itr_val) { in igc_set_itr()
4330 new_itr = new_itr > q_vector->itr_val ? in igc_set_itr()
4331 max((new_itr * q_vector->itr_val) / in igc_set_itr()
4332 (new_itr + (q_vector->itr_val >> 2)), in igc_set_itr()
4340 q_vector->itr_val = new_itr; in igc_set_itr()
4341 q_vector->set_itr = 1; in igc_set_itr()
4347 int v_idx = adapter->num_q_vectors; in igc_reset_interrupt_capability()
4349 if (adapter->msix_entries) { in igc_reset_interrupt_capability()
4350 pci_disable_msix(adapter->pdev); in igc_reset_interrupt_capability()
4351 kfree(adapter->msix_entries); in igc_reset_interrupt_capability()
4352 adapter->msix_entries = NULL; in igc_reset_interrupt_capability()
4353 } else if (adapter->flags & IGC_FLAG_HAS_MSI) { in igc_reset_interrupt_capability()
4354 pci_disable_msi(adapter->pdev); in igc_reset_interrupt_capability()
4357 while (v_idx--) in igc_reset_interrupt_capability()
4362 * igc_set_interrupt_capability - set MSI or MSI-X if supported
4364 * @msix: boolean value for MSI-X capability
4377 adapter->flags |= IGC_FLAG_HAS_MSIX; in igc_set_interrupt_capability()
4380 adapter->num_rx_queues = adapter->rss_queues; in igc_set_interrupt_capability()
4382 adapter->num_tx_queues = adapter->rss_queues; in igc_set_interrupt_capability()
4385 numvecs = adapter->num_rx_queues; in igc_set_interrupt_capability()
4388 if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) in igc_set_interrupt_capability()
4389 numvecs += adapter->num_tx_queues; in igc_set_interrupt_capability()
4392 adapter->num_q_vectors = numvecs; in igc_set_interrupt_capability()
4397 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), in igc_set_interrupt_capability()
4400 if (!adapter->msix_entries) in igc_set_interrupt_capability()
4405 adapter->msix_entries[i].entry = i; in igc_set_interrupt_capability()
4407 err = pci_enable_msix_range(adapter->pdev, in igc_set_interrupt_capability()
4408 adapter->msix_entries, in igc_set_interrupt_capability()
4414 kfree(adapter->msix_entries); in igc_set_interrupt_capability()
4415 adapter->msix_entries = NULL; in igc_set_interrupt_capability()
4420 adapter->flags &= ~IGC_FLAG_HAS_MSIX; in igc_set_interrupt_capability()
4422 adapter->rss_queues = 1; in igc_set_interrupt_capability()
4423 adapter->flags |= IGC_FLAG_QUEUE_PAIRS; in igc_set_interrupt_capability()
4424 adapter->num_rx_queues = 1; in igc_set_interrupt_capability()
4425 adapter->num_tx_queues = 1; in igc_set_interrupt_capability()
4426 adapter->num_q_vectors = 1; in igc_set_interrupt_capability()
4427 if (!pci_enable_msi(adapter->pdev)) in igc_set_interrupt_capability()
4428 adapter->flags |= IGC_FLAG_HAS_MSI; in igc_set_interrupt_capability()
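Putting the vector accounting together (a hedged reading of the fragments above, continuing the hypothetical rss_queues = 4 example): with queue pairs enabled numvecs covers 4 paired Rx/Tx vectors, and one further MSI-X entry serves the link/"other" interrupt handled by igc_msix_other(), so pci_enable_msix_range() is asked for 5 entries. If that request cannot be met, the driver falls back to a single MSI vector with one paired Rx/Tx queue, as the fallback path here shows.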
4432 * igc_update_ring_itr - update the dynamic ITR value based on packet size
4447 struct igc_adapter *adapter = q_vector->adapter; in igc_update_ring_itr()
4448 int new_val = q_vector->itr_val; in igc_update_ring_itr()
4452 /* For non-gigabit speeds, just fix the interrupt rate at 4000 in igc_update_ring_itr()
4453 * ints/sec - ITR timer value of 120 ticks. in igc_update_ring_itr()
4455 switch (adapter->link_speed) { in igc_update_ring_itr()
4464 packets = q_vector->rx.total_packets; in igc_update_ring_itr()
4466 avg_wire_size = q_vector->rx.total_bytes / packets; in igc_update_ring_itr()
4468 packets = q_vector->tx.total_packets; in igc_update_ring_itr()
4471 q_vector->tx.total_bytes / packets); in igc_update_ring_itr()
4483 /* Give a little boost to mid-size frames */ in igc_update_ring_itr()
4491 ((q_vector->rx.ring && adapter->rx_itr_setting == 3) || in igc_update_ring_itr()
4492 (!q_vector->rx.ring && adapter->tx_itr_setting == 3))) in igc_update_ring_itr()
4496 if (new_val != q_vector->itr_val) { in igc_update_ring_itr()
4497 q_vector->itr_val = new_val; in igc_update_ring_itr()
4498 q_vector->set_itr = 1; in igc_update_ring_itr()
4501 q_vector->rx.total_bytes = 0; in igc_update_ring_itr()
4502 q_vector->rx.total_packets = 0; in igc_update_ring_itr()
4503 q_vector->tx.total_bytes = 0; in igc_update_ring_itr()
4504 q_vector->tx.total_packets = 0; in igc_update_ring_itr()
4509 struct igc_adapter *adapter = q_vector->adapter; in igc_ring_irq_enable()
4510 struct igc_hw *hw = &adapter->hw; in igc_ring_irq_enable()
4512 if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) || in igc_ring_irq_enable()
4513 (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) { in igc_ring_irq_enable()
4514 if (adapter->num_q_vectors == 1) in igc_ring_irq_enable()
4520 if (!test_bit(__IGC_DOWN, &adapter->state)) { in igc_ring_irq_enable()
4521 if (adapter->msix_entries) in igc_ring_irq_enable()
4522 wr32(IGC_EIMS, q_vector->eims_value); in igc_ring_irq_enable()
4531 head->ring = ring; in igc_add_ring()
4532 head->count++; in igc_add_ring()
4536 * igc_cache_ring_register - Descriptor ring to register mapping
4539 * Once we know the feature-set enabled for the device, we'll cache
4546 switch (adapter->hw.mac.type) { in igc_cache_ring_register()
4549 for (; i < adapter->num_rx_queues; i++) in igc_cache_ring_register()
4550 adapter->rx_ring[i]->reg_idx = i; in igc_cache_ring_register()
4551 for (; j < adapter->num_tx_queues; j++) in igc_cache_ring_register()
4552 adapter->tx_ring[j]->reg_idx = j; in igc_cache_ring_register()
4558 * igc_poll - NAPI Rx polling callback
4567 struct igc_ring *rx_ring = q_vector->rx.ring; in igc_poll()
4571 if (q_vector->tx.ring) in igc_poll()
4575 int cleaned = rx_ring->xsk_pool ? in igc_poll()
4588 /* Exit the polling mode, but don't re-enable interrupts if stack might in igc_poll()
4589 * poll us due to busy-polling in igc_poll()
4594 return min(work_done, budget - 1); in igc_poll()
4598 * igc_alloc_q_vector - Allocate memory for a single interrupt vector
4607 * We allocate one q_vector. If allocation fails we return -ENOMEM.
4620 return -ENOMEM; in igc_alloc_q_vector()
4625 q_vector = adapter->q_vector[v_idx]; in igc_alloc_q_vector()
4632 return -ENOMEM; in igc_alloc_q_vector()
4635 netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll); in igc_alloc_q_vector()
4638 adapter->q_vector[v_idx] = q_vector; in igc_alloc_q_vector()
4639 q_vector->adapter = adapter; in igc_alloc_q_vector()
4642 q_vector->tx.work_limit = adapter->tx_work_limit; in igc_alloc_q_vector()
4645 q_vector->itr_register = adapter->io_addr + IGC_EITR(0); in igc_alloc_q_vector()
4646 q_vector->itr_val = IGC_START_ITR; in igc_alloc_q_vector()
4649 ring = q_vector->ring; in igc_alloc_q_vector()
4654 if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3) in igc_alloc_q_vector()
4655 q_vector->itr_val = adapter->rx_itr_setting; in igc_alloc_q_vector()
4658 if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3) in igc_alloc_q_vector()
4659 q_vector->itr_val = adapter->tx_itr_setting; in igc_alloc_q_vector()
4664 ring->dev = &adapter->pdev->dev; in igc_alloc_q_vector()
4665 ring->netdev = adapter->netdev; in igc_alloc_q_vector()
4668 ring->q_vector = q_vector; in igc_alloc_q_vector()
4671 igc_add_ring(ring, &q_vector->tx); in igc_alloc_q_vector()
4674 ring->count = adapter->tx_ring_count; in igc_alloc_q_vector()
4675 ring->queue_index = txr_idx; in igc_alloc_q_vector()
4678 adapter->tx_ring[txr_idx] = ring; in igc_alloc_q_vector()
4686 ring->dev = &adapter->pdev->dev; in igc_alloc_q_vector()
4687 ring->netdev = adapter->netdev; in igc_alloc_q_vector()
4690 ring->q_vector = q_vector; in igc_alloc_q_vector()
4693 igc_add_ring(ring, &q_vector->rx); in igc_alloc_q_vector()
4696 ring->count = adapter->rx_ring_count; in igc_alloc_q_vector()
4697 ring->queue_index = rxr_idx; in igc_alloc_q_vector()
4700 adapter->rx_ring[rxr_idx] = ring; in igc_alloc_q_vector()
4707 * igc_alloc_q_vectors - Allocate memory for interrupt vectors
4711 * return -ENOMEM.
4715 int rxr_remaining = adapter->num_rx_queues; in igc_alloc_q_vectors()
4716 int txr_remaining = adapter->num_tx_queues; in igc_alloc_q_vectors()
4718 int q_vectors = adapter->num_q_vectors; in igc_alloc_q_vectors()
4730 rxr_remaining--; in igc_alloc_q_vectors()
4736 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx); in igc_alloc_q_vectors()
4737 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx); in igc_alloc_q_vectors()
4746 rxr_remaining -= rqpv; in igc_alloc_q_vectors()
4747 txr_remaining -= tqpv; in igc_alloc_q_vectors()
4755 adapter->num_tx_queues = 0; in igc_alloc_q_vectors()
4756 adapter->num_rx_queues = 0; in igc_alloc_q_vectors()
4757 adapter->num_q_vectors = 0; in igc_alloc_q_vectors()
4759 while (v_idx--) in igc_alloc_q_vectors()
4762 return -ENOMEM; in igc_alloc_q_vectors()
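The DIV_ROUND_UP() split above simply front-loads any leftover rings: for instance, with 3 Rx rings still to place and 2 vectors remaining, the next vector gets DIV_ROUND_UP(3, 2) = 2 rings and the final vector gets the remaining 1.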
4766 * igc_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
4768 * @msix: boolean for MSI-X capability
4774 struct net_device *dev = adapter->netdev; in igc_init_interrupt_scheme()
4795 * igc_sw_init - Initialize general software structures (struct igc_adapter)
4804 struct net_device *netdev = adapter->netdev; in igc_sw_init()
4805 struct pci_dev *pdev = adapter->pdev; in igc_sw_init()
4806 struct igc_hw *hw = &adapter->hw; in igc_sw_init()
4808 pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); in igc_sw_init()
4811 adapter->tx_ring_count = IGC_DEFAULT_TXD; in igc_sw_init()
4812 adapter->rx_ring_count = IGC_DEFAULT_RXD; in igc_sw_init()
4815 adapter->rx_itr_setting = IGC_DEFAULT_ITR; in igc_sw_init()
4816 adapter->tx_itr_setting = IGC_DEFAULT_ITR; in igc_sw_init()
4819 adapter->tx_work_limit = IGC_DEFAULT_TX_WORK; in igc_sw_init()
4822 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + in igc_sw_init()
4824 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; in igc_sw_init()
4826 mutex_init(&adapter->nfc_rule_lock); in igc_sw_init()
4827 INIT_LIST_HEAD(&adapter->nfc_rule_list); in igc_sw_init()
4828 adapter->nfc_rule_count = 0; in igc_sw_init()
4830 spin_lock_init(&adapter->stats64_lock); in igc_sw_init()
4831 spin_lock_init(&adapter->qbv_tx_lock); in igc_sw_init()
4832 /* Assume MSI-X interrupts, will be checked during IRQ allocation */ in igc_sw_init()
4833 adapter->flags |= IGC_FLAG_HAS_MSIX; in igc_sw_init()
4840 return -ENOMEM; in igc_sw_init()
4846 set_bit(__IGC_DOWN, &adapter->state); in igc_sw_init()
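A quick sanity check on the frame-size defaults above: ETH_HLEN is 14, ETH_FCS_LEN is 4 and ETH_ZLEN is 60, so min_frame_size works out to the classic 64-byte minimum Ethernet frame, and the default 1500-byte MTU yields a max_frame_size of at least 1518 bytes.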
4852 * igc_up - Open the interface and prepare it to handle traffic
4857 struct igc_hw *hw = &adapter->hw; in igc_up()
4863 clear_bit(__IGC_DOWN, &adapter->state); in igc_up()
4865 for (i = 0; i < adapter->num_q_vectors; i++) in igc_up()
4866 napi_enable(&adapter->q_vector[i]->napi); in igc_up()
4868 if (adapter->msix_entries) in igc_up()
4871 igc_assign_vector(adapter->q_vector[0], 0); in igc_up()
4877 netif_tx_start_all_queues(adapter->netdev); in igc_up()
4880 hw->mac.get_link_status = true; in igc_up()
4881 schedule_work(&adapter->watchdog_task); in igc_up()
4885 * igc_update_stats - Update the board statistics counters
4890 struct rtnl_link_stats64 *net_stats = &adapter->stats64; in igc_update_stats()
4891 struct pci_dev *pdev = adapter->pdev; in igc_update_stats()
4892 struct igc_hw *hw = &adapter->hw; in igc_update_stats()
4902 if (adapter->link_speed == 0) in igc_update_stats()
4911 for (i = 0; i < adapter->num_rx_queues; i++) { in igc_update_stats()
4912 struct igc_ring *ring = adapter->rx_ring[i]; in igc_update_stats()
4915 if (hw->mac.type >= igc_i225) in igc_update_stats()
4919 ring->rx_stats.drops += rqdpc; in igc_update_stats()
4920 net_stats->rx_fifo_errors += rqdpc; in igc_update_stats()
4924 start = u64_stats_fetch_begin(&ring->rx_syncp); in igc_update_stats()
4925 _bytes = ring->rx_stats.bytes; in igc_update_stats()
4926 _packets = ring->rx_stats.packets; in igc_update_stats()
4927 } while (u64_stats_fetch_retry(&ring->rx_syncp, start)); in igc_update_stats()
4932 net_stats->rx_bytes = bytes; in igc_update_stats()
4933 net_stats->rx_packets = packets; in igc_update_stats()
4937 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_update_stats()
4938 struct igc_ring *ring = adapter->tx_ring[i]; in igc_update_stats()
4941 start = u64_stats_fetch_begin(&ring->tx_syncp); in igc_update_stats()
4942 _bytes = ring->tx_stats.bytes; in igc_update_stats()
4943 _packets = ring->tx_stats.packets; in igc_update_stats()
4944 } while (u64_stats_fetch_retry(&ring->tx_syncp, start)); in igc_update_stats()
4948 net_stats->tx_bytes = bytes; in igc_update_stats()
4949 net_stats->tx_packets = packets; in igc_update_stats()
4953 adapter->stats.crcerrs += rd32(IGC_CRCERRS); in igc_update_stats()
4954 adapter->stats.gprc += rd32(IGC_GPRC); in igc_update_stats()
4955 adapter->stats.gorc += rd32(IGC_GORCL); in igc_update_stats()
4957 adapter->stats.bprc += rd32(IGC_BPRC); in igc_update_stats()
4958 adapter->stats.mprc += rd32(IGC_MPRC); in igc_update_stats()
4959 adapter->stats.roc += rd32(IGC_ROC); in igc_update_stats()
4961 adapter->stats.prc64 += rd32(IGC_PRC64); in igc_update_stats()
4962 adapter->stats.prc127 += rd32(IGC_PRC127); in igc_update_stats()
4963 adapter->stats.prc255 += rd32(IGC_PRC255); in igc_update_stats()
4964 adapter->stats.prc511 += rd32(IGC_PRC511); in igc_update_stats()
4965 adapter->stats.prc1023 += rd32(IGC_PRC1023); in igc_update_stats()
4966 adapter->stats.prc1522 += rd32(IGC_PRC1522); in igc_update_stats()
4967 adapter->stats.tlpic += rd32(IGC_TLPIC); in igc_update_stats()
4968 adapter->stats.rlpic += rd32(IGC_RLPIC); in igc_update_stats()
4969 adapter->stats.hgptc += rd32(IGC_HGPTC); in igc_update_stats()
4972 adapter->stats.mpc += mpc; in igc_update_stats()
4973 net_stats->rx_fifo_errors += mpc; in igc_update_stats()
4974 adapter->stats.scc += rd32(IGC_SCC); in igc_update_stats()
4975 adapter->stats.ecol += rd32(IGC_ECOL); in igc_update_stats()
4976 adapter->stats.mcc += rd32(IGC_MCC); in igc_update_stats()
4977 adapter->stats.latecol += rd32(IGC_LATECOL); in igc_update_stats()
4978 adapter->stats.dc += rd32(IGC_DC); in igc_update_stats()
4979 adapter->stats.rlec += rd32(IGC_RLEC); in igc_update_stats()
4980 adapter->stats.xonrxc += rd32(IGC_XONRXC); in igc_update_stats()
4981 adapter->stats.xontxc += rd32(IGC_XONTXC); in igc_update_stats()
4982 adapter->stats.xoffrxc += rd32(IGC_XOFFRXC); in igc_update_stats()
4983 adapter->stats.xofftxc += rd32(IGC_XOFFTXC); in igc_update_stats()
4984 adapter->stats.fcruc += rd32(IGC_FCRUC); in igc_update_stats()
4985 adapter->stats.gptc += rd32(IGC_GPTC); in igc_update_stats()
4986 adapter->stats.gotc += rd32(IGC_GOTCL); in igc_update_stats()
4988 adapter->stats.rnbc += rd32(IGC_RNBC); in igc_update_stats()
4989 adapter->stats.ruc += rd32(IGC_RUC); in igc_update_stats()
4990 adapter->stats.rfc += rd32(IGC_RFC); in igc_update_stats()
4991 adapter->stats.rjc += rd32(IGC_RJC); in igc_update_stats()
4992 adapter->stats.tor += rd32(IGC_TORH); in igc_update_stats()
4993 adapter->stats.tot += rd32(IGC_TOTH); in igc_update_stats()
4994 adapter->stats.tpr += rd32(IGC_TPR); in igc_update_stats()
4996 adapter->stats.ptc64 += rd32(IGC_PTC64); in igc_update_stats()
4997 adapter->stats.ptc127 += rd32(IGC_PTC127); in igc_update_stats()
4998 adapter->stats.ptc255 += rd32(IGC_PTC255); in igc_update_stats()
4999 adapter->stats.ptc511 += rd32(IGC_PTC511); in igc_update_stats()
5000 adapter->stats.ptc1023 += rd32(IGC_PTC1023); in igc_update_stats()
5001 adapter->stats.ptc1522 += rd32(IGC_PTC1522); in igc_update_stats()
5003 adapter->stats.mptc += rd32(IGC_MPTC); in igc_update_stats()
5004 adapter->stats.bptc += rd32(IGC_BPTC); in igc_update_stats()
5006 adapter->stats.tpt += rd32(IGC_TPT); in igc_update_stats()
5007 adapter->stats.colc += rd32(IGC_COLC); in igc_update_stats()
5008 adapter->stats.colc += rd32(IGC_RERC); in igc_update_stats()
5010 adapter->stats.algnerrc += rd32(IGC_ALGNERRC); in igc_update_stats()
5012 adapter->stats.tsctc += rd32(IGC_TSCTC); in igc_update_stats()
5014 adapter->stats.iac += rd32(IGC_IAC); in igc_update_stats()
5017 net_stats->multicast = adapter->stats.mprc; in igc_update_stats()
5018 net_stats->collisions = adapter->stats.colc; in igc_update_stats()
5025 net_stats->rx_errors = adapter->stats.rxerrc + in igc_update_stats()
5026 adapter->stats.crcerrs + adapter->stats.algnerrc + in igc_update_stats()
5027 adapter->stats.ruc + adapter->stats.roc + in igc_update_stats()
5028 adapter->stats.cexterr; in igc_update_stats()
5029 net_stats->rx_length_errors = adapter->stats.ruc + in igc_update_stats()
5030 adapter->stats.roc; in igc_update_stats()
5031 net_stats->rx_crc_errors = adapter->stats.crcerrs; in igc_update_stats()
5032 net_stats->rx_frame_errors = adapter->stats.algnerrc; in igc_update_stats()
5033 net_stats->rx_missed_errors = adapter->stats.mpc; in igc_update_stats()
5036 net_stats->tx_errors = adapter->stats.ecol + in igc_update_stats()
5037 adapter->stats.latecol; in igc_update_stats()
5038 net_stats->tx_aborted_errors = adapter->stats.ecol; in igc_update_stats()
5039 net_stats->tx_window_errors = adapter->stats.latecol; in igc_update_stats()
5040 net_stats->tx_carrier_errors = adapter->stats.tncrs; in igc_update_stats()
5043 net_stats->tx_dropped = adapter->stats.txdrop; in igc_update_stats()
5046 adapter->stats.mgptc += rd32(IGC_MGTPTC); in igc_update_stats()
5047 adapter->stats.mgprc += rd32(IGC_MGTPRC); in igc_update_stats()
5048 adapter->stats.mgpdc += rd32(IGC_MGTPDC); in igc_update_stats()
5052 * igc_down - Close the interface
5057 struct net_device *netdev = adapter->netdev; in igc_down()
5058 struct igc_hw *hw = &adapter->hw; in igc_down()
5062 set_bit(__IGC_DOWN, &adapter->state); in igc_down()
5066 if (pci_device_is_present(adapter->pdev)) { in igc_down()
5078 if (pci_device_is_present(adapter->pdev)) { in igc_down()
5090 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; in igc_down()
5092 for (i = 0; i < adapter->num_q_vectors; i++) { in igc_down()
5093 if (adapter->q_vector[i]) { in igc_down()
5094 napi_synchronize(&adapter->q_vector[i]->napi); in igc_down()
5095 napi_disable(&adapter->q_vector[i]->napi); in igc_down()
5099 del_timer_sync(&adapter->watchdog_timer); in igc_down()
5100 del_timer_sync(&adapter->phy_info_timer); in igc_down()
5103 spin_lock(&adapter->stats64_lock); in igc_down()
5105 spin_unlock(&adapter->stats64_lock); in igc_down()
5107 adapter->link_speed = 0; in igc_down()
5108 adapter->link_duplex = 0; in igc_down()
5110 if (!pci_channel_offline(adapter->pdev)) in igc_down()
5114 adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; in igc_down()
5123 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) in igc_reinit_locked()
5127 clear_bit(__IGC_RESETTING, &adapter->state); in igc_reinit_locked()
5138 if (test_bit(__IGC_DOWN, &adapter->state) || in igc_reset_task()
5139 test_bit(__IGC_RESETTING, &adapter->state)) { in igc_reset_task()
5146 netdev_err(adapter->netdev, "Reset adapter\n"); in igc_reset_task()
5152 * igc_change_mtu - Change the Maximum Transfer Unit
5165 return -EINVAL; in igc_change_mtu()
5172 while (test_and_set_bit(__IGC_RESETTING, &adapter->state)) in igc_change_mtu()
5176 adapter->max_frame_size = max_frame; in igc_change_mtu()
5181 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); in igc_change_mtu()
5182 netdev->mtu = new_mtu; in igc_change_mtu()
5189 clear_bit(__IGC_RESETTING, &adapter->state); in igc_change_mtu()
5195 * igc_tx_timeout - Respond to a Tx Hang
5203 struct igc_hw *hw = &adapter->hw; in igc_tx_timeout()
5206 adapter->tx_timeout_count++; in igc_tx_timeout()
5207 schedule_work(&adapter->reset_task); in igc_tx_timeout()
5209 (adapter->eims_enable_mask & ~adapter->eims_other)); in igc_tx_timeout()
5213 * igc_get_stats64 - Get System Network Statistics
5225 spin_lock(&adapter->stats64_lock); in igc_get_stats64()
5226 if (!test_bit(__IGC_RESETTING, &adapter->state)) in igc_get_stats64()
5228 memcpy(stats, &adapter->stats64, sizeof(*stats)); in igc_get_stats64()
5229 spin_unlock(&adapter->stats64_lock); in igc_get_stats64()
5249 netdev_features_t changed = netdev->features ^ features; in igc_set_features()
5262 netdev->features = features; in igc_set_features()
5279 mac_hdr_len = skb_network_header(skb) - skb->data; in igc_features_check()
5287 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); in igc_features_check()
5297 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) in igc_features_check()
5306 struct igc_hw *hw = &adapter->hw; in igc_tsync_interrupt()
5315 if (adapter->ptp_caps.pps) in igc_tsync_interrupt()
5316 ptp_clock_event(adapter->ptp_clock, &event); in igc_tsync_interrupt()
5327 spin_lock(&adapter->tmreg_lock); in igc_tsync_interrupt()
5328 ts = timespec64_add(adapter->perout[0].start, in igc_tsync_interrupt()
5329 adapter->perout[0].period); in igc_tsync_interrupt()
5335 adapter->perout[0].start = ts; in igc_tsync_interrupt()
5336 spin_unlock(&adapter->tmreg_lock); in igc_tsync_interrupt()
5341 spin_lock(&adapter->tmreg_lock); in igc_tsync_interrupt()
5342 ts = timespec64_add(adapter->perout[1].start, in igc_tsync_interrupt()
5343 adapter->perout[1].period); in igc_tsync_interrupt()
5349 adapter->perout[1].start = ts; in igc_tsync_interrupt()
5350 spin_unlock(&adapter->tmreg_lock); in igc_tsync_interrupt()
5360 ptp_clock_event(adapter->ptp_clock, &event); in igc_tsync_interrupt()
5370 ptp_clock_event(adapter->ptp_clock, &event); in igc_tsync_interrupt()
5379 * igc_msix_other - msix other interrupt handler
5386 struct igc_hw *hw = &adapter->hw; in igc_msix_other()
5391 schedule_work(&adapter->reset_task); in igc_msix_other()
5395 adapter->stats.doosync++; in igc_msix_other()
5399 hw->mac.get_link_status = true; in igc_msix_other()
5401 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_msix_other()
5402 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igc_msix_other()
5408 wr32(IGC_EIMS, adapter->eims_other); in igc_msix_other()
5415 u32 itr_val = q_vector->itr_val & IGC_QVECTOR_MASK; in igc_write_itr()
5417 if (!q_vector->set_itr) in igc_write_itr()
5425 writel(itr_val, q_vector->itr_register); in igc_write_itr()
5426 q_vector->set_itr = 0; in igc_write_itr()
5436 napi_schedule(&q_vector->napi); in igc_msix_ring()
5442 * igc_request_msix - Initialize MSI-X interrupts
5445 * igc_request_msix allocates MSI-X vectors and requests interrupts from the
5450 unsigned int num_q_vectors = adapter->num_q_vectors; in igc_request_msix()
5452 struct net_device *netdev = adapter->netdev; in igc_request_msix()
5454 err = request_irq(adapter->msix_entries[vector].vector, in igc_request_msix()
5455 &igc_msix_other, 0, netdev->name, adapter); in igc_request_msix()
5461 dev_warn(&adapter->pdev->dev, in igc_request_msix()
5463 adapter->num_q_vectors, MAX_Q_VECTORS); in igc_request_msix()
5466 struct igc_q_vector *q_vector = adapter->q_vector[i]; in igc_request_msix()
5470 q_vector->itr_register = adapter->io_addr + IGC_EITR(vector); in igc_request_msix()
5472 if (q_vector->rx.ring && q_vector->tx.ring) in igc_request_msix()
5473 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name, in igc_request_msix()
5474 q_vector->rx.ring->queue_index); in igc_request_msix()
5475 else if (q_vector->tx.ring) in igc_request_msix()
5476 sprintf(q_vector->name, "%s-tx-%u", netdev->name, in igc_request_msix()
5477 q_vector->tx.ring->queue_index); in igc_request_msix()
5478 else if (q_vector->rx.ring) in igc_request_msix()
5479 sprintf(q_vector->name, "%s-rx-%u", netdev->name, in igc_request_msix()
5480 q_vector->rx.ring->queue_index); in igc_request_msix()
5482 sprintf(q_vector->name, "%s-unused", netdev->name); in igc_request_msix()
5484 err = request_irq(adapter->msix_entries[vector].vector, in igc_request_msix()
5485 igc_msix_ring, 0, q_vector->name, in igc_request_msix()
5496 free_irq(adapter->msix_entries[free_vector++].vector, adapter); in igc_request_msix()
5498 vector--; in igc_request_msix()
5500 free_irq(adapter->msix_entries[free_vector++].vector, in igc_request_msix()
5501 adapter->q_vector[i]); in igc_request_msix()
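With the naming scheme above, an adapter registered as eth0 with paired queues appears in /proc/interrupts as one entry named after the netdev ("eth0") for the link/"other" vector, followed by per-queue vectors such as "eth0-TxRx-0", "eth0-TxRx-1", and so on (or "-tx-"/"-rx-" variants when Rx and Tx queues are not paired).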
5508 * igc_clear_interrupt_scheme - reset the device to a state of no interrupts
5512 * MSI-X interrupts allocated.
5527 igc_get_phy_info(&adapter->hw); in igc_update_phy_info()
5531 * igc_has_link - check shared code for link and determine up/down
5536 struct igc_hw *hw = &adapter->hw; in igc_has_link()
5544 if (!hw->mac.get_link_status) in igc_has_link()
5546 hw->mac.ops.check_for_link(hw); in igc_has_link()
5547 link_active = !hw->mac.get_link_status; in igc_has_link()
5549 if (hw->mac.type == igc_i225) { in igc_has_link()
5550 if (!netif_carrier_ok(adapter->netdev)) { in igc_has_link()
5551 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; in igc_has_link()
5552 } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { in igc_has_link()
5553 adapter->flags |= IGC_FLAG_NEED_LINK_UPDATE; in igc_has_link()
5554 adapter->link_check_timeout = jiffies; in igc_has_link()
5562 * igc_watchdog - Timer Call-back
5569 schedule_work(&adapter->watchdog_task); in igc_watchdog()
5577 struct net_device *netdev = adapter->netdev; in igc_watchdog_task()
5578 struct igc_hw *hw = &adapter->hw; in igc_watchdog_task()
5579 struct igc_phy_info *phy = &hw->phy; in igc_watchdog_task()
5586 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) { in igc_watchdog_task()
5587 if (time_after(jiffies, (adapter->link_check_timeout + HZ))) in igc_watchdog_task()
5588 adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; in igc_watchdog_task()
5595 pm_runtime_resume(netdev->dev.parent); in igc_watchdog_task()
5600 hw->mac.ops.get_speed_and_duplex(hw, in igc_watchdog_task()
5601 &adapter->link_speed, in igc_watchdog_task()
5602 &adapter->link_duplex); in igc_watchdog_task()
5608 adapter->link_speed, in igc_watchdog_task()
5609 adapter->link_duplex == FULL_DUPLEX ? in igc_watchdog_task()
5617 if ((adapter->flags & IGC_FLAG_EEE) && in igc_watchdog_task()
5618 adapter->link_duplex == HALF_DUPLEX) { in igc_watchdog_task()
5620 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex\n"); in igc_watchdog_task()
5621 adapter->hw.dev_spec._base.eee_enable = false; in igc_watchdog_task()
5622 adapter->flags &= ~IGC_FLAG_EEE; in igc_watchdog_task()
5627 if (phy->speed_downgraded) in igc_watchdog_task()
5631 adapter->tx_timeout_factor = 1; in igc_watchdog_task()
5632 switch (adapter->link_speed) { in igc_watchdog_task()
5634 adapter->tx_timeout_factor = 14; in igc_watchdog_task()
5639 adapter->tx_timeout_factor = 1; in igc_watchdog_task()
5645 * based on link-up activity. Write into the register in igc_watchdog_task()
5650 if (adapter->link_speed != SPEED_1000) in igc_watchdog_task()
5660 retry_count--; in igc_watchdog_task()
5666 netdev_err(netdev, "read 1000Base-T Status Reg\n"); in igc_watchdog_task()
5672 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_watchdog_task()
5673 mod_timer(&adapter->phy_info_timer, in igc_watchdog_task()
5678 adapter->link_speed = 0; in igc_watchdog_task()
5679 adapter->link_duplex = 0; in igc_watchdog_task()
5686 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_watchdog_task()
5687 mod_timer(&adapter->phy_info_timer, in igc_watchdog_task()
5690 pm_schedule_suspend(netdev->dev.parent, in igc_watchdog_task()
5695 spin_lock(&adapter->stats64_lock); in igc_watchdog_task()
5697 spin_unlock(&adapter->stats64_lock); in igc_watchdog_task()
5699 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_watchdog_task()
5700 struct igc_ring *tx_ring = adapter->tx_ring[i]; in igc_watchdog_task()
5708 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { in igc_watchdog_task()
5709 adapter->tx_timeout_count++; in igc_watchdog_task()
5710 schedule_work(&adapter->reset_task); in igc_watchdog_task()
5717 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igc_watchdog_task()
5721 if (adapter->flags & IGC_FLAG_HAS_MSIX) { in igc_watchdog_task()
5724 for (i = 0; i < adapter->num_q_vectors; i++) in igc_watchdog_task()
5725 eics |= adapter->q_vector[i]->eims_value; in igc_watchdog_task()
5734 if (!test_bit(__IGC_DOWN, &adapter->state)) { in igc_watchdog_task()
5735 if (adapter->flags & IGC_FLAG_NEED_LINK_UPDATE) in igc_watchdog_task()
5736 mod_timer(&adapter->watchdog_timer, in igc_watchdog_task()
5739 mod_timer(&adapter->watchdog_timer, in igc_watchdog_task()
5745 * igc_intr_msi - Interrupt Handler
5752 struct igc_q_vector *q_vector = adapter->q_vector[0]; in igc_intr_msi()
5753 struct igc_hw *hw = &adapter->hw; in igc_intr_msi()
5760 schedule_work(&adapter->reset_task); in igc_intr_msi()
5764 adapter->stats.doosync++; in igc_intr_msi()
5768 hw->mac.get_link_status = true; in igc_intr_msi()
5769 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_intr_msi()
5770 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igc_intr_msi()
5776 napi_schedule(&q_vector->napi); in igc_intr_msi()
5782 * igc_intr - Legacy Interrupt Handler
5789 struct igc_q_vector *q_vector = adapter->q_vector[0]; in igc_intr()
5790 struct igc_hw *hw = &adapter->hw; in igc_intr()
5791 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No in igc_intr()
5796 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is in igc_intr()
5805 schedule_work(&adapter->reset_task); in igc_intr()
5809 adapter->stats.doosync++; in igc_intr()
5813 hw->mac.get_link_status = true; in igc_intr()
5815 if (!test_bit(__IGC_DOWN, &adapter->state)) in igc_intr()
5816 mod_timer(&adapter->watchdog_timer, jiffies + 1); in igc_intr()
5822 napi_schedule(&q_vector->napi); in igc_intr()
5829 if (adapter->msix_entries) { in igc_free_irq()
5832 free_irq(adapter->msix_entries[vector++].vector, adapter); in igc_free_irq()
5834 for (i = 0; i < adapter->num_q_vectors; i++) in igc_free_irq()
5835 free_irq(adapter->msix_entries[vector++].vector, in igc_free_irq()
5836 adapter->q_vector[i]); in igc_free_irq()
5838 free_irq(adapter->pdev->irq, adapter); in igc_free_irq()
5843 * igc_request_irq - initialize interrupts
5851 struct net_device *netdev = adapter->netdev; in igc_request_irq()
5852 struct pci_dev *pdev = adapter->pdev; in igc_request_irq()
5855 if (adapter->flags & IGC_FLAG_HAS_MSIX) { in igc_request_irq()
5872 igc_assign_vector(adapter->q_vector[0], 0); in igc_request_irq()
5874 if (adapter->flags & IGC_FLAG_HAS_MSI) { in igc_request_irq()
5875 err = request_irq(pdev->irq, &igc_intr_msi, 0, in igc_request_irq()
5876 netdev->name, adapter); in igc_request_irq()
5882 adapter->flags &= ~IGC_FLAG_HAS_MSI; in igc_request_irq()
5885 err = request_irq(pdev->irq, &igc_intr, IRQF_SHARED, in igc_request_irq()
5886 netdev->name, adapter); in igc_request_irq()
5896 * __igc_open - Called when a network interface is made active
5911 struct pci_dev *pdev = adapter->pdev; in __igc_open()
5912 struct igc_hw *hw = &adapter->hw; in __igc_open()
5918 if (test_bit(__IGC_TESTING, &adapter->state)) { in __igc_open()
5920 return -EBUSY; in __igc_open()
5924 pm_runtime_get_sync(&pdev->dev); in __igc_open()
5947 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); in __igc_open()
5951 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); in __igc_open()
5955 clear_bit(__IGC_DOWN, &adapter->state); in __igc_open()
5957 for (i = 0; i < adapter->num_q_vectors; i++) in __igc_open()
5958 napi_enable(&adapter->q_vector[i]->napi); in __igc_open()
5965 pm_runtime_put(&pdev->dev); in __igc_open()
5970 hw->mac.get_link_status = true; in __igc_open()
5971 schedule_work(&adapter->watchdog_task); in __igc_open()
5979 igc_power_down_phy_copper_base(&adapter->hw); in __igc_open()
5986 pm_runtime_put(&pdev->dev); in __igc_open()
5997 * __igc_close - Disables a network interface
6003 * The close entry point is called when an interface is de-activated
6011 struct pci_dev *pdev = adapter->pdev; in __igc_close()
6013 WARN_ON(test_bit(__IGC_RESETTING, &adapter->state)); in __igc_close()
6016 pm_runtime_get_sync(&pdev->dev); in __igc_close()
6028 pm_runtime_put_sync(&pdev->dev); in __igc_close()
6035 if (netif_device_present(netdev) || netdev->dismantle) in igc_close()
6041 * igc_ioctl - Access the hwtstamp interface
6054 return -EOPNOTSUPP; in igc_ioctl()
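Since igc_ioctl() is the entry point for the hwtstamp interface noted above, here is a minimal userspace sketch of driving it through the standard SIOCSHWTSTAMP ioctl; the interface name and the use of an AF_INET datagram socket are illustrative assumptions:

	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <unistd.h>

	/* Ask the driver to timestamp all transmitted and received packets. */
	static int enable_hw_timestamping(const char *ifname)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_ALL,
		};
		struct ifreq ifr;
		int fd, err;

		fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (fd < 0)
			return -1;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		err = ioctl(fd, SIOCSHWTSTAMP, &ifr);	/* serviced via the driver's ioctl path */
		close(fd);
		return err;
	}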
6063 if (queue < 0 || queue >= adapter->num_tx_queues) in igc_save_launchtime_params()
6064 return -EINVAL; in igc_save_launchtime_params()
6066 ring = adapter->tx_ring[queue]; in igc_save_launchtime_params()
6067 ring->launchtime_enable = enable; in igc_save_launchtime_params()
6085 struct igc_hw *hw = &adapter->hw; in validate_schedule()
6089 if (qopt->cycle_time_extension) in validate_schedule()
6100 if (!is_base_time_past(qopt->base_time, &now) && in validate_schedule()
6104 for (n = 0; n < qopt->num_entries; n++) { in validate_schedule()
6108 prev = n ? &qopt->entries[n - 1] : NULL; in validate_schedule()
6109 e = &qopt->entries[n]; in validate_schedule()
6114 if (e->command != TC_TAPRIO_CMD_SET_GATES) in validate_schedule()
6117 for (i = 0; i < adapter->num_tx_queues; i++) in validate_schedule()
6118 if (e->gate_mask & BIT(i)) { in validate_schedule()
6126 !(prev->gate_mask & BIT(i))) in validate_schedule()
6137 struct igc_hw *hw = &adapter->hw; in igc_tsn_enable_launchtime()
6140 if (hw->mac.type != igc_i225) in igc_tsn_enable_launchtime()
6141 return -EOPNOTSUPP; in igc_tsn_enable_launchtime()
6143 err = igc_save_launchtime_params(adapter, qopt->queue, qopt->enable); in igc_tsn_enable_launchtime()
6155 adapter->base_time = 0; in igc_qbv_clear_schedule()
6156 adapter->cycle_time = NSEC_PER_SEC; in igc_qbv_clear_schedule()
6157 adapter->taprio_offload_enable = false; in igc_qbv_clear_schedule()
6158 adapter->qbv_config_change_errors = 0; in igc_qbv_clear_schedule()
6159 adapter->qbv_count = 0; in igc_qbv_clear_schedule()
6161 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_qbv_clear_schedule()
6162 struct igc_ring *ring = adapter->tx_ring[i]; in igc_qbv_clear_schedule()
6164 ring->start_time = 0; in igc_qbv_clear_schedule()
6165 ring->end_time = NSEC_PER_SEC; in igc_qbv_clear_schedule()
6166 ring->max_sdu = 0; in igc_qbv_clear_schedule()
6169 spin_lock_irqsave(&adapter->qbv_tx_lock, flags); in igc_qbv_clear_schedule()
6171 adapter->qbv_transition = false; in igc_qbv_clear_schedule()
6173 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_qbv_clear_schedule()
6174 struct igc_ring *ring = adapter->tx_ring[i]; in igc_qbv_clear_schedule()
6176 ring->oper_gate_closed = false; in igc_qbv_clear_schedule()
6177 ring->admin_gate_closed = false; in igc_qbv_clear_schedule()
6180 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); in igc_qbv_clear_schedule()
6198 stats->tx_overruns = 0; in igc_taprio_stats()
6204 struct tc_taprio_qopt_stats *stats = &queue_stats->stats; in igc_taprio_queue_stats()
6209 stats->tx_overruns = 0; in igc_taprio_queue_stats()
6216 struct igc_hw *hw = &adapter->hw; in igc_save_qbv_schedule()
6223 switch (qopt->cmd) { in igc_save_qbv_schedule()
6229 igc_taprio_stats(adapter->netdev, &qopt->stats); in igc_save_qbv_schedule()
6232 igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats); in igc_save_qbv_schedule()
6235 return -EOPNOTSUPP; in igc_save_qbv_schedule()
6238 if (qopt->base_time < 0) in igc_save_qbv_schedule()
6239 return -ERANGE; in igc_save_qbv_schedule()
6241 if (igc_is_device_id_i225(hw) && adapter->taprio_offload_enable) in igc_save_qbv_schedule()
6242 return -EALREADY; in igc_save_qbv_schedule()
6245 return -EINVAL; in igc_save_qbv_schedule()
6247 adapter->cycle_time = qopt->cycle_time; in igc_save_qbv_schedule()
6248 adapter->base_time = qopt->base_time; in igc_save_qbv_schedule()
6249 adapter->taprio_offload_enable = true; in igc_save_qbv_schedule()
6253 for (n = 0; n < qopt->num_entries; n++) { in igc_save_qbv_schedule()
6254 struct tc_taprio_sched_entry *e = &qopt->entries[n]; in igc_save_qbv_schedule()
6256 end_time += e->interval; in igc_save_qbv_schedule()
6264 * 2. According to IEEE Std. 802.1Q-2018 section 8.6.9.2, in igc_save_qbv_schedule()
6269 if (end_time > adapter->cycle_time || in igc_save_qbv_schedule()
6270 n + 1 == qopt->num_entries) in igc_save_qbv_schedule()
6271 end_time = adapter->cycle_time; in igc_save_qbv_schedule()
6273 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_save_qbv_schedule()
6274 struct igc_ring *ring = adapter->tx_ring[i]; in igc_save_qbv_schedule()
6276 if (!(e->gate_mask & BIT(i))) in igc_save_qbv_schedule()
6284 ring->start_time = start_time; in igc_save_qbv_schedule()
6285 ring->end_time = end_time; in igc_save_qbv_schedule()
6287 if (ring->start_time >= adapter->cycle_time) in igc_save_qbv_schedule()
6293 start_time += e->interval; in igc_save_qbv_schedule()
6296 spin_lock_irqsave(&adapter->qbv_tx_lock, flags); in igc_save_qbv_schedule()
6301 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_save_qbv_schedule()
6302 struct igc_ring *ring = adapter->tx_ring[i]; in igc_save_qbv_schedule()
6304 if (!is_base_time_past(qopt->base_time, &now)) { in igc_save_qbv_schedule()
6305 ring->admin_gate_closed = false; in igc_save_qbv_schedule()
6307 ring->oper_gate_closed = false; in igc_save_qbv_schedule()
6308 ring->admin_gate_closed = false; in igc_save_qbv_schedule()
6312 if (!is_base_time_past(qopt->base_time, &now)) in igc_save_qbv_schedule()
6313 ring->admin_gate_closed = true; in igc_save_qbv_schedule()
6315 ring->oper_gate_closed = true; in igc_save_qbv_schedule()
6317 ring->start_time = end_time; in igc_save_qbv_schedule()
6318 ring->end_time = end_time; in igc_save_qbv_schedule()
6322 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); in igc_save_qbv_schedule()
6324 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_save_qbv_schedule()
6325 struct igc_ring *ring = adapter->tx_ring[i]; in igc_save_qbv_schedule()
6326 struct net_device *dev = adapter->netdev; in igc_save_qbv_schedule()
6328 if (qopt->max_sdu[i]) in igc_save_qbv_schedule()
6329 ring->max_sdu = qopt->max_sdu[i] + dev->hard_header_len - ETH_TLEN; in igc_save_qbv_schedule()
6331 ring->max_sdu = 0; in igc_save_qbv_schedule()
6340 struct igc_hw *hw = &adapter->hw; in igc_tsn_enable_qbv_scheduling()
6343 if (hw->mac.type != igc_i225) in igc_tsn_enable_qbv_scheduling()
6344 return -EOPNOTSUPP; in igc_tsn_enable_qbv_scheduling()
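For reference, the Qbv scheduling path above is normally exercised through tc's taprio offload. An illustrative command (queue layout, base-time and intervals are made-up placeholders; flags 0x2 requests full offload) might look like:

	tc qdisc replace dev eth0 parent root handle 100 taprio \
		num_tc 4 map 0 1 2 3 3 3 3 3 3 3 3 3 3 3 3 3 \
		queues 1@0 1@1 1@2 1@3 \
		base-time 1000000000 \
		sched-entry S 0x1 300000 \
		sched-entry S 0x2 300000 \
		sched-entry S 0xc 400000 \
		flags 0x2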
6358 struct net_device *netdev = adapter->netdev; in igc_save_cbs_params()
6362 /* i225 has two sets of credit-based shaper logic. in igc_save_cbs_params()
6366 return -EINVAL; in igc_save_cbs_params()
6368 ring = adapter->tx_ring[queue]; in igc_save_cbs_params()
6371 if (adapter->tx_ring[i]) in igc_save_cbs_params()
6372 cbs_status[i] = adapter->tx_ring[i]->cbs_enable; in igc_save_cbs_params()
6381 return -EINVAL; in igc_save_cbs_params()
6387 return -EINVAL; in igc_save_cbs_params()
6391 ring->cbs_enable = enable; in igc_save_cbs_params()
6392 ring->idleslope = idleslope; in igc_save_cbs_params()
6393 ring->sendslope = sendslope; in igc_save_cbs_params()
6394 ring->hicredit = hicredit; in igc_save_cbs_params()
6395 ring->locredit = locredit; in igc_save_cbs_params()
6403 struct igc_hw *hw = &adapter->hw; in igc_tsn_enable_cbs()
6406 if (hw->mac.type != igc_i225) in igc_tsn_enable_cbs()
6407 return -EOPNOTSUPP; in igc_tsn_enable_cbs()
6409 if (qopt->queue < 0 || qopt->queue > 1) in igc_tsn_enable_cbs()
6410 return -EINVAL; in igc_tsn_enable_cbs()
6412 err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable, in igc_tsn_enable_cbs()
6413 qopt->idleslope, qopt->sendslope, in igc_tsn_enable_cbs()
6414 qopt->hicredit, qopt->locredit); in igc_tsn_enable_cbs()
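Similarly, the CBS path is typically driven from tc on one of the two shaper-capable queues. An illustrative command (the credit and slope values are placeholders for roughly a 100 Mbit/s reservation on a 1 Gbit/s link, attached under a hypothetical parent handle 100:):

	tc qdisc replace dev eth0 parent 100:1 cbs \
		idleslope 100000 sendslope -900000 hicredit 153 locredit -1389 \
		offload 1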
6424 struct igc_hw *hw = &adapter->hw; in igc_tc_query_caps()
6426 switch (base->type) { in igc_tc_query_caps()
6428 struct tc_taprio_caps *caps = base->caps; in igc_tc_query_caps()
6430 caps->broken_mqprio = true; in igc_tc_query_caps()
6432 if (hw->mac.type == igc_i225) { in igc_tc_query_caps()
6433 caps->supports_queue_max_sdu = true; in igc_tc_query_caps()
6434 caps->gate_mask_per_txq = true; in igc_tc_query_caps()
6440 return -EOPNOTSUPP; in igc_tc_query_caps()
6449 adapter->tc_setup_type = type; in igc_setup_tc()
6464 return -EOPNOTSUPP; in igc_setup_tc()
6472 switch (bpf->command) { in igc_bpf()
6474 return igc_xdp_set_prog(adapter, bpf->prog, bpf->extack); in igc_bpf()
6476 return igc_xdp_setup_pool(adapter, bpf->xsk.pool, in igc_bpf()
6477 bpf->xsk.queue_id); in igc_bpf()
6479 return -EOPNOTSUPP; in igc_bpf()
6493 return -ENETDOWN; in igc_xdp_xmit()
6496 return -EINVAL; in igc_xdp_xmit()
6528 struct igc_hw *hw = &adapter->hw; in igc_trigger_rxtxq_interrupt()
6531 eics |= q_vector->eims_value; in igc_trigger_rxtxq_interrupt()
6541 if (test_bit(__IGC_DOWN, &adapter->state)) in igc_xsk_wakeup()
6542 return -ENETDOWN; in igc_xsk_wakeup()
6545 return -ENXIO; in igc_xsk_wakeup()
6547 if (queue_id >= adapter->num_rx_queues) in igc_xsk_wakeup()
6548 return -EINVAL; in igc_xsk_wakeup()
6550 ring = adapter->rx_ring[queue_id]; in igc_xsk_wakeup()
6552 if (!ring->xsk_pool) in igc_xsk_wakeup()
6553 return -ENXIO; in igc_xsk_wakeup()
6555 q_vector = adapter->q_vector[queue_id]; in igc_xsk_wakeup()
6556 if (!napi_if_scheduled_mark_missed(&q_vector->napi)) in igc_xsk_wakeup()
6570 tstamp = hwtstamps->netdev_data; in igc_get_tstamp()
6573 timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer1); in igc_get_tstamp()
6575 timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0); in igc_get_tstamp()
6603 struct igc_adapter *adapter = hw->back; in igc_read_pci_cfg()
6605 pci_read_config_word(adapter->pdev, reg, value); in igc_read_pci_cfg()
6610 struct igc_adapter *adapter = hw->back; in igc_write_pci_cfg()
6612 pci_write_config_word(adapter->pdev, reg, *value); in igc_write_pci_cfg()
6617 struct igc_adapter *adapter = hw->back; in igc_read_pcie_cap_reg()
6619 if (!pci_is_pcie(adapter->pdev)) in igc_read_pcie_cap_reg()
6620 return -IGC_ERR_CONFIG; in igc_read_pcie_cap_reg()
6622 pcie_capability_read_word(adapter->pdev, reg, value); in igc_read_pcie_cap_reg()
6629 struct igc_adapter *adapter = hw->back; in igc_write_pcie_cap_reg()
6631 if (!pci_is_pcie(adapter->pdev)) in igc_write_pcie_cap_reg()
6632 return -IGC_ERR_CONFIG; in igc_write_pcie_cap_reg()
6634 pcie_capability_write_word(adapter->pdev, reg, *value); in igc_write_pcie_cap_reg()
6642 u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); in igc_rd32()
6652 struct net_device *netdev = igc->netdev; in igc_rd32()
6654 hw->hw_addr = NULL; in igc_rd32()
6657 WARN(pci_device_is_present(igc->pdev), in igc_rd32()
6677 [11] = XDP_RSS_TYPE_NONE, /* keep array sized for SW bit-mask */
6689 if (!(ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)) in igc_xdp_rx_hash()
6690 return -ENODATA; in igc_xdp_rx_hash()
6692 *hash = le32_to_cpu(ctx->rx_desc->wb.lower.hi_dword.rss); in igc_xdp_rx_hash()
6693 *rss_type = igc_xdp_rss_type[igc_rss_type(ctx->rx_desc)]; in igc_xdp_rx_hash()
6701 struct igc_adapter *adapter = netdev_priv(ctx->xdp.rxq->dev); in igc_xdp_rx_timestamp()
6702 struct igc_inline_rx_tstamps *tstamp = ctx->rx_ts; in igc_xdp_rx_timestamp()
6704 if (igc_test_staterr(ctx->rx_desc, IGC_RXDADV_STAT_TSIP)) { in igc_xdp_rx_timestamp()
6705 *timestamp = igc_ptp_rx_pktstamp(adapter, tstamp->timer0); in igc_xdp_rx_timestamp()
6710 return -ENODATA; in igc_xdp_rx_timestamp()
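The two callbacks above back the XDP Rx-metadata hints consumed by BPF programs. A minimal BPF-side sketch of reading them is given below; the kfunc declarations and a vmlinux.h/libbpf build environment are assumptions for illustration, not something this listing shows:

	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>

	extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
					    enum xdp_rss_hash_type *rss_type) __ksym;
	extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx,
						 __u64 *timestamp) __ksym;

	SEC("xdp")
	int rx_hints(struct xdp_md *ctx)
	{
		enum xdp_rss_hash_type rss_type;
		__u64 ts;
		__u32 hash;

		/* Both kfuncs return 0 on success, -errno otherwise. */
		if (!bpf_xdp_metadata_rx_hash(ctx, &hash, &rss_type))
			bpf_printk("rss hash %u type %u", hash, rss_type);

		if (!bpf_xdp_metadata_rx_timestamp(ctx, &ts))
			bpf_printk("rx hw timestamp %llu", ts);

		return XDP_PASS;
	}

	char LICENSE[] SEC("license") = "GPL";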
6725 spin_lock_irqsave(&adapter->qbv_tx_lock, flags); in igc_qbv_scheduling_timer()
6727 adapter->qbv_transition = true; in igc_qbv_scheduling_timer()
6728 for (i = 0; i < adapter->num_tx_queues; i++) { in igc_qbv_scheduling_timer()
6729 struct igc_ring *tx_ring = adapter->tx_ring[i]; in igc_qbv_scheduling_timer()
6731 if (tx_ring->admin_gate_closed) { in igc_qbv_scheduling_timer()
6732 tx_ring->admin_gate_closed = false; in igc_qbv_scheduling_timer()
6733 tx_ring->oper_gate_closed = true; in igc_qbv_scheduling_timer()
6735 tx_ring->oper_gate_closed = false; in igc_qbv_scheduling_timer()
6738 adapter->qbv_transition = false; in igc_qbv_scheduling_timer()
6740 spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags); in igc_qbv_scheduling_timer()
6746 * igc_probe - Device Initialization Routine
6762 const struct igc_info *ei = igc_info_tbl[ent->driver_data]; in igc_probe()
6769 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in igc_probe()
6771 dev_err(&pdev->dev, in igc_probe()
6782 dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); in igc_probe()
6786 err = -ENOMEM; in igc_probe()
6793 SET_NETDEV_DEV(netdev, &pdev->dev); in igc_probe()
6797 adapter->netdev = netdev; in igc_probe()
6798 adapter->pdev = pdev; in igc_probe()
6799 hw = &adapter->hw; in igc_probe()
6800 hw->back = adapter; in igc_probe()
6801 adapter->port_num = hw->bus.func; in igc_probe()
6802 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); in igc_probe()
6808 err = -EIO; in igc_probe()
6809 adapter->io_addr = ioremap(pci_resource_start(pdev, 0), in igc_probe()
6811 if (!adapter->io_addr) in igc_probe()
6814 /* hw->hw_addr can be zeroed, so use adapter->io_addr for unmap */ in igc_probe()
6815 hw->hw_addr = adapter->io_addr; in igc_probe()
6817 netdev->netdev_ops = &igc_netdev_ops; in igc_probe()
6818 netdev->xdp_metadata_ops = &igc_xdp_metadata_ops; in igc_probe()
6820 netdev->watchdog_timeo = 5 * HZ; in igc_probe()
6822 netdev->mem_start = pci_resource_start(pdev, 0); in igc_probe()
6823 netdev->mem_end = pci_resource_end(pdev, 0); in igc_probe()
6826 hw->vendor_id = pdev->vendor; in igc_probe()
6827 hw->device_id = pdev->device; in igc_probe()
6828 hw->revision_id = pdev->revision; in igc_probe()
6829 hw->subsystem_vendor_id = pdev->subsystem_vendor; in igc_probe()
6830 hw->subsystem_device_id = pdev->subsystem_device; in igc_probe()
6833 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); in igc_probe()
6834 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); in igc_probe()
6836 /* Initialize skew-specific constants */ in igc_probe()
6837 err = ei->get_invariants(hw); in igc_probe()
6842 netdev->features |= NETIF_F_SG; in igc_probe()
6843 netdev->features |= NETIF_F_TSO; in igc_probe()
6844 netdev->features |= NETIF_F_TSO6; in igc_probe()
6845 netdev->features |= NETIF_F_TSO_ECN; in igc_probe()
6846 netdev->features |= NETIF_F_RXHASH; in igc_probe()
6847 netdev->features |= NETIF_F_RXCSUM; in igc_probe()
6848 netdev->features |= NETIF_F_HW_CSUM; in igc_probe()
6849 netdev->features |= NETIF_F_SCTP_CRC; in igc_probe()
6850 netdev->features |= NETIF_F_HW_TC; in igc_probe()
6859 netdev->gso_partial_features = IGC_GSO_PARTIAL_FEATURES; in igc_probe()
6860 netdev->features |= NETIF_F_GSO_PARTIAL | IGC_GSO_PARTIAL_FEATURES; in igc_probe()
6868 netdev->hw_features |= NETIF_F_NTUPLE; in igc_probe()
6869 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; in igc_probe()
6870 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; in igc_probe()
6871 netdev->hw_features |= netdev->features; in igc_probe()
6873 netdev->features |= NETIF_F_HIGHDMA; in igc_probe()
6875 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; in igc_probe()
6876 netdev->mpls_features |= NETIF_F_HW_CSUM; in igc_probe()
6877 netdev->hw_enc_features |= netdev->vlan_features; in igc_probe()
6879 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in igc_probe()
6882 /* MTU range: 68 - 9216 */ in igc_probe()
6883 netdev->min_mtu = ETH_MIN_MTU; in igc_probe()
6884 netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE; in igc_probe()
6889 hw->mac.ops.reset_hw(hw); in igc_probe()
6892 if (hw->nvm.ops.validate(hw) < 0) { in igc_probe()
6893 dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); in igc_probe()
6894 err = -EIO; in igc_probe()
6899 if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) { in igc_probe()
6901 if (hw->mac.ops.read_mac_addr(hw)) in igc_probe()
6902 dev_err(&pdev->dev, "NVM Read Error\n"); in igc_probe()
6905 eth_hw_addr_set(netdev, hw->mac.addr); in igc_probe()
6907 if (!is_valid_ether_addr(netdev->dev_addr)) { in igc_probe()
6908 dev_err(&pdev->dev, "Invalid MAC Address\n"); in igc_probe()
6909 err = -EIO; in igc_probe()
6917 timer_setup(&adapter->watchdog_timer, igc_watchdog, 0); in igc_probe()
6918 timer_setup(&adapter->phy_info_timer, igc_update_phy_info, 0); in igc_probe()
6920 INIT_WORK(&adapter->reset_task, igc_reset_task); in igc_probe()
6921 INIT_WORK(&adapter->watchdog_task, igc_watchdog_task); in igc_probe()
6923 hrtimer_init(&adapter->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in igc_probe()
6924 adapter->hrtimer.function = &igc_qbv_scheduling_timer; in igc_probe()
6926 /* Initialize link properties that are user-changeable */ in igc_probe()
6927 adapter->fc_autoneg = true; in igc_probe()
6928 hw->mac.autoneg = true; in igc_probe()
6929 hw->phy.autoneg_advertised = 0xaf; in igc_probe()
6931 hw->fc.requested_mode = igc_fc_default; in igc_probe()
6932 hw->fc.current_mode = igc_fc_default; in igc_probe()
6935 adapter->flags |= IGC_FLAG_WOL_SUPPORTED; in igc_probe()
6938 if (adapter->flags & IGC_FLAG_WOL_SUPPORTED) in igc_probe()
6939 adapter->wol |= IGC_WUFC_MAG; in igc_probe()
6941 device_set_wakeup_enable(&adapter->pdev->dev, in igc_probe()
6942 adapter->flags & IGC_FLAG_WOL_SUPPORTED); in igc_probe()
6956 strscpy(netdev->name, "eth%d", sizeof(netdev->name)); in igc_probe()
6965 adapter->ei = *ei; in igc_probe()
6969 netdev_info(netdev, "MAC: %pM\n", netdev->dev_addr); in igc_probe()
6971 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); in igc_probe()
6973 hw->dev_spec._base.eee_enable = false; in igc_probe()
6974 adapter->flags &= ~IGC_FLAG_EEE; in igc_probe()
6977 pm_runtime_put_noidle(&pdev->dev); in igc_probe()
6988 iounmap(adapter->io_addr); in igc_probe()
7000 * igc_remove - Device Removal Routine
7005 * Hot-Plug event, or because the driver is going to be removed from
7013 pm_runtime_get_noresume(&pdev->dev); in igc_remove()
7022 set_bit(__IGC_DOWN, &adapter->state); in igc_remove()
7024 del_timer_sync(&adapter->watchdog_timer); in igc_remove()
7025 del_timer_sync(&adapter->phy_info_timer); in igc_remove()
7027 cancel_work_sync(&adapter->reset_task); in igc_remove()
7028 cancel_work_sync(&adapter->watchdog_task); in igc_remove()
7029 hrtimer_cancel(&adapter->hrtimer); in igc_remove()
7038 pci_iounmap(pdev, adapter->io_addr); in igc_remove()
7051 u32 wufc = runtime ? IGC_WUFC_LNKC : adapter->wol; in __igc_shutdown()
7052 struct igc_hw *hw = &adapter->hw; in __igc_shutdown()
7075 /* turn on all-multi mode if wake on multicast is enabled */ in __igc_shutdown()
7096 wake = wufc || adapter->en_mng_pt; in __igc_shutdown()
7098 igc_power_down_phy_copper_base(&adapter->hw); in __igc_shutdown()
7124 struct igc_hw *hw = &adapter->hw; in igc_deliver_wake_packet()
7142 /* Ensure reads are 32-bit aligned */ in igc_deliver_wake_packet()
7145 memcpy_fromio(skb->data, hw->hw_addr + IGC_WUPM_REG(0), wupl); in igc_deliver_wake_packet()
7147 skb->protocol = eth_type_trans(skb, netdev); in igc_deliver_wake_packet()
7156 struct igc_hw *hw = &adapter->hw; in igc_resume()
7164 return -ENODEV; in igc_resume()
7177 return -ENOMEM; in igc_resume()
7222 return -EBUSY; in igc_runtime_idle()
7239 * igc_io_error_detected - called when PCI error is detected
7266 * igc_io_slot_reset - called after the PCI bus has been reset.
7269 * Restart the card from scratch, as if from a cold-boot. Implementation
7270 * resembles the first-half of the igc_resume routine.
7276 struct igc_hw *hw = &adapter->hw; in igc_io_slot_reset()
7280 netdev_err(netdev, "Could not re-enable PCI device after reset\n"); in igc_io_slot_reset()
7291 * so we should re-assign it here. in igc_io_slot_reset()
7293 hw->hw_addr = adapter->io_addr; in igc_io_slot_reset()
7304 * igc_io_resume - called when traffic can start to flow again.
7309 * second-half of the igc_resume routine.
7360 * igc_reinit_queues - return error
7365 struct net_device *netdev = adapter->netdev; in igc_reinit_queues()
7375 return -ENOMEM; in igc_reinit_queues()
7385 * igc_get_hw_dev - return device
7392 struct igc_adapter *adapter = hw->back; in igc_get_hw_dev()
7394 return adapter->netdev; in igc_get_hw_dev()
7399 struct igc_hw *hw = &ring->q_vector->adapter->hw; in igc_disable_rx_ring_hw()
7400 u8 idx = ring->reg_idx; in igc_disable_rx_ring_hw()
7417 struct igc_adapter *adapter = ring->q_vector->adapter; in igc_enable_rx_ring()
7421 if (ring->xsk_pool) in igc_enable_rx_ring()
7435 struct igc_adapter *adapter = ring->q_vector->adapter; in igc_enable_tx_ring()
7441 * igc_init_module - Driver Registration Routine
7460 * igc_exit_module - Driver Exit Cleanup Routine