Lines Matching +full:free +full:- +full:flowing (all hits below are from the ixgbevf driver, drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
5 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
42 "Copyright (c) 2009 - 2018 Intel Corporation.";
56 /* ixgbevf_pci_tbl - PCI Device ID Table
84 static int debug = -1;
92 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && in ixgbevf_service_event_schedule()
93 !test_bit(__IXGBEVF_REMOVING, &adapter->state) && in ixgbevf_service_event_schedule()
94 !test_and_set_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state)) in ixgbevf_service_event_schedule()
95 queue_work(ixgbevf_wq, &adapter->service_task); in ixgbevf_service_event_schedule()
100 BUG_ON(!test_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state)); in ixgbevf_service_event_complete()
104 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state); in ixgbevf_service_event_complete()
117 struct ixgbevf_adapter *adapter = hw->back; in ixgbevf_remove_adapter()
119 if (!hw->hw_addr) in ixgbevf_remove_adapter()
121 hw->hw_addr = NULL; in ixgbevf_remove_adapter()
122 dev_err(&adapter->pdev->dev, "Adapter removed\n"); in ixgbevf_remove_adapter()
123 if (test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state)) in ixgbevf_remove_adapter()
148 u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); in ixgbevf_read_reg()
160 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
162 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
170 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_set_ivar()
172 if (direction == -1) { in ixgbevf_set_ivar()
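The hits above only show the entry check for the "other causes" path. As a minimal sketch of how an 8-bit cause-to-vector entry can be packed into a 32-bit IVAR word (the 16*(queue&1)+8*direction layout is assumed from the ixgbe register family, not visible in these hits; ivar_set_entry is a hypothetical helper):

#include <stdint.h>

/* Replace one 8-bit entry in a 32-bit IVAR word; direction is 0 for Rx,
 * 1 for Tx, matching the kernel-doc above. */
static uint32_t ivar_set_entry(uint32_t ivar, unsigned int queue,
                               unsigned int direction, uint8_t vector)
{
        unsigned int shift = 16 * (queue & 1) + 8 * direction;

        ivar &= ~((uint32_t)0xFF << shift);
        return ivar | ((uint32_t)vector << shift);
}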
192 return ring->stats.packets; in ixgbevf_get_tx_completed()
197 struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev); in ixgbevf_get_tx_pending()
198 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_get_tx_pending()
200 u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx)); in ixgbevf_get_tx_pending()
201 u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx)); in ixgbevf_get_tx_pending()
205 tail - head : (tail + ring->count - head); in ixgbevf_get_tx_pending()
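A minimal sketch of the circular-ring arithmetic in the hit above (head, tail and count are illustrative parameters, not the driver's registers):

/* Descriptors still owned by the hardware on a ring of 'count' entries. */
static unsigned int ring_pending(unsigned int head, unsigned int tail,
                                 unsigned int count)
{
        if (head == tail)
                return 0;
        return (head < tail) ? tail - head : tail + count - head;
}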
213 u32 tx_done_old = tx_ring->tx_stats.tx_done_old; in ixgbevf_check_tx_hang()
226 &tx_ring->state); in ixgbevf_check_tx_hang()
229 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &tx_ring->state); in ixgbevf_check_tx_hang()
232 tx_ring->tx_stats.tx_done_old = tx_done; in ixgbevf_check_tx_hang()
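A hedged sketch of the two-strike idea behind these fragments: a ring counts as hung only if it makes no completion progress across two consecutive checks while work is still pending (the driver tracks the "armed" state in atomic ring-state bits; plain variables are used here for illustration):

#include <stdbool.h>

static bool tx_ring_hung(unsigned int tx_done, unsigned int *tx_done_old,
                         unsigned int tx_pending, bool *armed)
{
        if (tx_done == *tx_done_old && tx_pending) {
                if (*armed)
                        return true;    /* no progress twice in a row */
                *armed = true;          /* arm and re-check next interval */
                return false;
        }
        *tx_done_old = tx_done;         /* progress was made: reset */
        *armed = false;
        return false;
}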
240 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { in ixgbevf_tx_timeout_reset()
241 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state); in ixgbevf_tx_timeout_reset()
247 * ixgbevf_tx_timeout - Respond to a Tx Hang
259 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
267 struct ixgbevf_adapter *adapter = q_vector->adapter; in ixgbevf_clean_tx_irq()
271 unsigned int budget = tx_ring->count / 2; in ixgbevf_clean_tx_irq()
272 unsigned int i = tx_ring->next_to_clean; in ixgbevf_clean_tx_irq()
274 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) in ixgbevf_clean_tx_irq()
277 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_clean_tx_irq()
279 i -= tx_ring->count; in ixgbevf_clean_tx_irq()
282 union ixgbe_adv_tx_desc *eop_desc = tx_buffer->next_to_watch; in ixgbevf_clean_tx_irq()
292 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) in ixgbevf_clean_tx_irq()
296 tx_buffer->next_to_watch = NULL; in ixgbevf_clean_tx_irq()
299 total_bytes += tx_buffer->bytecount; in ixgbevf_clean_tx_irq()
300 total_packets += tx_buffer->gso_segs; in ixgbevf_clean_tx_irq()
301 if (tx_buffer->tx_flags & IXGBE_TX_FLAGS_IPSEC) in ixgbevf_clean_tx_irq()
304 /* free the skb */ in ixgbevf_clean_tx_irq()
306 page_frag_free(tx_buffer->data); in ixgbevf_clean_tx_irq()
308 napi_consume_skb(tx_buffer->skb, napi_budget); in ixgbevf_clean_tx_irq()
311 dma_unmap_single(tx_ring->dev, in ixgbevf_clean_tx_irq()
325 i -= tx_ring->count; in ixgbevf_clean_tx_irq()
326 tx_buffer = tx_ring->tx_buffer_info; in ixgbevf_clean_tx_irq()
332 dma_unmap_page(tx_ring->dev, in ixgbevf_clean_tx_irq()
345 i -= tx_ring->count; in ixgbevf_clean_tx_irq()
346 tx_buffer = tx_ring->tx_buffer_info; in ixgbevf_clean_tx_irq()
354 budget--; in ixgbevf_clean_tx_irq()
357 i += tx_ring->count; in ixgbevf_clean_tx_irq()
358 tx_ring->next_to_clean = i; in ixgbevf_clean_tx_irq()
359 u64_stats_update_begin(&tx_ring->syncp); in ixgbevf_clean_tx_irq()
360 tx_ring->stats.bytes += total_bytes; in ixgbevf_clean_tx_irq()
361 tx_ring->stats.packets += total_packets; in ixgbevf_clean_tx_irq()
362 u64_stats_update_end(&tx_ring->syncp); in ixgbevf_clean_tx_irq()
363 q_vector->tx.total_bytes += total_bytes; in ixgbevf_clean_tx_irq()
364 q_vector->tx.total_packets += total_packets; in ixgbevf_clean_tx_irq()
365 adapter->tx_ipsec += total_ipsec; in ixgbevf_clean_tx_irq()
368 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_clean_tx_irq()
371 eop_desc = tx_ring->tx_buffer_info[i].next_to_watch; in ixgbevf_clean_tx_irq()
380 " eop_desc->wb.status <%x>\n" in ixgbevf_clean_tx_irq()
384 tx_ring->queue_index, in ixgbevf_clean_tx_irq()
385 IXGBE_READ_REG(hw, IXGBE_VFTDH(tx_ring->reg_idx)), in ixgbevf_clean_tx_irq()
386 IXGBE_READ_REG(hw, IXGBE_VFTDT(tx_ring->reg_idx)), in ixgbevf_clean_tx_irq()
387 tx_ring->next_to_use, i, in ixgbevf_clean_tx_irq()
388 eop_desc, (eop_desc ? eop_desc->wb.status : 0), in ixgbevf_clean_tx_irq()
389 tx_ring->tx_buffer_info[i].time_stamp, jiffies); in ixgbevf_clean_tx_irq()
392 netif_stop_subqueue(tx_ring->netdev, in ixgbevf_clean_tx_irq()
393 tx_ring->queue_index); in ixgbevf_clean_tx_irq()
405 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in ixgbevf_clean_tx_irq()
412 if (__netif_subqueue_stopped(tx_ring->netdev, in ixgbevf_clean_tx_irq()
413 tx_ring->queue_index) && in ixgbevf_clean_tx_irq()
414 !test_bit(__IXGBEVF_DOWN, &adapter->state)) { in ixgbevf_clean_tx_irq()
415 netif_wake_subqueue(tx_ring->netdev, in ixgbevf_clean_tx_irq()
416 tx_ring->queue_index); in ixgbevf_clean_tx_irq()
417 ++tx_ring->tx_stats.restart_queue; in ixgbevf_clean_tx_irq()
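The last few hits above are the restart-queue hysteresis. A minimal sketch of that condition, with the threshold value assumed for illustration (the driver derives its own TX_WAKE_THRESHOLD):

#include <stdbool.h>

#define SKETCH_TX_WAKE_THRESHOLD 32u    /* assumed value, not the driver's */

static bool should_wake_queue(unsigned int desc_unused, bool queue_stopped,
                              bool carrier_ok, bool adapter_down)
{
        return carrier_ok && queue_stopped && !adapter_down &&
               desc_unused >= SKETCH_TX_WAKE_THRESHOLD;
}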
425 * ixgbevf_rx_skb - Helper function to determine proper Rx method
432 napi_gro_receive(&q_vector->napi, skb); in ixgbevf_rx_skb()
447 if (!(ring->netdev->features & NETIF_F_RXHASH)) in ixgbevf_rx_hash()
450 rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & in ixgbevf_rx_hash()
456 skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), in ixgbevf_rx_hash()
462 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
474 if (!(ring->netdev->features & NETIF_F_RXCSUM)) in ixgbevf_rx_checksum()
480 ring->rx_stats.csum_err++; in ixgbevf_rx_checksum()
488 ring->rx_stats.csum_err++; in ixgbevf_rx_checksum()
493 skb->ip_summed = CHECKSUM_UNNECESSARY; in ixgbevf_rx_checksum()
497 * ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
514 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); in ixgbevf_process_skb_fields()
515 unsigned long *active_vlans = netdev_priv(rx_ring->netdev); in ixgbevf_process_skb_fields()
524 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ixgbevf_process_skb_fields()
533 rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in ixgbevf_get_rx_buffer()
534 prefetchw(rx_buffer->page); in ixgbevf_get_rx_buffer()
537 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbevf_get_rx_buffer()
538 rx_buffer->dma, in ixgbevf_get_rx_buffer()
539 rx_buffer->page_offset, in ixgbevf_get_rx_buffer()
543 rx_buffer->pagecnt_bias--; in ixgbevf_get_rx_buffer()
557 /* We are not reusing the buffer so unmap it and free in ixgbevf_put_rx_buffer()
560 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in ixgbevf_put_rx_buffer()
564 __page_frag_cache_drain(rx_buffer->page, in ixgbevf_put_rx_buffer()
565 rx_buffer->pagecnt_bias); in ixgbevf_put_rx_buffer()
569 rx_buffer->page = NULL; in ixgbevf_put_rx_buffer()
573 * ixgbevf_is_non_eop - process handling of non-EOP buffers
580 * that this is in fact a non-EOP buffer.
585 u32 ntc = rx_ring->next_to_clean + 1; in ixgbevf_is_non_eop()
588 ntc = (ntc < rx_ring->count) ? ntc : 0; in ixgbevf_is_non_eop()
589 rx_ring->next_to_clean = ntc; in ixgbevf_is_non_eop()
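A minimal sketch of the wrap-around increment used in the two hits above:

/* Advance a ring index by one, wrapping back to 0 at 'count'. */
static inline unsigned int ring_next(unsigned int i, unsigned int count)
{
        i++;
        return (i < count) ? i : 0;
}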
607 struct page *page = bi->page; in ixgbevf_alloc_mapped_page()
617 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbevf_alloc_mapped_page()
622 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in ixgbevf_alloc_mapped_page()
626 /* if mapping failed free memory back to system since in ixgbevf_alloc_mapped_page()
629 if (dma_mapping_error(rx_ring->dev, dma)) { in ixgbevf_alloc_mapped_page()
632 rx_ring->rx_stats.alloc_rx_page_failed++; in ixgbevf_alloc_mapped_page()
636 bi->dma = dma; in ixgbevf_alloc_mapped_page()
637 bi->page = page; in ixgbevf_alloc_mapped_page()
638 bi->page_offset = ixgbevf_rx_offset(rx_ring); in ixgbevf_alloc_mapped_page()
639 bi->pagecnt_bias = 1; in ixgbevf_alloc_mapped_page()
640 rx_ring->rx_stats.alloc_rx_page++; in ixgbevf_alloc_mapped_page()
646 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
655 unsigned int i = rx_ring->next_to_use; in ixgbevf_alloc_rx_buffers()
658 if (!cleaned_count || !rx_ring->netdev) in ixgbevf_alloc_rx_buffers()
662 bi = &rx_ring->rx_buffer_info[i]; in ixgbevf_alloc_rx_buffers()
663 i -= rx_ring->count; in ixgbevf_alloc_rx_buffers()
670 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in ixgbevf_alloc_rx_buffers()
671 bi->page_offset, in ixgbevf_alloc_rx_buffers()
676 * because each write-back erases this info. in ixgbevf_alloc_rx_buffers()
678 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in ixgbevf_alloc_rx_buffers()
685 bi = rx_ring->rx_buffer_info; in ixgbevf_alloc_rx_buffers()
686 i -= rx_ring->count; in ixgbevf_alloc_rx_buffers()
690 rx_desc->wb.upper.length = 0; in ixgbevf_alloc_rx_buffers()
692 cleaned_count--; in ixgbevf_alloc_rx_buffers()
695 i += rx_ring->count; in ixgbevf_alloc_rx_buffers()
697 if (rx_ring->next_to_use != i) { in ixgbevf_alloc_rx_buffers()
699 rx_ring->next_to_use = i; in ixgbevf_alloc_rx_buffers()
702 rx_ring->next_to_alloc = i; in ixgbevf_alloc_rx_buffers()
706 * applicable for weak-ordered memory model archs, in ixgbevf_alloc_rx_buffers()
707 * such as IA-64). in ixgbevf_alloc_rx_buffers()
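A minimal userspace sketch of the ordering rule the comment above is about: the descriptor stores must be globally visible before the tail (doorbell) write that lets the device fetch them. The barrier call itself is not among these hits; in the kernel it is wmb() before the tail write, and all names below are illustrative:

#include <stdatomic.h>

struct sketch_ring {
        unsigned int next_to_use;
        volatile unsigned int *tail_reg;        /* assumed MMIO doorbell */
};

static void publish_descriptors(struct sketch_ring *r, unsigned int i)
{
        r->next_to_use = i;
        atomic_thread_fence(memory_order_release);      /* stands in for wmb() */
        *r->tail_reg = i;
}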
715 * ixgbevf_cleanup_headers - Correct corrupted or empty headers
743 struct net_device *netdev = rx_ring->netdev; in ixgbevf_cleanup_headers()
745 if (!(netdev->features & NETIF_F_RXALL)) { in ixgbevf_cleanup_headers()
759 * ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
769 u16 nta = rx_ring->next_to_alloc; in ixgbevf_reuse_rx_page()
771 new_buff = &rx_ring->rx_buffer_info[nta]; in ixgbevf_reuse_rx_page()
775 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ixgbevf_reuse_rx_page()
778 new_buff->page = old_buff->page; in ixgbevf_reuse_rx_page()
779 new_buff->dma = old_buff->dma; in ixgbevf_reuse_rx_page()
780 new_buff->page_offset = old_buff->page_offset; in ixgbevf_reuse_rx_page()
781 new_buff->pagecnt_bias = old_buff->pagecnt_bias; in ixgbevf_reuse_rx_page()
786 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; in ixgbevf_can_reuse_rx_page()
787 struct page *page = rx_buffer->page; in ixgbevf_can_reuse_rx_page()
789 /* avoid re-using remote and pfmemalloc pages */ in ixgbevf_can_reuse_rx_page()
795 if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) in ixgbevf_can_reuse_rx_page()
799 (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048) in ixgbevf_can_reuse_rx_page()
801 if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET) in ixgbevf_can_reuse_rx_page()
812 rx_buffer->pagecnt_bias = USHRT_MAX; in ixgbevf_can_reuse_rx_page()
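A hedged sketch of the ownership test behind these lines: a page may be flipped back onto the ring only when the driver's own reference bias accounts for all but one of the page's references (the remote-node and pfmemalloc checks are omitted here):

#include <stdbool.h>

static bool page_is_reusable(unsigned int page_refcount,
                             unsigned int pagecnt_bias)
{
        return (page_refcount - pagecnt_bias) <= 1;
}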
819 * ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
825 * This function will add the data contained in rx_buffer->page to the skb.
839 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, in ixgbevf_add_rx_frag()
840 rx_buffer->page_offset, size, truesize); in ixgbevf_add_rx_frag()
842 rx_buffer->page_offset ^= truesize; in ixgbevf_add_rx_frag()
844 rx_buffer->page_offset += truesize; in ixgbevf_add_rx_frag()
854 unsigned int size = xdp->data_end - xdp->data; in ixgbevf_construct_skb()
858 unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end - in ixgbevf_construct_skb()
859 xdp->data_hard_start); in ixgbevf_construct_skb()
865 net_prefetch(xdp->data); in ixgbevf_construct_skb()
867 /* Note, we get here by enabling legacy-rx via: in ixgbevf_construct_skb()
869 * ethtool --set-priv-flags <dev> legacy-rx on in ixgbevf_construct_skb()
872 * opposed to having legacy-rx off, where we process XDP in ixgbevf_construct_skb()
876 * xdp->data_meta will always point to xdp->data, since in ixgbevf_construct_skb()
878 * changed in future for legacy-rx mode on, then lets also in ixgbevf_construct_skb()
879 * add xdp->data_meta handling here. in ixgbevf_construct_skb()
883 skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE); in ixgbevf_construct_skb()
890 headlen = eth_get_headlen(skb->dev, xdp->data, in ixgbevf_construct_skb()
894 memcpy(__skb_put(skb, headlen), xdp->data, in ixgbevf_construct_skb()
898 size -= headlen; in ixgbevf_construct_skb()
900 skb_add_rx_frag(skb, 0, rx_buffer->page, in ixgbevf_construct_skb()
901 (xdp->data + headlen) - in ixgbevf_construct_skb()
902 page_address(rx_buffer->page), in ixgbevf_construct_skb()
905 rx_buffer->page_offset ^= truesize; in ixgbevf_construct_skb()
907 rx_buffer->page_offset += truesize; in ixgbevf_construct_skb()
910 rx_buffer->pagecnt_bias++; in ixgbevf_construct_skb()
919 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_irq_enable_queues()
929 unsigned int metasize = xdp->data - xdp->data_meta; in ixgbevf_build_skb()
934 SKB_DATA_ALIGN(xdp->data_end - in ixgbevf_build_skb()
935 xdp->data_hard_start); in ixgbevf_build_skb()
939 /* Prefetch first cache line of first page. If xdp->data_meta in ixgbevf_build_skb()
940 * is unused, this points to xdp->data, otherwise, we likely in ixgbevf_build_skb()
944 net_prefetch(xdp->data_meta); in ixgbevf_build_skb()
947 skb = napi_build_skb(xdp->data_hard_start, truesize); in ixgbevf_build_skb()
952 skb_reserve(skb, xdp->data - xdp->data_hard_start); in ixgbevf_build_skb()
953 __skb_put(skb, xdp->data_end - xdp->data); in ixgbevf_build_skb()
959 rx_buffer->page_offset ^= truesize; in ixgbevf_build_skb()
961 rx_buffer->page_offset += truesize; in ixgbevf_build_skb()
980 len = xdp->data_end - xdp->data; in ixgbevf_xmit_xdp_ring()
985 dma = dma_map_single(ring->dev, xdp->data, len, DMA_TO_DEVICE); in ixgbevf_xmit_xdp_ring()
986 if (dma_mapping_error(ring->dev, dma)) in ixgbevf_xmit_xdp_ring()
990 i = ring->next_to_use; in ixgbevf_xmit_xdp_ring()
991 tx_buffer = &ring->tx_buffer_info[i]; in ixgbevf_xmit_xdp_ring()
995 tx_buffer->data = xdp->data; in ixgbevf_xmit_xdp_ring()
996 tx_buffer->bytecount = len; in ixgbevf_xmit_xdp_ring()
997 tx_buffer->gso_segs = 1; in ixgbevf_xmit_xdp_ring()
998 tx_buffer->protocol = 0; in ixgbevf_xmit_xdp_ring()
1003 if (!test_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state)) { in ixgbevf_xmit_xdp_ring()
1006 set_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state); in ixgbevf_xmit_xdp_ring()
1009 context_desc->vlan_macip_lens = in ixgbevf_xmit_xdp_ring()
1011 context_desc->fceof_saidx = 0; in ixgbevf_xmit_xdp_ring()
1012 context_desc->type_tucmd_mlhl = in ixgbevf_xmit_xdp_ring()
1015 context_desc->mss_l4len_idx = 0; in ixgbevf_xmit_xdp_ring()
1027 tx_desc->read.buffer_addr = cpu_to_le64(dma); in ixgbevf_xmit_xdp_ring()
1029 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); in ixgbevf_xmit_xdp_ring()
1030 tx_desc->read.olinfo_status = in ixgbevf_xmit_xdp_ring()
1039 if (i == ring->count) in ixgbevf_xmit_xdp_ring()
1042 tx_buffer->next_to_watch = tx_desc; in ixgbevf_xmit_xdp_ring()
1043 ring->next_to_use = i; in ixgbevf_xmit_xdp_ring()
1057 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ixgbevf_run_xdp()
1067 xdp_ring = adapter->xdp_ring[rx_ring->queue_index]; in ixgbevf_run_xdp()
1073 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); in ixgbevf_run_xdp()
1077 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ixgbevf_run_xdp()
1084 return ERR_PTR(-result); in ixgbevf_run_xdp()
1093 truesize = ixgbevf_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ in ixgbevf_rx_frame_truesize()
1110 rx_buffer->page_offset ^= truesize; in ixgbevf_rx_buffer_flip()
1112 rx_buffer->page_offset += truesize; in ixgbevf_rx_buffer_flip()
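A minimal sketch of the offset handling in the two hits above: when a page is split into two buffer-sized halves, XOR-ing the offset with the truesize toggles between the halves; with larger buffers the offset simply advances:

static unsigned int flip_page_offset(unsigned int page_offset,
                                     unsigned int truesize, int half_page_split)
{
        return half_page_split ? (page_offset ^ truesize)
                               : (page_offset + truesize);
}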
1121 struct ixgbevf_adapter *adapter = q_vector->adapter; in ixgbevf_clean_rx_irq()
1123 struct sk_buff *skb = rx_ring->skb; in ixgbevf_clean_rx_irq()
1131 xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq); in ixgbevf_clean_rx_irq()
1144 rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean); in ixgbevf_clean_rx_irq()
1145 size = le16_to_cpu(rx_desc->wb.upper.length); in ixgbevf_clean_rx_irq()
1162 hard_start = page_address(rx_buffer->page) + in ixgbevf_clean_rx_irq()
1163 rx_buffer->page_offset - offset; in ixgbevf_clean_rx_irq()
1173 if (PTR_ERR(skb) == -IXGBEVF_XDP_TX) { in ixgbevf_clean_rx_irq()
1178 rx_buffer->pagecnt_bias++; in ixgbevf_clean_rx_irq()
1194 rx_ring->rx_stats.alloc_rx_buff_failed++; in ixgbevf_clean_rx_irq()
1195 rx_buffer->pagecnt_bias++; in ixgbevf_clean_rx_irq()
1202 /* fetch next buffer in frame if non-eop */ in ixgbevf_clean_rx_irq()
1213 total_rx_bytes += skb->len; in ixgbevf_clean_rx_irq()
1218 if ((skb->pkt_type == PACKET_BROADCAST || in ixgbevf_clean_rx_irq()
1219 skb->pkt_type == PACKET_MULTICAST) && in ixgbevf_clean_rx_irq()
1220 ether_addr_equal(rx_ring->netdev->dev_addr, in ixgbevf_clean_rx_irq()
1221 eth_hdr(skb)->h_source)) { in ixgbevf_clean_rx_irq()
1239 rx_ring->skb = skb; in ixgbevf_clean_rx_irq()
1243 adapter->xdp_ring[rx_ring->queue_index]; in ixgbevf_clean_rx_irq()
1249 ixgbevf_write_tail(xdp_ring, xdp_ring->next_to_use); in ixgbevf_clean_rx_irq()
1252 u64_stats_update_begin(&rx_ring->syncp); in ixgbevf_clean_rx_irq()
1253 rx_ring->stats.packets += total_rx_packets; in ixgbevf_clean_rx_irq()
1254 rx_ring->stats.bytes += total_rx_bytes; in ixgbevf_clean_rx_irq()
1255 u64_stats_update_end(&rx_ring->syncp); in ixgbevf_clean_rx_irq()
1256 q_vector->rx.total_packets += total_rx_packets; in ixgbevf_clean_rx_irq()
1257 q_vector->rx.total_bytes += total_rx_bytes; in ixgbevf_clean_rx_irq()
1263  * ixgbevf_poll - NAPI polling callback
1274 struct ixgbevf_adapter *adapter = q_vector->adapter; in ixgbevf_poll()
1279 ixgbevf_for_each_ring(ring, q_vector->tx) { in ixgbevf_poll()
1290 if (q_vector->rx.count > 1) in ixgbevf_poll()
1291 per_ring_budget = max(budget/q_vector->rx.count, 1); in ixgbevf_poll()
1295 ixgbevf_for_each_ring(ring, q_vector->rx) { in ixgbevf_poll()
1307 /* Exit the polling mode, but don't re-enable interrupts if stack might in ixgbevf_poll()
1308 * poll us due to busy-polling in ixgbevf_poll()
1311 if (adapter->rx_itr_setting == 1) in ixgbevf_poll()
1313 if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && in ixgbevf_poll()
1314 !test_bit(__IXGBEVF_REMOVING, &adapter->state)) in ixgbevf_poll()
1316 BIT(q_vector->v_idx)); in ixgbevf_poll()
1319 return min(work_done, budget - 1); in ixgbevf_poll()
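A minimal sketch of the budget split visible above: when one vector services several Rx rings, the NAPI budget is divided evenly with a floor of one unit of work per ring:

static int per_ring_budget(int budget, int nr_rx_rings)
{
        int share;

        if (nr_rx_rings <= 1)
                return budget;
        share = budget / nr_rx_rings;
        return share > 1 ? share : 1;
}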
1323 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
1328 struct ixgbevf_adapter *adapter = q_vector->adapter; in ixgbevf_write_eitr()
1329 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_write_eitr()
1330 int v_idx = q_vector->v_idx; in ixgbevf_write_eitr()
1331 u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; in ixgbevf_write_eitr()
1342 * ixgbevf_configure_msix - Configure MSI-X hardware
1345 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
1353 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_configure_msix()
1354 adapter->eims_enable_mask = 0; in ixgbevf_configure_msix()
1362 q_vector = adapter->q_vector[v_idx]; in ixgbevf_configure_msix()
1364 ixgbevf_for_each_ring(ring, q_vector->rx) in ixgbevf_configure_msix()
1365 ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx); in ixgbevf_configure_msix()
1367 ixgbevf_for_each_ring(ring, q_vector->tx) in ixgbevf_configure_msix()
1368 ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); in ixgbevf_configure_msix()
1370 if (q_vector->tx.ring && !q_vector->rx.ring) { in ixgbevf_configure_msix()
1372 if (adapter->tx_itr_setting == 1) in ixgbevf_configure_msix()
1373 q_vector->itr = IXGBE_12K_ITR; in ixgbevf_configure_msix()
1375 q_vector->itr = adapter->tx_itr_setting; in ixgbevf_configure_msix()
1378 if (adapter->rx_itr_setting == 1) in ixgbevf_configure_msix()
1379 q_vector->itr = IXGBE_20K_ITR; in ixgbevf_configure_msix()
1381 q_vector->itr = adapter->rx_itr_setting; in ixgbevf_configure_msix()
1385 adapter->eims_enable_mask |= BIT(v_idx); in ixgbevf_configure_msix()
1390 ixgbevf_set_ivar(adapter, -1, 1, v_idx); in ixgbevf_configure_msix()
1392 adapter->eims_other = BIT(v_idx); in ixgbevf_configure_msix()
1393 adapter->eims_enable_mask |= adapter->eims_other; in ixgbevf_configure_msix()
1404 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
1419 int bytes = ring_container->total_bytes; in ixgbevf_update_itr()
1420 int packets = ring_container->total_packets; in ixgbevf_update_itr()
1423 u8 itr_setting = ring_container->itr; in ixgbevf_update_itr()
1429 * 0-20MB/s lowest (100000 ints/s) in ixgbevf_update_itr()
1430 * 20-100MB/s low (20000 ints/s) in ixgbevf_update_itr()
1431 * 100-1249MB/s bulk (12000 ints/s) in ixgbevf_update_itr()
1434 timepassed_us = q_vector->itr >> 2; in ixgbevf_update_itr()
1458 ring_container->total_bytes = 0; in ixgbevf_update_itr()
1459 ring_container->total_packets = 0; in ixgbevf_update_itr()
1462 ring_container->itr = itr_setting; in ixgbevf_update_itr()
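A hedged sketch of the throughput-bucketed policy described by the comment above, using the quoted MB/s ranges (bytes per microsecond is numerically MB/s); the exact thresholds the driver compares against are not among these hits:

enum itr_bucket { ITR_LOWEST, ITR_LOW, ITR_BULK };

static enum itr_bucket classify_load(unsigned long long bytes,
                                     unsigned long elapsed_us)
{
        unsigned long long mb_per_s = elapsed_us ? bytes / elapsed_us : 0;

        if (mb_per_s < 20)
                return ITR_LOWEST;      /* ~100000 ints/s */
        if (mb_per_s < 100)
                return ITR_LOW;         /* ~20000 ints/s */
        return ITR_BULK;                /* ~12000 ints/s */
}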
1467 u32 new_itr = q_vector->itr; in ixgbevf_set_itr()
1470 ixgbevf_update_itr(q_vector, &q_vector->tx); in ixgbevf_set_itr()
1471 ixgbevf_update_itr(q_vector, &q_vector->rx); in ixgbevf_set_itr()
1473 current_itr = max(q_vector->rx.itr, q_vector->tx.itr); in ixgbevf_set_itr()
1490 if (new_itr != q_vector->itr) { in ixgbevf_set_itr()
1492 new_itr = (10 * new_itr * q_vector->itr) / in ixgbevf_set_itr()
1493 ((9 * new_itr) + q_vector->itr); in ixgbevf_set_itr()
1496 q_vector->itr = new_itr; in ixgbevf_set_itr()
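The damping step in the hits above keeps the interrupt rate from jumping straight to the new target; as a minimal sketch of that formula:

/* Weighted move toward 'target'; the current value damps the change. */
static unsigned int smooth_itr(unsigned int cur, unsigned int target)
{
        if (target == cur)
                return cur;
        return (10u * target * cur) / (9u * target + cur);
}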
1505 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_msix_other()
1507 hw->mac.get_link_status = 1; in ixgbevf_msix_other()
1511 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_other); in ixgbevf_msix_other()
1517 * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
1526 if (q_vector->rx.ring || q_vector->tx.ring) in ixgbevf_msix_clean_rings()
1527 napi_schedule_irqoff(&q_vector->napi); in ixgbevf_msix_clean_rings()
1533 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1536 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1541 struct net_device *netdev = adapter->netdev; in ixgbevf_request_msix_irqs()
1542 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_request_msix_irqs()
1547 struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector]; in ixgbevf_request_msix_irqs()
1548 struct msix_entry *entry = &adapter->msix_entries[vector]; in ixgbevf_request_msix_irqs()
1550 if (q_vector->tx.ring && q_vector->rx.ring) { in ixgbevf_request_msix_irqs()
1551 snprintf(q_vector->name, sizeof(q_vector->name), in ixgbevf_request_msix_irqs()
1552 "%s-TxRx-%u", netdev->name, ri++); in ixgbevf_request_msix_irqs()
1554 } else if (q_vector->rx.ring) { in ixgbevf_request_msix_irqs()
1555 snprintf(q_vector->name, sizeof(q_vector->name), in ixgbevf_request_msix_irqs()
1556 "%s-rx-%u", netdev->name, ri++); in ixgbevf_request_msix_irqs()
1557 } else if (q_vector->tx.ring) { in ixgbevf_request_msix_irqs()
1558 snprintf(q_vector->name, sizeof(q_vector->name), in ixgbevf_request_msix_irqs()
1559 "%s-tx-%u", netdev->name, ti++); in ixgbevf_request_msix_irqs()
1564 err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0, in ixgbevf_request_msix_irqs()
1565 q_vector->name, q_vector); in ixgbevf_request_msix_irqs()
1567 hw_dbg(&adapter->hw, in ixgbevf_request_msix_irqs()
1574 err = request_irq(adapter->msix_entries[vector].vector, in ixgbevf_request_msix_irqs()
1575 &ixgbevf_msix_other, 0, netdev->name, adapter); in ixgbevf_request_msix_irqs()
1577 hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n", in ixgbevf_request_msix_irqs()
1586 vector--; in ixgbevf_request_msix_irqs()
1587 free_irq(adapter->msix_entries[vector].vector, in ixgbevf_request_msix_irqs()
1588 adapter->q_vector[vector]); in ixgbevf_request_msix_irqs()
1590 /* This failure is non-recoverable - it indicates the system is in ixgbevf_request_msix_irqs()
1600 adapter->num_msix_vectors = 0; in ixgbevf_request_msix_irqs()
1605 * ixgbevf_request_irq - initialize interrupts
1616 hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err); in ixgbevf_request_irq()
1625 if (!adapter->msix_entries) in ixgbevf_free_irq()
1628 q_vectors = adapter->num_msix_vectors; in ixgbevf_free_irq()
1629 i = q_vectors - 1; in ixgbevf_free_irq()
1631 free_irq(adapter->msix_entries[i].vector, adapter); in ixgbevf_free_irq()
1632 i--; in ixgbevf_free_irq()
1634 for (; i >= 0; i--) { in ixgbevf_free_irq()
1635 /* free only the irqs that were actually requested */ in ixgbevf_free_irq()
1636 if (!adapter->q_vector[i]->rx.ring && in ixgbevf_free_irq()
1637 !adapter->q_vector[i]->tx.ring) in ixgbevf_free_irq()
1640 free_irq(adapter->msix_entries[i].vector, in ixgbevf_free_irq()
1641 adapter->q_vector[i]); in ixgbevf_free_irq()
1646 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1651 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_irq_disable()
1660 for (i = 0; i < adapter->num_msix_vectors; i++) in ixgbevf_irq_disable()
1661 synchronize_irq(adapter->msix_entries[i].vector); in ixgbevf_irq_disable()
1665 * ixgbevf_irq_enable - Enable default interrupt generation settings
1670 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_irq_enable()
1672 IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, adapter->eims_enable_mask); in ixgbevf_irq_enable()
1673 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, adapter->eims_enable_mask); in ixgbevf_irq_enable()
1674 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, adapter->eims_enable_mask); in ixgbevf_irq_enable()
1678 * ixgbevf_configure_tx_ring - Configure 82599 VF Tx ring after Reset
1687 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_configure_tx_ring()
1688 u64 tdba = ring->dma; in ixgbevf_configure_tx_ring()
1691 u8 reg_idx = ring->reg_idx; in ixgbevf_configure_tx_ring()
1700 ring->count * sizeof(union ixgbe_adv_tx_desc)); in ixgbevf_configure_tx_ring()
1714 ring->tail = adapter->io_addr + IXGBE_VFTDT(reg_idx); in ixgbevf_configure_tx_ring()
1717 ring->next_to_clean = 0; in ixgbevf_configure_tx_ring()
1718 ring->next_to_use = 0; in ixgbevf_configure_tx_ring()
1731 memset(ring->tx_buffer_info, 0, in ixgbevf_configure_tx_ring()
1732 sizeof(struct ixgbevf_tx_buffer) * ring->count); in ixgbevf_configure_tx_ring()
1734 clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state); in ixgbevf_configure_tx_ring()
1735 clear_bit(__IXGBEVF_TX_XDP_RING_PRIMED, &ring->state); in ixgbevf_configure_tx_ring()
1743 } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); in ixgbevf_configure_tx_ring()
1749 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1759 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbevf_configure_tx()
1760 ixgbevf_configure_tx_ring(adapter, adapter->tx_ring[i]); in ixgbevf_configure_tx()
1761 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbevf_configure_tx()
1762 ixgbevf_configure_tx_ring(adapter, adapter->xdp_ring[i]); in ixgbevf_configure_tx()
1770 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_configure_srrctl()
1787 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_setup_psrtype()
1794 if (adapter->num_rx_queues > 1) in ixgbevf_setup_psrtype()
1804 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_disable_rx_queue()
1807 u8 reg_idx = ring->reg_idx; in ixgbevf_disable_rx_queue()
1809 if (IXGBE_REMOVED(hw->hw_addr)) in ixgbevf_disable_rx_queue()
1821 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE)); in ixgbevf_disable_rx_queue()
1831 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_rx_desc_queue_enable()
1834 u8 reg_idx = ring->reg_idx; in ixgbevf_rx_desc_queue_enable()
1836 if (IXGBE_REMOVED(hw->hw_addr)) in ixgbevf_rx_desc_queue_enable()
1841 } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); in ixgbevf_rx_desc_queue_enable()
1849 * ixgbevf_init_rss_key - Initialize adapter RSS key
1858 if (!adapter->rss_key) { in ixgbevf_init_rss_key()
1861 return -ENOMEM; in ixgbevf_init_rss_key()
1864 adapter->rss_key = rss_key; in ixgbevf_init_rss_key()
1872 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_setup_vfmrqc()
1874 u16 rss_i = adapter->num_rx_queues; in ixgbevf_setup_vfmrqc()
1879 IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), *(adapter->rss_key + i)); in ixgbevf_setup_vfmrqc()
1885 adapter->rss_indir_tbl[i] = j; in ixgbevf_setup_vfmrqc()
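A minimal sketch of the round-robin fill in the hit above: every indirection-table slot points at the next Rx queue, wrapping when the queue count is reached (the table size below is assumed for illustration):

#define SKETCH_RETA_SIZE 64     /* assumed size, not taken from these hits */

static void fill_reta(unsigned char reta[SKETCH_RETA_SIZE],
                      unsigned int nr_rx_queues)
{
        unsigned int i, j = 0;

        for (i = 0; i < SKETCH_RETA_SIZE; i++) {
                if (j == nr_rx_queues)
                        j = 0;
                reta[i] = j++;
        }
}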
1908 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_configure_rx_ring()
1910 u64 rdba = ring->dma; in ixgbevf_configure_rx_ring()
1912 u8 reg_idx = ring->reg_idx; in ixgbevf_configure_rx_ring()
1921 ring->count * sizeof(union ixgbe_adv_rx_desc)); in ixgbevf_configure_rx_ring()
1936 ring->tail = adapter->io_addr + IXGBE_VFRDT(reg_idx); in ixgbevf_configure_rx_ring()
1939 memset(ring->rx_buffer_info, 0, in ixgbevf_configure_rx_ring()
1940 sizeof(struct ixgbevf_rx_buffer) * ring->count); in ixgbevf_configure_rx_ring()
1944 rx_desc->wb.upper.length = 0; in ixgbevf_configure_rx_ring()
1947 ring->next_to_clean = 0; in ixgbevf_configure_rx_ring()
1948 ring->next_to_use = 0; in ixgbevf_configure_rx_ring()
1949 ring->next_to_alloc = 0; in ixgbevf_configure_rx_ring()
1954 if (adapter->hw.mac.type != ixgbe_mac_82599_vf) { in ixgbevf_configure_rx_ring()
1977 struct net_device *netdev = adapter->netdev; in ixgbevf_set_rx_buffer_len()
1978 unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; in ixgbevf_set_rx_buffer_len()
1984 if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX) in ixgbevf_set_rx_buffer_len()
1992 if (adapter->hw.mac.type == ixgbe_mac_82599_vf && !ring_uses_large_buffer(rx_ring)) in ixgbevf_set_rx_buffer_len()
1999 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
2006 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_configure_rx()
2007 struct net_device *netdev = adapter->netdev; in ixgbevf_configure_rx()
2011 if (hw->mac.type >= ixgbe_mac_X550_vf) in ixgbevf_configure_rx()
2014 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_configure_rx()
2016 ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); in ixgbevf_configure_rx()
2017 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_configure_rx()
2019 dev_err(&adapter->pdev->dev, in ixgbevf_configure_rx()
2020 "Failed to set MTU at %d\n", netdev->mtu); in ixgbevf_configure_rx()
2025 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbevf_configure_rx()
2026 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i]; in ixgbevf_configure_rx()
2037 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_vlan_rx_add_vid()
2040 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_vlan_rx_add_vid()
2043 err = hw->mac.ops.set_vfta(hw, vid, 0, true); in ixgbevf_vlan_rx_add_vid()
2045 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_vlan_rx_add_vid()
2052 return -EIO; in ixgbevf_vlan_rx_add_vid()
2055 return -EACCES; in ixgbevf_vlan_rx_add_vid()
2058 set_bit(vid, adapter->active_vlans); in ixgbevf_vlan_rx_add_vid()
2067 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_vlan_rx_kill_vid()
2070 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_vlan_rx_kill_vid()
2073 err = hw->mac.ops.set_vfta(hw, vid, 0, false); in ixgbevf_vlan_rx_kill_vid()
2075 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_vlan_rx_kill_vid()
2080 clear_bit(vid, adapter->active_vlans); in ixgbevf_vlan_rx_kill_vid()
2089 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) in ixgbevf_restore_vlan()
2090 ixgbevf_vlan_rx_add_vid(adapter->netdev, in ixgbevf_restore_vlan()
2097 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_write_uc_addr_list()
2104 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); in ixgbevf_write_uc_addr_list()
2111 hw->mac.ops.set_uc_addr(hw, 0, NULL); in ixgbevf_write_uc_addr_list()
2118 * ixgbevf_set_rx_mode - Multicast and unicast set
2129 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_set_rx_mode()
2130 unsigned int flags = netdev->flags; in ixgbevf_set_rx_mode()
2143 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_set_rx_mode()
2145 hw->mac.ops.update_xcast_mode(hw, xcast_mode); in ixgbevf_set_rx_mode()
2148 hw->mac.ops.update_mc_addr_list(hw, netdev); in ixgbevf_set_rx_mode()
2152 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_set_rx_mode()
2159 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_napi_enable_all()
2162 q_vector = adapter->q_vector[q_idx]; in ixgbevf_napi_enable_all()
2163 napi_enable(&q_vector->napi); in ixgbevf_napi_enable_all()
2171 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_napi_disable_all()
2174 q_vector = adapter->q_vector[q_idx]; in ixgbevf_napi_disable_all()
2175 napi_disable(&q_vector->napi); in ixgbevf_napi_disable_all()
2181 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_configure_dcb()
2184 unsigned int num_rx_queues = adapter->num_rx_queues; in ixgbevf_configure_dcb()
2185 unsigned int num_tx_queues = adapter->num_tx_queues; in ixgbevf_configure_dcb()
2188 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_configure_dcb()
2193 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_configure_dcb()
2203 adapter->tx_ring[0]->reg_idx = def_q; in ixgbevf_configure_dcb()
2210 if ((adapter->num_rx_queues != num_rx_queues) || in ixgbevf_configure_dcb()
2211 (adapter->num_tx_queues != num_tx_queues)) { in ixgbevf_configure_dcb()
2213 hw->mbx.timeout = 0; in ixgbevf_configure_dcb()
2216 set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state); in ixgbevf_configure_dcb()
2226 ixgbevf_set_rx_mode(adapter->netdev); in ixgbevf_configure()
2237 /* Only save pre-reset stats if there are some */ in ixgbevf_save_reset_stats()
2238 if (adapter->stats.vfgprc || adapter->stats.vfgptc) { in ixgbevf_save_reset_stats()
2239 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - in ixgbevf_save_reset_stats()
2240 adapter->stats.base_vfgprc; in ixgbevf_save_reset_stats()
2241 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - in ixgbevf_save_reset_stats()
2242 adapter->stats.base_vfgptc; in ixgbevf_save_reset_stats()
2243 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - in ixgbevf_save_reset_stats()
2244 adapter->stats.base_vfgorc; in ixgbevf_save_reset_stats()
2245 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - in ixgbevf_save_reset_stats()
2246 adapter->stats.base_vfgotc; in ixgbevf_save_reset_stats()
2247 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - in ixgbevf_save_reset_stats()
2248 adapter->stats.base_vfmprc; in ixgbevf_save_reset_stats()
2254 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_init_last_counter_stats()
2256 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); in ixgbevf_init_last_counter_stats()
2257 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); in ixgbevf_init_last_counter_stats()
2258 adapter->stats.last_vfgorc |= in ixgbevf_init_last_counter_stats()
2260 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); in ixgbevf_init_last_counter_stats()
2261 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); in ixgbevf_init_last_counter_stats()
2262 adapter->stats.last_vfgotc |= in ixgbevf_init_last_counter_stats()
2264 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); in ixgbevf_init_last_counter_stats()
2266 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; in ixgbevf_init_last_counter_stats()
2267 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; in ixgbevf_init_last_counter_stats()
2268 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; in ixgbevf_init_last_counter_stats()
2269 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; in ixgbevf_init_last_counter_stats()
2270 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; in ixgbevf_init_last_counter_stats()
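The octet counters above are read as a 32-bit LSB register plus a few MSB bits OR-ed in. A hedged sketch, assuming 4 significant MSB bits (a 36-bit counter; the exact width is not shown in these hits):

#include <stdint.h>

static uint64_t combine_counter(uint32_t lsb, uint32_t msb)
{
        return (uint64_t)lsb | (((uint64_t)msb & 0xF) << 32);
}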
2275 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_negotiate_api()
2287 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_negotiate_api()
2290 err = hw->mac.ops.negotiate_api_version(hw, api[idx]); in ixgbevf_negotiate_api()
2296 if (hw->api_version >= ixgbe_mbox_api_15) { in ixgbevf_negotiate_api()
2297 hw->mbx.ops.init_params(hw); in ixgbevf_negotiate_api()
2298 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, in ixgbevf_negotiate_api()
2302 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_negotiate_api()
2307 struct net_device *netdev = adapter->netdev; in ixgbevf_up_complete()
2308 struct pci_dev *pdev = adapter->pdev; in ixgbevf_up_complete()
2309 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_up_complete()
2314 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_up_complete()
2316 if (is_valid_ether_addr(hw->mac.addr)) in ixgbevf_up_complete()
2317 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); in ixgbevf_up_complete()
2319 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); in ixgbevf_up_complete()
2321 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_up_complete()
2323 state = adapter->link_state; in ixgbevf_up_complete()
2324 hw->mac.ops.get_link_state(hw, &adapter->link_state); in ixgbevf_up_complete()
2325 if (state && state != adapter->link_state) in ixgbevf_up_complete()
2326 dev_info(&pdev->dev, "VF is administratively disabled\n"); in ixgbevf_up_complete()
2329 clear_bit(__IXGBEVF_DOWN, &adapter->state); in ixgbevf_up_complete()
2342 hw->mac.get_link_status = 1; in ixgbevf_up_complete()
2343 mod_timer(&adapter->service_timer, jiffies); in ixgbevf_up_complete()
2354 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
2355 * @rx_ring: ring to free buffers from
2359 u16 i = rx_ring->next_to_clean; in ixgbevf_clean_rx_ring()
2361 /* Free Rx ring sk_buff */ in ixgbevf_clean_rx_ring()
2362 if (rx_ring->skb) { in ixgbevf_clean_rx_ring()
2363 dev_kfree_skb(rx_ring->skb); in ixgbevf_clean_rx_ring()
2364 rx_ring->skb = NULL; in ixgbevf_clean_rx_ring()
2367 /* Free all the Rx ring pages */ in ixgbevf_clean_rx_ring()
2368 while (i != rx_ring->next_to_alloc) { in ixgbevf_clean_rx_ring()
2371 rx_buffer = &rx_ring->rx_buffer_info[i]; in ixgbevf_clean_rx_ring()
2376 dma_sync_single_range_for_cpu(rx_ring->dev, in ixgbevf_clean_rx_ring()
2377 rx_buffer->dma, in ixgbevf_clean_rx_ring()
2378 rx_buffer->page_offset, in ixgbevf_clean_rx_ring()
2382 /* free resources associated with mapping */ in ixgbevf_clean_rx_ring()
2383 dma_unmap_page_attrs(rx_ring->dev, in ixgbevf_clean_rx_ring()
2384 rx_buffer->dma, in ixgbevf_clean_rx_ring()
2389 __page_frag_cache_drain(rx_buffer->page, in ixgbevf_clean_rx_ring()
2390 rx_buffer->pagecnt_bias); in ixgbevf_clean_rx_ring()
2393 if (i == rx_ring->count) in ixgbevf_clean_rx_ring()
2397 rx_ring->next_to_alloc = 0; in ixgbevf_clean_rx_ring()
2398 rx_ring->next_to_clean = 0; in ixgbevf_clean_rx_ring()
2399 rx_ring->next_to_use = 0; in ixgbevf_clean_rx_ring()
2403 * ixgbevf_clean_tx_ring - Free Tx Buffers
2408 u16 i = tx_ring->next_to_clean; in ixgbevf_clean_tx_ring()
2409 struct ixgbevf_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_clean_tx_ring()
2411 while (i != tx_ring->next_to_use) { in ixgbevf_clean_tx_ring()
2414 /* Free all the Tx ring sk_buffs */ in ixgbevf_clean_tx_ring()
2416 page_frag_free(tx_buffer->data); in ixgbevf_clean_tx_ring()
2418 dev_kfree_skb_any(tx_buffer->skb); in ixgbevf_clean_tx_ring()
2421 dma_unmap_single(tx_ring->dev, in ixgbevf_clean_tx_ring()
2427 eop_desc = tx_buffer->next_to_watch; in ixgbevf_clean_tx_ring()
2435 if (unlikely(i == tx_ring->count)) { in ixgbevf_clean_tx_ring()
2437 tx_buffer = tx_ring->tx_buffer_info; in ixgbevf_clean_tx_ring()
2443 dma_unmap_page(tx_ring->dev, in ixgbevf_clean_tx_ring()
2452 if (unlikely(i == tx_ring->count)) { in ixgbevf_clean_tx_ring()
2454 tx_buffer = tx_ring->tx_buffer_info; in ixgbevf_clean_tx_ring()
2459 tx_ring->next_to_use = 0; in ixgbevf_clean_tx_ring()
2460 tx_ring->next_to_clean = 0; in ixgbevf_clean_tx_ring()
2465 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
2472 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbevf_clean_all_rx_rings()
2473 ixgbevf_clean_rx_ring(adapter->rx_ring[i]); in ixgbevf_clean_all_rx_rings()
2477 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
2484 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbevf_clean_all_tx_rings()
2485 ixgbevf_clean_tx_ring(adapter->tx_ring[i]); in ixgbevf_clean_all_tx_rings()
2486 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbevf_clean_all_tx_rings()
2487 ixgbevf_clean_tx_ring(adapter->xdp_ring[i]); in ixgbevf_clean_all_tx_rings()
2492 struct net_device *netdev = adapter->netdev; in ixgbevf_down()
2493 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_down()
2497 if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state)) in ixgbevf_down()
2501 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbevf_down()
2502 ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); in ixgbevf_down()
2516 del_timer_sync(&adapter->service_timer); in ixgbevf_down()
2519 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbevf_down()
2520 u8 reg_idx = adapter->tx_ring[i]->reg_idx; in ixgbevf_down()
2526 for (i = 0; i < adapter->num_xdp_queues; i++) { in ixgbevf_down()
2527 u8 reg_idx = adapter->xdp_ring[i]->reg_idx; in ixgbevf_down()
2533 if (!pci_channel_offline(adapter->pdev)) in ixgbevf_down()
2542 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_reinit_locked()
2546 pci_set_master(adapter->pdev); in ixgbevf_reinit_locked()
2549 clear_bit(__IXGBEVF_RESETTING, &adapter->state); in ixgbevf_reinit_locked()
2554 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_reset()
2555 struct net_device *netdev = adapter->netdev; in ixgbevf_reset()
2557 if (hw->mac.ops.reset_hw(hw)) { in ixgbevf_reset()
2560 hw->mac.ops.init_hw(hw); in ixgbevf_reset()
2564 if (is_valid_ether_addr(adapter->hw.mac.addr)) { in ixgbevf_reset()
2565 eth_hw_addr_set(netdev, adapter->hw.mac.addr); in ixgbevf_reset()
2566 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr); in ixgbevf_reset()
2569 adapter->last_reset = jiffies; in ixgbevf_reset()
2588 vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, in ixgbevf_acquire_msix_vectors()
2592 dev_err(&adapter->pdev->dev, in ixgbevf_acquire_msix_vectors()
2593 "Unable to allocate MSI-X interrupts\n"); in ixgbevf_acquire_msix_vectors()
2594 kfree(adapter->msix_entries); in ixgbevf_acquire_msix_vectors()
2595 adapter->msix_entries = NULL; in ixgbevf_acquire_msix_vectors()
2603 adapter->num_msix_vectors = vectors; in ixgbevf_acquire_msix_vectors()
2609 * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
2621 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_set_num_queues()
2627 adapter->num_rx_queues = 1; in ixgbevf_set_num_queues()
2628 adapter->num_tx_queues = 1; in ixgbevf_set_num_queues()
2629 adapter->num_xdp_queues = 0; in ixgbevf_set_num_queues()
2631 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_set_num_queues()
2636 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_set_num_queues()
2643 adapter->num_rx_queues = num_tcs; in ixgbevf_set_num_queues()
2647 switch (hw->api_version) { in ixgbevf_set_num_queues()
2653 if (adapter->xdp_prog && in ixgbevf_set_num_queues()
2654 hw->mac.max_tx_queues == rss) in ixgbevf_set_num_queues()
2657 adapter->num_rx_queues = rss; in ixgbevf_set_num_queues()
2658 adapter->num_tx_queues = rss; in ixgbevf_set_num_queues()
2659 adapter->num_xdp_queues = adapter->xdp_prog ? rss : 0; in ixgbevf_set_num_queues()
2668 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2678 /* It's easy to be greedy for MSI-X vectors, but it really in ixgbevf_set_interrupt_capability()
2684 v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); in ixgbevf_set_interrupt_capability()
2688 adapter->msix_entries = kcalloc(v_budget, in ixgbevf_set_interrupt_capability()
2690 if (!adapter->msix_entries) in ixgbevf_set_interrupt_capability()
2691 return -ENOMEM; in ixgbevf_set_interrupt_capability()
2694 adapter->msix_entries[vector].entry = vector; in ixgbevf_set_interrupt_capability()
2696 /* A failure in MSI-X entry allocation isn't fatal, but the VF driver in ixgbevf_set_interrupt_capability()
2698  * that we clean up the msix_entries pointer elsewhere. in ixgbevf_set_interrupt_capability()
2706 ring->next = head->ring; in ixgbevf_add_ring()
2707 head->ring = ring; in ixgbevf_add_ring()
2708 head->count++; in ixgbevf_add_ring()
2712 * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector
2722 * We allocate one q_vector. If allocation fails we return -ENOMEM.
2740 return -ENOMEM; in ixgbevf_alloc_q_vector()
2743 netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll); in ixgbevf_alloc_q_vector()
2746 adapter->q_vector[v_idx] = q_vector; in ixgbevf_alloc_q_vector()
2747 q_vector->adapter = adapter; in ixgbevf_alloc_q_vector()
2748 q_vector->v_idx = v_idx; in ixgbevf_alloc_q_vector()
2751 ring = q_vector->ring; in ixgbevf_alloc_q_vector()
2755 ring->dev = &adapter->pdev->dev; in ixgbevf_alloc_q_vector()
2756 ring->netdev = adapter->netdev; in ixgbevf_alloc_q_vector()
2759 ring->q_vector = q_vector; in ixgbevf_alloc_q_vector()
2762 ixgbevf_add_ring(ring, &q_vector->tx); in ixgbevf_alloc_q_vector()
2765 ring->count = adapter->tx_ring_count; in ixgbevf_alloc_q_vector()
2766 ring->queue_index = txr_idx; in ixgbevf_alloc_q_vector()
2767 ring->reg_idx = reg_idx; in ixgbevf_alloc_q_vector()
2770 adapter->tx_ring[txr_idx] = ring; in ixgbevf_alloc_q_vector()
2773 txr_count--; in ixgbevf_alloc_q_vector()
2783 ring->dev = &adapter->pdev->dev; in ixgbevf_alloc_q_vector()
2784 ring->netdev = adapter->netdev; in ixgbevf_alloc_q_vector()
2787 ring->q_vector = q_vector; in ixgbevf_alloc_q_vector()
2790 ixgbevf_add_ring(ring, &q_vector->tx); in ixgbevf_alloc_q_vector()
2793 ring->count = adapter->tx_ring_count; in ixgbevf_alloc_q_vector()
2794 ring->queue_index = xdp_idx; in ixgbevf_alloc_q_vector()
2795 ring->reg_idx = reg_idx; in ixgbevf_alloc_q_vector()
2799 adapter->xdp_ring[xdp_idx] = ring; in ixgbevf_alloc_q_vector()
2802 xdp_count--; in ixgbevf_alloc_q_vector()
2812 ring->dev = &adapter->pdev->dev; in ixgbevf_alloc_q_vector()
2813 ring->netdev = adapter->netdev; in ixgbevf_alloc_q_vector()
2816 ring->q_vector = q_vector; in ixgbevf_alloc_q_vector()
2819 ixgbevf_add_ring(ring, &q_vector->rx); in ixgbevf_alloc_q_vector()
2822 ring->count = adapter->rx_ring_count; in ixgbevf_alloc_q_vector()
2823 ring->queue_index = rxr_idx; in ixgbevf_alloc_q_vector()
2824 ring->reg_idx = rxr_idx; in ixgbevf_alloc_q_vector()
2827 adapter->rx_ring[rxr_idx] = ring; in ixgbevf_alloc_q_vector()
2830 rxr_count--; in ixgbevf_alloc_q_vector()
2841 * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector
2851 struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx]; in ixgbevf_free_q_vector()
2854 ixgbevf_for_each_ring(ring, q_vector->tx) { in ixgbevf_free_q_vector()
2856 adapter->xdp_ring[ring->queue_index] = NULL; in ixgbevf_free_q_vector()
2858 adapter->tx_ring[ring->queue_index] = NULL; in ixgbevf_free_q_vector()
2861 ixgbevf_for_each_ring(ring, q_vector->rx) in ixgbevf_free_q_vector()
2862 adapter->rx_ring[ring->queue_index] = NULL; in ixgbevf_free_q_vector()
2864 adapter->q_vector[v_idx] = NULL; in ixgbevf_free_q_vector()
2865 netif_napi_del(&q_vector->napi); in ixgbevf_free_q_vector()
2874 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2878 * return -ENOMEM.
2882 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_alloc_q_vectors()
2883 int rxr_remaining = adapter->num_rx_queues; in ixgbevf_alloc_q_vectors()
2884 int txr_remaining = adapter->num_tx_queues; in ixgbevf_alloc_q_vectors()
2885 int xdp_remaining = adapter->num_xdp_queues; in ixgbevf_alloc_q_vectors()
2890 for (; rxr_remaining; v_idx++, q_vectors--) { in ixgbevf_alloc_q_vectors()
2899 rxr_remaining -= rqpv; in ixgbevf_alloc_q_vectors()
2904 for (; q_vectors; v_idx++, q_vectors--) { in ixgbevf_alloc_q_vectors()
2918 rxr_remaining -= rqpv; in ixgbevf_alloc_q_vectors()
2920 txr_remaining -= tqpv; in ixgbevf_alloc_q_vectors()
2922 xdp_remaining -= xqpv; in ixgbevf_alloc_q_vectors()
2930 v_idx--; in ixgbevf_alloc_q_vectors()
2934 return -ENOMEM; in ixgbevf_alloc_q_vectors()
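A minimal sketch of how the loops above spread the remaining rings over the remaining vectors: a ceiling division per step, so earlier vectors absorb any remainder:

/* DIV_ROUND_UP(rings_left, vectors_left) */
static unsigned int rings_per_vector(unsigned int rings_left,
                                     unsigned int vectors_left)
{
        return (rings_left + vectors_left - 1) / vectors_left;
}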
2938 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2947 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; in ixgbevf_free_q_vectors()
2950 q_vectors--; in ixgbevf_free_q_vectors()
2956 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2962 if (!adapter->msix_entries) in ixgbevf_reset_interrupt_capability()
2965 pci_disable_msix(adapter->pdev); in ixgbevf_reset_interrupt_capability()
2966 kfree(adapter->msix_entries); in ixgbevf_reset_interrupt_capability()
2967 adapter->msix_entries = NULL; in ixgbevf_reset_interrupt_capability()
2971 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2984 hw_dbg(&adapter->hw, in ixgbevf_init_interrupt_scheme()
2991 hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n"); in ixgbevf_init_interrupt_scheme()
2995  hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u XDP Queue count %u\n", in ixgbevf_init_interrupt_scheme()
2996 (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", in ixgbevf_init_interrupt_scheme()
2997 adapter->num_rx_queues, adapter->num_tx_queues, in ixgbevf_init_interrupt_scheme()
2998 adapter->num_xdp_queues); in ixgbevf_init_interrupt_scheme()
3000 set_bit(__IXGBEVF_DOWN, &adapter->state); in ixgbevf_init_interrupt_scheme()
3010 * ixgbevf_clear_interrupt_scheme - Clear the current interrupt scheme settings
3014 * to pre-load conditions
3018 adapter->num_tx_queues = 0; in ixgbevf_clear_interrupt_scheme()
3019 adapter->num_xdp_queues = 0; in ixgbevf_clear_interrupt_scheme()
3020 adapter->num_rx_queues = 0; in ixgbevf_clear_interrupt_scheme()
3027 * ixgbevf_sw_init - Initialize general software structures
3036 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_sw_init()
3037 struct pci_dev *pdev = adapter->pdev; in ixgbevf_sw_init()
3038 struct net_device *netdev = adapter->netdev; in ixgbevf_sw_init()
3042 hw->vendor_id = pdev->vendor; in ixgbevf_sw_init()
3043 hw->device_id = pdev->device; in ixgbevf_sw_init()
3044 hw->revision_id = pdev->revision; in ixgbevf_sw_init()
3045 hw->subsystem_vendor_id = pdev->subsystem_vendor; in ixgbevf_sw_init()
3046 hw->subsystem_device_id = pdev->subsystem_device; in ixgbevf_sw_init()
3048 hw->mbx.ops.init_params(hw); in ixgbevf_sw_init()
3050 if (hw->mac.type >= ixgbe_mac_X550_vf) { in ixgbevf_sw_init()
3057 hw->mac.max_tx_queues = 2; in ixgbevf_sw_init()
3058 hw->mac.max_rx_queues = 2; in ixgbevf_sw_init()
3061 spin_lock_init(&adapter->mbx_lock); in ixgbevf_sw_init()
3063 err = hw->mac.ops.reset_hw(hw); in ixgbevf_sw_init()
3065 dev_info(&pdev->dev, in ixgbevf_sw_init()
3068 err = hw->mac.ops.init_hw(hw); in ixgbevf_sw_init()
3074 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr); in ixgbevf_sw_init()
3076 dev_info(&pdev->dev, "Error reading MAC address\n"); in ixgbevf_sw_init()
3077 else if (is_zero_ether_addr(adapter->hw.mac.addr)) in ixgbevf_sw_init()
3078 dev_info(&pdev->dev, in ixgbevf_sw_init()
3080 eth_hw_addr_set(netdev, hw->mac.addr); in ixgbevf_sw_init()
3083 if (!is_valid_ether_addr(netdev->dev_addr)) { in ixgbevf_sw_init()
3084 dev_info(&pdev->dev, "Assigning random MAC address\n"); in ixgbevf_sw_init()
3086 ether_addr_copy(hw->mac.addr, netdev->dev_addr); in ixgbevf_sw_init()
3087 ether_addr_copy(hw->mac.perm_addr, netdev->dev_addr); in ixgbevf_sw_init()
3091 adapter->rx_itr_setting = 1; in ixgbevf_sw_init()
3092 adapter->tx_itr_setting = 1; in ixgbevf_sw_init()
3095 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; in ixgbevf_sw_init()
3096 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; in ixgbevf_sw_init()
3098 adapter->link_state = true; in ixgbevf_sw_init()
3100 set_bit(__IXGBEVF_DOWN, &adapter->state); in ixgbevf_sw_init()
3130 * ixgbevf_update_stats - Update the board statistics counters.
3135 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_update_stats()
3140 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || in ixgbevf_update_stats()
3141 test_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_update_stats()
3144 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, in ixgbevf_update_stats()
3145 adapter->stats.vfgprc); in ixgbevf_update_stats()
3146 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, in ixgbevf_update_stats()
3147 adapter->stats.vfgptc); in ixgbevf_update_stats()
3149 adapter->stats.last_vfgorc, in ixgbevf_update_stats()
3150 adapter->stats.vfgorc); in ixgbevf_update_stats()
3152 adapter->stats.last_vfgotc, in ixgbevf_update_stats()
3153 adapter->stats.vfgotc); in ixgbevf_update_stats()
3154 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, in ixgbevf_update_stats()
3155 adapter->stats.vfmprc); in ixgbevf_update_stats()
3157 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbevf_update_stats()
3158 struct ixgbevf_ring *rx_ring = adapter->rx_ring[i]; in ixgbevf_update_stats()
3160 hw_csum_rx_error += rx_ring->rx_stats.csum_err; in ixgbevf_update_stats()
3161 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed; in ixgbevf_update_stats()
3162 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; in ixgbevf_update_stats()
3163 alloc_rx_page += rx_ring->rx_stats.alloc_rx_page; in ixgbevf_update_stats()
3166 adapter->hw_csum_rx_error = hw_csum_rx_error; in ixgbevf_update_stats()
3167 adapter->alloc_rx_page_failed = alloc_rx_page_failed; in ixgbevf_update_stats()
3168 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; in ixgbevf_update_stats()
3169 adapter->alloc_rx_page = alloc_rx_page; in ixgbevf_update_stats()
3173 * ixgbevf_service_timer - Timer Call-back
3182 mod_timer(&adapter->service_timer, (HZ * 2) + jiffies); in ixgbevf_service_timer()
3189 if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state)) in ixgbevf_reset_subtask()
3194 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || in ixgbevf_reset_subtask()
3195 test_bit(__IXGBEVF_REMOVING, &adapter->state) || in ixgbevf_reset_subtask()
3196 test_bit(__IXGBEVF_RESETTING, &adapter->state)) { in ixgbevf_reset_subtask()
3201 adapter->tx_timeout_count++; in ixgbevf_reset_subtask()
3208 * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
3218 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_check_hang_subtask()
3223 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || in ixgbevf_check_hang_subtask()
3224 test_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_check_hang_subtask()
3228 if (netif_carrier_ok(adapter->netdev)) { in ixgbevf_check_hang_subtask()
3229 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbevf_check_hang_subtask()
3230 set_check_for_tx_hang(adapter->tx_ring[i]); in ixgbevf_check_hang_subtask()
3231 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbevf_check_hang_subtask()
3232 set_check_for_tx_hang(adapter->xdp_ring[i]); in ixgbevf_check_hang_subtask()
3236 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { in ixgbevf_check_hang_subtask()
3237 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; in ixgbevf_check_hang_subtask()
3239 if (qv->rx.ring || qv->tx.ring) in ixgbevf_check_hang_subtask()
3248 * ixgbevf_watchdog_update_link - update the link status
3253 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_watchdog_update_link()
3254 u32 link_speed = adapter->link_speed; in ixgbevf_watchdog_update_link()
3255 bool link_up = adapter->link_up; in ixgbevf_watchdog_update_link()
3258 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_watchdog_update_link()
3260 err = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); in ixgbevf_watchdog_update_link()
3262 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_watchdog_update_link()
3265 if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) { in ixgbevf_watchdog_update_link()
3266 set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state); in ixgbevf_watchdog_update_link()
3270 adapter->link_up = link_up; in ixgbevf_watchdog_update_link()
3271 adapter->link_speed = link_speed; in ixgbevf_watchdog_update_link()
3275 * ixgbevf_watchdog_link_is_up - update netif_carrier status and
3281 struct net_device *netdev = adapter->netdev; in ixgbevf_watchdog_link_is_up()
3287 dev_info(&adapter->pdev->dev, "NIC Link is Up %s\n", in ixgbevf_watchdog_link_is_up()
3288 (adapter->link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? in ixgbevf_watchdog_link_is_up()
3290 (adapter->link_speed == IXGBE_LINK_SPEED_1GB_FULL) ? in ixgbevf_watchdog_link_is_up()
3292 (adapter->link_speed == IXGBE_LINK_SPEED_100_FULL) ? in ixgbevf_watchdog_link_is_up()
3300 * ixgbevf_watchdog_link_is_down - update netif_carrier status and
3306 struct net_device *netdev = adapter->netdev; in ixgbevf_watchdog_link_is_down()
3308 adapter->link_speed = 0; in ixgbevf_watchdog_link_is_down()
3314 dev_info(&adapter->pdev->dev, "NIC Link is Down\n"); in ixgbevf_watchdog_link_is_down()
3320 * ixgbevf_watchdog_subtask - worker thread to bring link up
3326 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || in ixgbevf_watchdog_subtask()
3327 test_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_watchdog_subtask()
3332 if (adapter->link_up && adapter->link_state) in ixgbevf_watchdog_subtask()
3341 * ixgbevf_service_task - manages and runs subtasks
3349 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_service_task()
3351 if (IXGBE_REMOVED(hw->hw_addr)) { in ixgbevf_service_task()
3352 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) { in ixgbevf_service_task()
3369 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
3372 * Free all transmit software resources
3378 vfree(tx_ring->tx_buffer_info); in ixgbevf_free_tx_resources()
3379 tx_ring->tx_buffer_info = NULL; in ixgbevf_free_tx_resources()
3381 /* if not set, then don't free */ in ixgbevf_free_tx_resources()
3382 if (!tx_ring->desc) in ixgbevf_free_tx_resources()
3385 dma_free_coherent(tx_ring->dev, tx_ring->size, tx_ring->desc, in ixgbevf_free_tx_resources()
3386 tx_ring->dma); in ixgbevf_free_tx_resources()
3388 tx_ring->desc = NULL; in ixgbevf_free_tx_resources()
3392 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
3395 * Free all transmit software resources
3401 for (i = 0; i < adapter->num_tx_queues; i++) in ixgbevf_free_all_tx_resources()
3402 if (adapter->tx_ring[i]->desc) in ixgbevf_free_all_tx_resources()
3403 ixgbevf_free_tx_resources(adapter->tx_ring[i]); in ixgbevf_free_all_tx_resources()
3404 for (i = 0; i < adapter->num_xdp_queues; i++) in ixgbevf_free_all_tx_resources()
3405 if (adapter->xdp_ring[i]->desc) in ixgbevf_free_all_tx_resources()
3406 ixgbevf_free_tx_resources(adapter->xdp_ring[i]); in ixgbevf_free_all_tx_resources()
3410 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
3417 struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); in ixgbevf_setup_tx_resources()
3420 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; in ixgbevf_setup_tx_resources()
3421 tx_ring->tx_buffer_info = vmalloc(size); in ixgbevf_setup_tx_resources()
3422 if (!tx_ring->tx_buffer_info) in ixgbevf_setup_tx_resources()
3425 u64_stats_init(&tx_ring->syncp); in ixgbevf_setup_tx_resources()
3428 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); in ixgbevf_setup_tx_resources()
3429 tx_ring->size = ALIGN(tx_ring->size, 4096); in ixgbevf_setup_tx_resources()
3431 tx_ring->desc = dma_alloc_coherent(tx_ring->dev, tx_ring->size, in ixgbevf_setup_tx_resources()
3432 &tx_ring->dma, GFP_KERNEL); in ixgbevf_setup_tx_resources()
3433 if (!tx_ring->desc) in ixgbevf_setup_tx_resources()
3439 vfree(tx_ring->tx_buffer_info); in ixgbevf_setup_tx_resources()
3440 tx_ring->tx_buffer_info = NULL; in ixgbevf_setup_tx_resources()
3441 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n"); in ixgbevf_setup_tx_resources()
3442 return -ENOMEM; in ixgbevf_setup_tx_resources()
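/*
 * Editor's sketch (not from the driver source): the ring-sizing arithmetic in
 * ixgbevf_setup_tx_resources() above, reproduced as a standalone program.
 * ALIGN_UP() re-derives the kernel's ALIGN(); the 16-byte descriptor size and
 * 512-entry count are illustrative assumptions.
 */
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long desc_size = 16;	/* stands in for sizeof(union ixgbe_adv_tx_desc) */
	unsigned long count = 512;	/* stands in for tx_ring->count */
	unsigned long size = count * desc_size;

	/* round up to a whole number of 4 KiB pages, as ALIGN(size, 4096) does */
	size = ALIGN_UP(size, 4096);
	printf("descriptor ring bytes: %lu\n", size);	/* 8192 for these values */
	return 0;
}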
3446 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
3459 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbevf_setup_all_tx_resources()
3460 err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]); in ixgbevf_setup_all_tx_resources()
3463 hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i); in ixgbevf_setup_all_tx_resources()
3467 for (j = 0; j < adapter->num_xdp_queues; j++) { in ixgbevf_setup_all_tx_resources()
3468 err = ixgbevf_setup_tx_resources(adapter->xdp_ring[j]); in ixgbevf_setup_all_tx_resources()
3471 hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j); in ixgbevf_setup_all_tx_resources()
3478 while (j--) in ixgbevf_setup_all_tx_resources()
3479 ixgbevf_free_tx_resources(adapter->xdp_ring[j]); in ixgbevf_setup_all_tx_resources()
3480 while (i--) in ixgbevf_setup_all_tx_resources()
3481 ixgbevf_free_tx_resources(adapter->tx_ring[i]); in ixgbevf_setup_all_tx_resources()
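/*
 * Editor's sketch: the "unwind only what was allocated so far" idiom used in
 * the err_setup_tx path above (while (j--) / while (i--)), shown with plain
 * malloc()/free().  All names here are illustrative, not the driver's.
 */
#include <stdlib.h>

static int setup_all(void **bufs, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		bufs[i] = malloc(4096);
		if (!bufs[i])
			goto err_unwind;
	}
	return 0;

err_unwind:
	/* free only the entries that were successfully allocated */
	while (i--)
		free(bufs[i]);
	return -1;
}

int main(void)
{
	void *bufs[8];
	int i;

	if (setup_all(bufs, 8))
		return 1;
	for (i = 0; i < 8; i++)
		free(bufs[i]);
	return 0;
}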
3487 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
3498 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; in ixgbevf_setup_rx_resources()
3499 rx_ring->rx_buffer_info = vmalloc(size); in ixgbevf_setup_rx_resources()
3500 if (!rx_ring->rx_buffer_info) in ixgbevf_setup_rx_resources()
3503 u64_stats_init(&rx_ring->syncp); in ixgbevf_setup_rx_resources()
3506 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); in ixgbevf_setup_rx_resources()
3507 rx_ring->size = ALIGN(rx_ring->size, 4096); in ixgbevf_setup_rx_resources()
3509 rx_ring->desc = dma_alloc_coherent(rx_ring->dev, rx_ring->size, in ixgbevf_setup_rx_resources()
3510 &rx_ring->dma, GFP_KERNEL); in ixgbevf_setup_rx_resources()
3512 if (!rx_ring->desc) in ixgbevf_setup_rx_resources()
3515 /* XDP RX-queue info */ in ixgbevf_setup_rx_resources()
3516 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev, in ixgbevf_setup_rx_resources()
3517 rx_ring->queue_index, 0) < 0) in ixgbevf_setup_rx_resources()
3520 rx_ring->xdp_prog = adapter->xdp_prog; in ixgbevf_setup_rx_resources()
3524 vfree(rx_ring->rx_buffer_info); in ixgbevf_setup_rx_resources()
3525 rx_ring->rx_buffer_info = NULL; in ixgbevf_setup_rx_resources()
3526 dev_err(rx_ring->dev, "Unable to allocate memory for the Rx descriptor ring\n"); in ixgbevf_setup_rx_resources()
3527 return -ENOMEM; in ixgbevf_setup_rx_resources()
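/*
 * Editor's sketch: one common shape of the layered goto-cleanup pattern that
 * ixgbevf_setup_rx_resources() follows (buffer-info array, then descriptor
 * memory, then XDP registration), where each failure point releases only what
 * came before it.  Plain malloc() stands in for vmalloc()/dma_alloc_coherent();
 * this is an illustration of the idiom, not the driver's exact error flow.
 */
#include <stdlib.h>
#include <errno.h>

struct fake_ring {
	void *buffer_info;
	void *desc;
};

static int setup_ring(struct fake_ring *r)
{
	r->buffer_info = malloc(1024);
	if (!r->buffer_info)
		goto err;

	r->desc = malloc(8192);
	if (!r->desc)
		goto err_free_info;

	return 0;

err_free_info:
	free(r->buffer_info);
	r->buffer_info = NULL;
err:
	return -ENOMEM;
}

int main(void)
{
	struct fake_ring r;

	if (setup_ring(&r))
		return 1;
	free(r.desc);
	free(r.buffer_info);
	return 0;
}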
3531 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
3544 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbevf_setup_all_rx_resources()
3545 err = ixgbevf_setup_rx_resources(adapter, adapter->rx_ring[i]); in ixgbevf_setup_all_rx_resources()
3548 hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i); in ixgbevf_setup_all_rx_resources()
3555 while (i--) in ixgbevf_setup_all_rx_resources()
3556 ixgbevf_free_rx_resources(adapter->rx_ring[i]); in ixgbevf_setup_all_rx_resources()
3561 * ixgbevf_free_rx_resources - Free Rx Resources
3564 * Free all receive software resources
3570 rx_ring->xdp_prog = NULL; in ixgbevf_free_rx_resources()
3571 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ixgbevf_free_rx_resources()
3572 vfree(rx_ring->rx_buffer_info); in ixgbevf_free_rx_resources()
3573 rx_ring->rx_buffer_info = NULL; in ixgbevf_free_rx_resources()
3575 dma_free_coherent(rx_ring->dev, rx_ring->size, rx_ring->desc, in ixgbevf_free_rx_resources()
3576 rx_ring->dma); in ixgbevf_free_rx_resources()
3578 rx_ring->desc = NULL; in ixgbevf_free_rx_resources()
3582 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
3585 * Free all receive software resources
3591 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbevf_free_all_rx_resources()
3592 if (adapter->rx_ring[i]->desc) in ixgbevf_free_all_rx_resources()
3593 ixgbevf_free_rx_resources(adapter->rx_ring[i]); in ixgbevf_free_all_rx_resources()
3597 * ixgbevf_open - Called when a network interface is made active
3611 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_open()
3620 if (!adapter->num_msix_vectors) in ixgbevf_open()
3621 return -ENOMEM; in ixgbevf_open()
3623 if (hw->adapter_stopped) { in ixgbevf_open()
3628 if (hw->adapter_stopped) { in ixgbevf_open()
3630 pr_err("Unable to start - perhaps the PF Driver isn't up yet\n"); in ixgbevf_open()
3636 if (test_bit(__IXGBEVF_TESTING, &adapter->state)) in ixgbevf_open()
3637 return -EBUSY; in ixgbevf_open()
3658 err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); in ixgbevf_open()
3662 err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues); in ixgbevf_open()
3684 * ixgbevf_close_suspend - actions necessary to both suspend and close flows
3699 * ixgbevf_close - Disables a network interface
3704 * The close entry point is called when an interface is de-activated
3721 struct net_device *dev = adapter->netdev; in ixgbevf_queue_reset_subtask()
3724 &adapter->state)) in ixgbevf_queue_reset_subtask()
3728 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || in ixgbevf_queue_reset_subtask()
3729 test_bit(__IXGBEVF_RESETTING, &adapter->state)) in ixgbevf_queue_reset_subtask()
3755 u16 i = tx_ring->next_to_use; in ixgbevf_tx_ctxtdesc()
3760 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in ixgbevf_tx_ctxtdesc()
3765 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); in ixgbevf_tx_ctxtdesc()
3766 context_desc->fceof_saidx = cpu_to_le32(fceof_saidx); in ixgbevf_tx_ctxtdesc()
3767 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); in ixgbevf_tx_ctxtdesc()
3768 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); in ixgbevf_tx_ctxtdesc()
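/*
 * Editor's sketch: the wrap-around advance of next_to_use seen in
 * ixgbevf_tx_ctxtdesc() above, as a tiny standalone helper.  'count' is the
 * number of descriptors in the ring; values in main() are illustrative.
 */
#include <stdio.h>

static unsigned short ring_advance(unsigned short i, unsigned short count)
{
	i++;
	return (i < count) ? i : 0;	/* wrap back to the first descriptor */
}

int main(void)
{
	printf("%u\n", ring_advance(511, 512));	/* prints 0: wrapped around */
	return 0;
}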
3777 struct sk_buff *skb = first->skb; in ixgbevf_tso()
3791 if (skb->ip_summed != CHECKSUM_PARTIAL) in ixgbevf_tso()
3801 if (eth_p_mpls(first->protocol)) in ixgbevf_tso()
3811 if (ip.v4->version == 4) { in ixgbevf_tso()
3813 unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4); in ixgbevf_tso()
3814 int len = csum_start - trans_start; in ixgbevf_tso()
3820 ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ? in ixgbevf_tso()
3825 ip.v4->tot_len = 0; in ixgbevf_tso()
3826 first->tx_flags |= IXGBE_TX_FLAGS_TSO | in ixgbevf_tso()
3830 ip.v6->payload_len = 0; in ixgbevf_tso()
3831 first->tx_flags |= IXGBE_TX_FLAGS_TSO | in ixgbevf_tso()
3836 l4_offset = l4.hdr - skb->data; in ixgbevf_tso()
3839 *hdr_len = (l4.tcp->doff * 4) + l4_offset; in ixgbevf_tso()
3842 paylen = skb->len - l4_offset; in ixgbevf_tso()
3843 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); in ixgbevf_tso()
3846 first->gso_segs = skb_shinfo(skb)->gso_segs; in ixgbevf_tso()
3847 first->bytecount += (first->gso_segs - 1) * *hdr_len; in ixgbevf_tso()
3850 mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; in ixgbevf_tso()
3851 mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; in ixgbevf_tso()
3854 fceof_saidx |= itd->pfsa; in ixgbevf_tso()
3855 type_tucmd |= itd->flags | itd->trailer_len; in ixgbevf_tso()
3858 vlan_macip_lens = l4.hdr - ip.hdr; in ixgbevf_tso()
3859 vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; in ixgbevf_tso()
3860 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; in ixgbevf_tso()
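/*
 * Editor's sketch: the TSO byte accounting done above.  Each additional
 * segment re-emits the protocol headers, so the wire byte count is skb->len
 * plus (gso_segs - 1) extra copies of the header.  The example numbers below
 * are made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned int skb_len   = 64000;	/* linear + paged data handed to the NIC */
	unsigned int hdr_len   = 54;	/* Ethernet 14 + IPv4 20 + TCP 20 */
	unsigned int mss       = 1460;
	unsigned int payload   = skb_len - hdr_len;
	unsigned int gso_segs  = (payload + mss - 1) / mss;	/* ceiling division */
	unsigned int bytecount = skb_len + (gso_segs - 1) * hdr_len;

	printf("segments: %u, bytes on the wire: %u\n", gso_segs, bytecount);
	return 0;
}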
3872 struct sk_buff *skb = first->skb; in ixgbevf_tx_csum()
3877 if (skb->ip_summed != CHECKSUM_PARTIAL) in ixgbevf_tx_csum()
3880 switch (skb->csum_offset) { in ixgbevf_tx_csum()
3898 if (first->protocol == htons(ETH_P_IP)) in ixgbevf_tx_csum()
3902 first->tx_flags |= IXGBE_TX_FLAGS_CSUM; in ixgbevf_tx_csum()
3903 vlan_macip_lens = skb_checksum_start_offset(skb) - in ixgbevf_tx_csum()
3908 vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; in ixgbevf_tx_csum()
3910 fceof_saidx |= itd->pfsa; in ixgbevf_tx_csum()
3911 type_tucmd |= itd->flags | itd->trailer_len; in ixgbevf_tx_csum()
3961 tx_desc->read.olinfo_status = olinfo_status; in ixgbevf_tx_olinfo_status()
3968 struct sk_buff *skb = first->skb; in ixgbevf_tx_map()
3974 u32 tx_flags = first->tx_flags; in ixgbevf_tx_map()
3976 u16 i = tx_ring->next_to_use; in ixgbevf_tx_map()
3980 ixgbevf_tx_olinfo_status(tx_desc, tx_flags, skb->len - hdr_len); in ixgbevf_tx_map()
3983 data_len = skb->data_len; in ixgbevf_tx_map()
3985 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in ixgbevf_tx_map()
3989 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in ixgbevf_tx_map()
3990 if (dma_mapping_error(tx_ring->dev, dma)) in ixgbevf_tx_map()
3997 tx_desc->read.buffer_addr = cpu_to_le64(dma); in ixgbevf_tx_map()
4000 tx_desc->read.cmd_type_len = in ixgbevf_tx_map()
4005 if (i == tx_ring->count) { in ixgbevf_tx_map()
4009 tx_desc->read.olinfo_status = 0; in ixgbevf_tx_map()
4012 size -= IXGBE_MAX_DATA_PER_TXD; in ixgbevf_tx_map()
4014 tx_desc->read.buffer_addr = cpu_to_le64(dma); in ixgbevf_tx_map()
4020 tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size); in ixgbevf_tx_map()
4024 if (i == tx_ring->count) { in ixgbevf_tx_map()
4028 tx_desc->read.olinfo_status = 0; in ixgbevf_tx_map()
4031 data_len -= size; in ixgbevf_tx_map()
4033 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in ixgbevf_tx_map()
4036 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_tx_map()
4041 tx_desc->read.cmd_type_len = cmd_type; in ixgbevf_tx_map()
4044 first->time_stamp = jiffies; in ixgbevf_tx_map()
4049 * are new descriptors to fetch. (Only applicable for weak-ordered in ixgbevf_tx_map()
4050 * memory model archs, such as IA-64). in ixgbevf_tx_map()
4058 first->next_to_watch = tx_desc; in ixgbevf_tx_map()
4061 if (i == tx_ring->count) in ixgbevf_tx_map()
4064 tx_ring->next_to_use = i; in ixgbevf_tx_map()
4071 dev_err(tx_ring->dev, "TX DMA map failed\n"); in ixgbevf_tx_map()
4072 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_tx_map()
4077 dma_unmap_page(tx_ring->dev, in ixgbevf_tx_map()
4083 if (i-- == 0) in ixgbevf_tx_map()
4084 i += tx_ring->count; in ixgbevf_tx_map()
4085 tx_buffer = &tx_ring->tx_buffer_info[i]; in ixgbevf_tx_map()
4089 dma_unmap_single(tx_ring->dev, in ixgbevf_tx_map()
4095 dev_kfree_skb_any(tx_buffer->skb); in ixgbevf_tx_map()
4096 tx_buffer->skb = NULL; in ixgbevf_tx_map()
4098 tx_ring->next_to_use = i; in ixgbevf_tx_map()
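/*
 * Editor's sketch: walking backwards through a ring with wrap-around, the
 * idiom the dma_error path above uses (if (i-- == 0) i += count;) to unmap
 * everything that was mapped before the failure.  Indices and count are
 * illustrative; the real loop also distinguishes the first buffer of the frame.
 */
#include <stdio.h>

int main(void)
{
	unsigned int count = 512;
	unsigned int first = 510;	/* slot that started the frame */
	unsigned int i = 1;		/* slot where the mapping failure happened */

	for (;;) {
		printf("undo slot %u\n", i);
		if (i == first)
			break;
		if (i-- == 0)
			i += count;	/* stepped past slot 0, continue from the end */
	}
	return 0;
}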
4103 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __ixgbevf_maybe_stop_tx()
4114 return -EBUSY; in __ixgbevf_maybe_stop_tx()
4116 /* A reprieve! - use start_queue because it doesn't call schedule */ in __ixgbevf_maybe_stop_tx()
4117 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __ixgbevf_maybe_stop_tx()
4118 ++tx_ring->tx_stats.restart_queue; in __ixgbevf_maybe_stop_tx()
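/*
 * Editor's sketch: the "is there room for this frame?" test behind
 * __ixgbevf_maybe_stop_tx().  desc_unused() follows the usual Intel-driver
 * ring bookkeeping (one slot is always left empty so tail never catches head);
 * treat the exact formula and the 'needed' value as assumptions for
 * illustration.
 */
#include <stdio.h>

static unsigned int desc_unused(unsigned int ntc, unsigned int ntu,
				unsigned int count)
{
	return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
}

int main(void)
{
	unsigned int count = 512, ntu = 500, ntc = 10, needed = 42;

	if (desc_unused(ntc, ntu, count) < needed)
		printf("stop the queue, only %u slots free\n",
		       desc_unused(ntc, ntu, count));
	else
		printf("enough room, keep transmitting\n");
	return 0;
}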
4156 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { in ixgbevf_xmit_frame_ring()
4157 skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; in ixgbevf_xmit_frame_ring()
4162 count += skb_shinfo(skb)->nr_frags; in ixgbevf_xmit_frame_ring()
4165 tx_ring->tx_stats.tx_busy++; in ixgbevf_xmit_frame_ring()
4170 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in ixgbevf_xmit_frame_ring()
4171 first->skb = skb; in ixgbevf_xmit_frame_ring()
4172 first->bytecount = skb->len; in ixgbevf_xmit_frame_ring()
4173 first->gso_segs = 1; in ixgbevf_xmit_frame_ring()
4182 first->tx_flags = tx_flags; in ixgbevf_xmit_frame_ring()
4183 first->protocol = vlan_get_protocol(skb); in ixgbevf_xmit_frame_ring()
4202 dev_kfree_skb_any(first->skb); in ixgbevf_xmit_frame_ring()
4203 first->skb = NULL; in ixgbevf_xmit_frame_ring()
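/*
 * Editor's sketch: estimating how many descriptors a frame will need before
 * queueing it, one descriptor per 16 KiB chunk of the head and of each
 * fragment (16 KiB is my reading of IXGBE_MAX_DATA_PER_TXD; treat it as an
 * assumption), plus a few reserved slots.  Fragment sizes are made up.
 */
#include <stdio.h>

#define MAX_DATA_PER_TXD	16384u
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int frag_len[] = { 1500, 32768, 4096 };
	unsigned int headlen = 256, count, f;

	count = DIV_ROUND_UP(headlen, MAX_DATA_PER_TXD);
	for (f = 0; f < 3; f++)
		count += DIV_ROUND_UP(frag_len[f], MAX_DATA_PER_TXD);

	/* the driver also reserves slots for the context descriptor and to
	 * keep tail from touching head before it stops the queue
	 */
	printf("descriptors needed: %u (+ reserve)\n", count);
	return 0;
}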
4213 if (skb->len <= 0) { in ixgbevf_xmit_frame()
4221 if (skb->len < 17) { in ixgbevf_xmit_frame()
4224 skb->len = 17; in ixgbevf_xmit_frame()
4227 tx_ring = adapter->tx_ring[skb->queue_mapping]; in ixgbevf_xmit_frame()
4232 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
4241 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_set_mac()
4245 if (!is_valid_ether_addr(addr->sa_data)) in ixgbevf_set_mac()
4246 return -EADDRNOTAVAIL; in ixgbevf_set_mac()
4248 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_set_mac()
4250 err = hw->mac.ops.set_rar(hw, 0, addr->sa_data, 0); in ixgbevf_set_mac()
4252 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_set_mac()
4255 return -EPERM; in ixgbevf_set_mac()
4257 ether_addr_copy(hw->mac.addr, addr->sa_data); in ixgbevf_set_mac()
4258 ether_addr_copy(hw->mac.perm_addr, addr->sa_data); in ixgbevf_set_mac()
4259 eth_hw_addr_set(netdev, addr->sa_data); in ixgbevf_set_mac()
4265 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
4274 struct ixgbe_hw *hw = &adapter->hw; in ixgbevf_change_mtu()
4279 if (adapter->xdp_prog) { in ixgbevf_change_mtu()
4280 dev_warn(&adapter->pdev->dev, "MTU cannot be changed while XDP program is loaded\n"); in ixgbevf_change_mtu()
4281 return -EPERM; in ixgbevf_change_mtu()
4284 spin_lock_bh(&adapter->mbx_lock); in ixgbevf_change_mtu()
4286 ret = hw->mac.ops.set_rlpml(hw, max_frame); in ixgbevf_change_mtu()
4287 spin_unlock_bh(&adapter->mbx_lock); in ixgbevf_change_mtu()
4289 return -EINVAL; in ixgbevf_change_mtu()
4292 netdev->mtu, new_mtu); in ixgbevf_change_mtu()
4295 netdev->mtu = new_mtu; in ixgbevf_change_mtu()
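/*
 * Editor's sketch: the frame size that corresponds to a given MTU, which is
 * what the VF reports to the PF through set_rlpml() above.  The constants are
 * the standard Ethernet overheads; whether the VLAN tag is folded in at this
 * point is an assumption, so it is shown separately.
 */
#include <stdio.h>

#define ETH_HLEN	14	/* destination + source MAC + EtherType */
#define ETH_FCS_LEN	4	/* trailing CRC */
#define VLAN_HLEN	4	/* optional 802.1Q tag */

int main(void)
{
	unsigned int new_mtu = 9000;
	unsigned int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	printf("max frame: %u (with VLAN tag: %u)\n",
	       max_frame, max_frame + VLAN_HLEN);
	return 0;
}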
4327 adapter->hw.hw_addr = adapter->io_addr; in ixgbevf_resume()
4329 clear_bit(__IXGBEVF_DISABLED, &adapter->state); in ixgbevf_resume()
4349 ixgbevf_suspend(&pdev->dev); in ixgbevf_shutdown()
4360 start = u64_stats_fetch_begin(&ring->syncp); in ixgbevf_get_tx_ring_stats()
4361 bytes = ring->stats.bytes; in ixgbevf_get_tx_ring_stats()
4362 packets = ring->stats.packets; in ixgbevf_get_tx_ring_stats()
4363 } while (u64_stats_fetch_retry(&ring->syncp, start)); in ixgbevf_get_tx_ring_stats()
4364 stats->tx_bytes += bytes; in ixgbevf_get_tx_ring_stats()
4365 stats->tx_packets += packets; in ixgbevf_get_tx_ring_stats()
4380 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; in ixgbevf_get_stats()
4383 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbevf_get_stats()
4384 ring = adapter->rx_ring[i]; in ixgbevf_get_stats()
4386 start = u64_stats_fetch_begin(&ring->syncp); in ixgbevf_get_stats()
4387 bytes = ring->stats.bytes; in ixgbevf_get_stats()
4388 packets = ring->stats.packets; in ixgbevf_get_stats()
4389 } while (u64_stats_fetch_retry(&ring->syncp, start)); in ixgbevf_get_stats()
4390 stats->rx_bytes += bytes; in ixgbevf_get_stats()
4391 stats->rx_packets += packets; in ixgbevf_get_stats()
4394 for (i = 0; i < adapter->num_tx_queues; i++) { in ixgbevf_get_stats()
4395 ring = adapter->tx_ring[i]; in ixgbevf_get_stats()
4399 for (i = 0; i < adapter->num_xdp_queues; i++) { in ixgbevf_get_stats()
4400 ring = adapter->xdp_ring[i]; in ixgbevf_get_stats()
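/*
 * Editor's sketch: the retry idiom behind u64_stats_fetch_begin()/_retry()
 * used in the stats paths above, modelled in user space with a plain sequence
 * counter.  The real kernel helpers also handle memory ordering and 32-bit
 * systems; this only illustrates the "re-read until the writer was quiet"
 * loop shape.
 */
#include <stdio.h>

struct snap_stats {
	unsigned int seq;		/* even: stable, odd: writer in progress */
	unsigned long long bytes;
	unsigned long long packets;
};

static void read_stats(const struct snap_stats *s,
		       unsigned long long *bytes, unsigned long long *packets)
{
	unsigned int start;

	do {
		start = s->seq;
		*bytes = s->bytes;
		*packets = s->packets;
	} while (s->seq != start || (start & 1));	/* retry if it moved mid-read */
}

int main(void)
{
	struct snap_stats s = { 0, 123456, 789 };
	unsigned long long b, p;

	read_stats(&s, &b, &p);
	printf("%llu bytes, %llu packets\n", b, p);
	return 0;
}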
4416 mac_hdr_len = skb_network_header(skb) - skb->data; in ixgbevf_features_check()
4424 network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); in ixgbevf_features_check()
4434 if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) in ixgbevf_features_check()
4442 int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; in ixgbevf_xdp_setup()
4447 for (i = 0; i < adapter->num_rx_queues; i++) { in ixgbevf_xdp_setup()
4448 struct ixgbevf_ring *ring = adapter->rx_ring[i]; in ixgbevf_xdp_setup()
4451 return -EINVAL; in ixgbevf_xdp_setup()
4454 old_prog = xchg(&adapter->xdp_prog, prog); in ixgbevf_xdp_setup()
4471 for (i = 0; i < adapter->num_rx_queues; i++) in ixgbevf_xdp_setup()
4472 xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog); in ixgbevf_xdp_setup()
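/*
 * Editor's sketch: the "swap the adapter-wide program, then mirror it into
 * every Rx ring" shape of the XDP attach path above.  xchg() is modelled with
 * C11 atomic_exchange; the ring count and the dummy program pointer are
 * illustrative stand-ins, not the driver's types.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NUM_RX_QUEUES	4

int main(void)
{
	_Atomic(void *) adapter_prog = NULL;
	_Atomic(void *) ring_prog[NUM_RX_QUEUES] = { NULL };
	int dummy_prog = 1;			/* stands in for a loaded BPF program */
	void *old_prog, *new_prog = &dummy_prog;
	int i;

	old_prog = atomic_exchange(&adapter_prog, new_prog);
	for (i = 0; i < NUM_RX_QUEUES; i++)
		atomic_exchange(&ring_prog[i], atomic_load(&adapter_prog));

	printf("old program %p replaced on %d rings\n", old_prog, NUM_RX_QUEUES);
	return 0;
}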
4483 switch (xdp->command) { in ixgbevf_xdp()
4485 return ixgbevf_xdp_setup(dev, xdp->prog); in ixgbevf_xdp()
4487 return -EINVAL; in ixgbevf_xdp()
4509 dev->netdev_ops = &ixgbevf_netdev_ops; in ixgbevf_assign_netdev_ops()
4511 dev->watchdog_timeo = 5 * HZ; in ixgbevf_assign_netdev_ops()
4515 * ixgbevf_probe - Device Initialization Routine
4530 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; in ixgbevf_probe()
4538 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); in ixgbevf_probe()
4540 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); in ixgbevf_probe()
4546 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); in ixgbevf_probe()
4555 err = -ENOMEM; in ixgbevf_probe()
4559 SET_NETDEV_DEV(netdev, &pdev->dev); in ixgbevf_probe()
4563 adapter->netdev = netdev; in ixgbevf_probe()
4564 adapter->pdev = pdev; in ixgbevf_probe()
4565 hw = &adapter->hw; in ixgbevf_probe()
4566 hw->back = adapter; in ixgbevf_probe()
4567 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); in ixgbevf_probe()
4574 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), in ixgbevf_probe()
4576 adapter->io_addr = hw->hw_addr; in ixgbevf_probe()
4577 if (!hw->hw_addr) { in ixgbevf_probe()
4578 err = -EIO; in ixgbevf_probe()
4585 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); in ixgbevf_probe()
4586 hw->mac.type = ii->mac; in ixgbevf_probe()
4588 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy, in ixgbevf_probe()
4597 if (!is_valid_ether_addr(netdev->dev_addr)) { in ixgbevf_probe()
4599 err = -EIO; in ixgbevf_probe()
4603 netdev->hw_features = NETIF_F_SG | in ixgbevf_probe()
4617 netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES; in ixgbevf_probe()
4618 netdev->hw_features |= NETIF_F_GSO_PARTIAL | in ixgbevf_probe()
4621 netdev->features = netdev->hw_features | NETIF_F_HIGHDMA; in ixgbevf_probe()
4623 netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; in ixgbevf_probe()
4624 netdev->mpls_features |= NETIF_F_SG | in ixgbevf_probe()
4628 netdev->mpls_features |= IXGBEVF_GSO_PARTIAL_FEATURES; in ixgbevf_probe()
4629 netdev->hw_enc_features |= netdev->vlan_features; in ixgbevf_probe()
4632 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | in ixgbevf_probe()
4636 netdev->priv_flags |= IFF_UNICAST_FLT; in ixgbevf_probe()
4637 netdev->xdp_features = NETDEV_XDP_ACT_BASIC; in ixgbevf_probe()
4639 /* MTU range: 68 - 1504 or 9710 */ in ixgbevf_probe()
4640 netdev->min_mtu = ETH_MIN_MTU; in ixgbevf_probe()
4641 switch (adapter->hw.api_version) { in ixgbevf_probe()
4647 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - in ixgbevf_probe()
4651 if (adapter->hw.mac.type != ixgbe_mac_82599_vf) in ixgbevf_probe()
4652 netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - in ixgbevf_probe()
4655 netdev->max_mtu = ETH_DATA_LEN + ETH_FCS_LEN; in ixgbevf_probe()
4659 if (IXGBE_REMOVED(hw->hw_addr)) { in ixgbevf_probe()
4660 err = -EIO; in ixgbevf_probe()
4664 timer_setup(&adapter->service_timer, ixgbevf_service_timer, 0); in ixgbevf_probe()
4666 INIT_WORK(&adapter->service_task, ixgbevf_service_task); in ixgbevf_probe()
4667 set_bit(__IXGBEVF_SERVICE_INITED, &adapter->state); in ixgbevf_probe()
4668 clear_bit(__IXGBEVF_SERVICE_SCHED, &adapter->state); in ixgbevf_probe()
4674 strcpy(netdev->name, "eth%d"); in ixgbevf_probe()
4687 dev_info(&pdev->dev, "%pM\n", netdev->dev_addr); in ixgbevf_probe()
4688 dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type); in ixgbevf_probe()
4690 switch (hw->mac.type) { in ixgbevf_probe()
4692 dev_info(&pdev->dev, "Intel(R) X550 Virtual Function\n"); in ixgbevf_probe()
4695 dev_info(&pdev->dev, "Intel(R) X540 Virtual Function\n"); in ixgbevf_probe()
4699 dev_info(&pdev->dev, "Intel(R) 82599 Virtual Function\n"); in ixgbevf_probe()
4709 iounmap(adapter->io_addr); in ixgbevf_probe()
4710 kfree(adapter->rss_key); in ixgbevf_probe()
4712 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); in ixgbevf_probe()
4724 * ixgbevf_remove - Device Removal Routine
4729 * Hot-Plug event, or because the driver is going to be removed from
4743 set_bit(__IXGBEVF_REMOVING, &adapter->state); in ixgbevf_remove()
4744 cancel_work_sync(&adapter->service_task); in ixgbevf_remove()
4746 if (netdev->reg_state == NETREG_REGISTERED) in ixgbevf_remove()
4753 iounmap(adapter->io_addr); in ixgbevf_remove()
4756 hw_dbg(&adapter->hw, "Remove complete\n"); in ixgbevf_remove()
4758 kfree(adapter->rss_key); in ixgbevf_remove()
4759 disable_dev = !test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state); in ixgbevf_remove()
4767 * ixgbevf_io_error_detected - called when PCI error is detected
4780 if (!test_bit(__IXGBEVF_SERVICE_INITED, &adapter->state)) in ixgbevf_io_error_detected()
4794 if (!test_and_set_bit(__IXGBEVF_DISABLED, &adapter->state)) in ixgbevf_io_error_detected()
4803 * ixgbevf_io_slot_reset - called after the pci bus has been reset.
4806 * Restart the card from scratch, as if from a cold-boot. Implementation
4807 * resembles the first-half of the ixgbevf_resume routine.
4815 dev_err(&pdev->dev, in ixgbevf_io_slot_reset()
4816 "Cannot re-enable PCI device after reset.\n"); in ixgbevf_io_slot_reset()
4820 adapter->hw.hw_addr = adapter->io_addr; in ixgbevf_io_slot_reset()
4822 clear_bit(__IXGBEVF_DISABLED, &adapter->state); in ixgbevf_io_slot_reset()
4831 * ixgbevf_io_resume - called when traffic can start flowing again.
4836 * second-half of the ixgbevf_resume routine.
4873 * ixgbevf_init_module - Driver Registration Routine
4887 return -ENOMEM; in ixgbevf_init_module()
4902 * ixgbevf_exit_module - Driver Exit Cleanup Routine
4918 * ixgbevf_get_hw_dev_name - return device name string
4924 struct ixgbevf_adapter *adapter = hw->back; in ixgbevf_get_hw_dev_name()
4926 return adapter->netdev->name; in ixgbevf_get_hw_dev_name()