Lines Matching "num-tx-queues"
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
3 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
56 if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) in ena_tx_timeout()
60 ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp); in ena_tx_timeout()
69 for (i = 0; i < adapter->num_io_queues; i++) in update_rx_ring_mtu()
70 adapter->rx_ring[i].mtu = mtu; in update_rx_ring_mtu()
78 ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu); in ena_change_mtu()
82 dev->mtu = new_mtu; in ena_change_mtu()
100 if (unlikely(ena_com_is_doorbell_needed(ring->ena_com_io_sq, in ena_xmit_common()
102 netif_dbg(adapter, tx_queued, adapter->netdev, in ena_xmit_common()
103 "llq tx max burst size of queue %d achieved, writing doorbell to send burst\n", in ena_xmit_common()
104 ring->qid); in ena_xmit_common()
109 rc = ena_com_prepare_tx(ring->ena_com_io_sq, ena_tx_ctx, in ena_xmit_common()
117 netif_err(adapter, tx_queued, adapter->netdev, in ena_xmit_common()
118 "Failed to prepare tx bufs\n"); in ena_xmit_common()
119 ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, in ena_xmit_common()
120 &ring->syncp); in ena_xmit_common()
121 if (rc != -ENOMEM) in ena_xmit_common()
127 u64_stats_update_begin(&ring->syncp); in ena_xmit_common()
128 ring->tx_stats.cnt++; in ena_xmit_common()
129 ring->tx_stats.bytes += bytes; in ena_xmit_common()
130 u64_stats_update_end(&ring->syncp); in ena_xmit_common()
132 tx_info->tx_descs = nb_hw_desc; in ena_xmit_common()
133 tx_info->total_tx_size = bytes; in ena_xmit_common()
134 tx_info->last_jiffies = jiffies; in ena_xmit_common()
135 tx_info->print_once = 0; in ena_xmit_common()
137 ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, in ena_xmit_common()
138 ring->ring_size); in ena_xmit_common()
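
The next_to_use update just above wraps the producer index with ENA_TX_RING_IDX_NEXT; a minimal standalone sketch of that wrap-around, assuming the macro is the usual power-of-two mask (an assumption about ena_netdev.h, not a quote of it):

    #include <stdio.h>

    /* Assumed to mirror ENA_TX_RING_IDX_NEXT: valid only for power-of-two
     * ring sizes, where (idx + 1) & (size - 1) is a cheap modulo.
     */
    #define RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))

    int main(void)
    {
            unsigned int ring_size = 1024;

            /* 1022 -> 1023 -> 0: the producer index wraps back to the start. */
            printf("%u %u\n", RING_IDX_NEXT(1022u, ring_size),
                   RING_IDX_NEXT(1023u, ring_size));
            return 0;
    }
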
148 adapter->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(adapter->num_io_queues); in ena_init_rx_cpu_rmap()
149 if (!adapter->netdev->rx_cpu_rmap) in ena_init_rx_cpu_rmap()
150 return -ENOMEM; in ena_init_rx_cpu_rmap()
151 for (i = 0; i < adapter->num_io_queues; i++) { in ena_init_rx_cpu_rmap()
154 rc = irq_cpu_rmap_add(adapter->netdev->rx_cpu_rmap, in ena_init_rx_cpu_rmap()
155 pci_irq_vector(adapter->pdev, irq_idx)); in ena_init_rx_cpu_rmap()
157 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); in ena_init_rx_cpu_rmap()
158 adapter->netdev->rx_cpu_rmap = NULL; in ena_init_rx_cpu_rmap()
169 ring->qid = qid; in ena_init_io_rings_common()
170 ring->pdev = adapter->pdev; in ena_init_io_rings_common()
171 ring->dev = &adapter->pdev->dev; in ena_init_io_rings_common()
172 ring->netdev = adapter->netdev; in ena_init_io_rings_common()
173 ring->napi = &adapter->ena_napi[qid].napi; in ena_init_io_rings_common()
174 ring->adapter = adapter; in ena_init_io_rings_common()
175 ring->ena_dev = adapter->ena_dev; in ena_init_io_rings_common()
176 ring->per_napi_packets = 0; in ena_init_io_rings_common()
177 ring->cpu = 0; in ena_init_io_rings_common()
178 ring->numa_node = 0; in ena_init_io_rings_common()
179 ring->no_interrupt_event_cnt = 0; in ena_init_io_rings_common()
180 u64_stats_init(&ring->syncp); in ena_init_io_rings_common()
190 ena_dev = adapter->ena_dev; in ena_init_io_rings()
193 txr = &adapter->tx_ring[i]; in ena_init_io_rings()
194 rxr = &adapter->rx_ring[i]; in ena_init_io_rings()
196 /* TX common ring state */ in ena_init_io_rings()
199 /* TX specific ring state */ in ena_init_io_rings()
200 txr->ring_size = adapter->requested_tx_ring_size; in ena_init_io_rings()
201 txr->tx_max_header_size = ena_dev->tx_max_header_size; in ena_init_io_rings()
202 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type; in ena_init_io_rings()
203 txr->sgl_size = adapter->max_tx_sgl_size; in ena_init_io_rings()
204 txr->smoothed_interval = in ena_init_io_rings()
206 txr->disable_meta_caching = adapter->disable_meta_caching; in ena_init_io_rings()
207 spin_lock_init(&txr->xdp_tx_lock); in ena_init_io_rings()
209 /* Don't init RX queues for xdp queues */ in ena_init_io_rings()
215 rxr->ring_size = adapter->requested_rx_ring_size; in ena_init_io_rings()
216 rxr->rx_copybreak = adapter->rx_copybreak; in ena_init_io_rings()
217 rxr->sgl_size = adapter->max_rx_sgl_size; in ena_init_io_rings()
218 rxr->smoothed_interval = in ena_init_io_rings()
220 rxr->empty_rx_queue = 0; in ena_init_io_rings()
221 rxr->rx_headroom = NET_SKB_PAD; in ena_init_io_rings()
222 adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in ena_init_io_rings()
223 rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues]; in ena_init_io_rings()
228 /* ena_setup_tx_resources - allocate I/O Tx resources (Descriptors)
236 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; in ena_setup_tx_resources()
237 struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)]; in ena_setup_tx_resources()
240 if (tx_ring->tx_buffer_info) { in ena_setup_tx_resources()
242 adapter->netdev, "tx_buffer_info is not NULL"); in ena_setup_tx_resources()
243 return -EEXIST; in ena_setup_tx_resources()
246 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; in ena_setup_tx_resources()
247 node = cpu_to_node(ena_irq->cpu); in ena_setup_tx_resources()
249 tx_ring->tx_buffer_info = vzalloc_node(size, node); in ena_setup_tx_resources()
250 if (!tx_ring->tx_buffer_info) { in ena_setup_tx_resources()
251 tx_ring->tx_buffer_info = vzalloc(size); in ena_setup_tx_resources()
252 if (!tx_ring->tx_buffer_info) in ena_setup_tx_resources()
256 size = sizeof(u16) * tx_ring->ring_size; in ena_setup_tx_resources()
257 tx_ring->free_ids = vzalloc_node(size, node); in ena_setup_tx_resources()
258 if (!tx_ring->free_ids) { in ena_setup_tx_resources()
259 tx_ring->free_ids = vzalloc(size); in ena_setup_tx_resources()
260 if (!tx_ring->free_ids) in ena_setup_tx_resources()
264 size = tx_ring->tx_max_header_size; in ena_setup_tx_resources()
265 tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node); in ena_setup_tx_resources()
266 if (!tx_ring->push_buf_intermediate_buf) { in ena_setup_tx_resources()
267 tx_ring->push_buf_intermediate_buf = vzalloc(size); in ena_setup_tx_resources()
268 if (!tx_ring->push_buf_intermediate_buf) in ena_setup_tx_resources()
272 /* Req id ring for TX out of order completions */ in ena_setup_tx_resources()
273 for (i = 0; i < tx_ring->ring_size; i++) in ena_setup_tx_resources()
274 tx_ring->free_ids[i] = i; in ena_setup_tx_resources()
276 /* Reset tx statistics */ in ena_setup_tx_resources()
277 memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats)); in ena_setup_tx_resources()
279 tx_ring->next_to_use = 0; in ena_setup_tx_resources()
280 tx_ring->next_to_clean = 0; in ena_setup_tx_resources()
281 tx_ring->cpu = ena_irq->cpu; in ena_setup_tx_resources()
282 tx_ring->numa_node = node; in ena_setup_tx_resources()
286 vfree(tx_ring->free_ids); in ena_setup_tx_resources()
287 tx_ring->free_ids = NULL; in ena_setup_tx_resources()
289 vfree(tx_ring->tx_buffer_info); in ena_setup_tx_resources()
290 tx_ring->tx_buffer_info = NULL; in ena_setup_tx_resources()
292 return -ENOMEM; in ena_setup_tx_resources()
295 /* ena_free_tx_resources - Free I/O Tx Resources per Queue
303 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; in ena_free_tx_resources()
305 vfree(tx_ring->tx_buffer_info); in ena_free_tx_resources()
306 tx_ring->tx_buffer_info = NULL; in ena_free_tx_resources()
308 vfree(tx_ring->free_ids); in ena_free_tx_resources()
309 tx_ring->free_ids = NULL; in ena_free_tx_resources()
311 vfree(tx_ring->push_buf_intermediate_buf); in ena_free_tx_resources()
312 tx_ring->push_buf_intermediate_buf = NULL; in ena_free_tx_resources()
330 netif_err(adapter, ifup, adapter->netdev, in ena_setup_tx_resources_in_range()
331 "Tx queue %d: allocation failed\n", i); in ena_setup_tx_resources_in_range()
334 while (first_index < i--) in ena_setup_tx_resources_in_range()
348 /* ena_free_all_io_tx_resources - Free I/O Tx Resources for All Queues
357 adapter->xdp_num_queues + in ena_free_all_io_tx_resources()
358 adapter->num_io_queues); in ena_free_all_io_tx_resources()
361 /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
370 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_setup_rx_resources()
371 struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)]; in ena_setup_rx_resources()
374 if (rx_ring->rx_buffer_info) { in ena_setup_rx_resources()
375 netif_err(adapter, ifup, adapter->netdev, in ena_setup_rx_resources()
377 return -EEXIST; in ena_setup_rx_resources()
383 size = sizeof(struct ena_rx_buffer) * (rx_ring->ring_size + 1); in ena_setup_rx_resources()
384 node = cpu_to_node(ena_irq->cpu); in ena_setup_rx_resources()
386 rx_ring->rx_buffer_info = vzalloc_node(size, node); in ena_setup_rx_resources()
387 if (!rx_ring->rx_buffer_info) { in ena_setup_rx_resources()
388 rx_ring->rx_buffer_info = vzalloc(size); in ena_setup_rx_resources()
389 if (!rx_ring->rx_buffer_info) in ena_setup_rx_resources()
390 return -ENOMEM; in ena_setup_rx_resources()
393 size = sizeof(u16) * rx_ring->ring_size; in ena_setup_rx_resources()
394 rx_ring->free_ids = vzalloc_node(size, node); in ena_setup_rx_resources()
395 if (!rx_ring->free_ids) { in ena_setup_rx_resources()
396 rx_ring->free_ids = vzalloc(size); in ena_setup_rx_resources()
397 if (!rx_ring->free_ids) { in ena_setup_rx_resources()
398 vfree(rx_ring->rx_buffer_info); in ena_setup_rx_resources()
399 rx_ring->rx_buffer_info = NULL; in ena_setup_rx_resources()
400 return -ENOMEM; in ena_setup_rx_resources()
405 for (i = 0; i < rx_ring->ring_size; i++) in ena_setup_rx_resources()
406 rx_ring->free_ids[i] = i; in ena_setup_rx_resources()
409 memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats)); in ena_setup_rx_resources()
411 rx_ring->next_to_clean = 0; in ena_setup_rx_resources()
412 rx_ring->next_to_use = 0; in ena_setup_rx_resources()
413 rx_ring->cpu = ena_irq->cpu; in ena_setup_rx_resources()
414 rx_ring->numa_node = node; in ena_setup_rx_resources()
419 /* ena_free_rx_resources - Free I/O Rx Resources
428 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_free_rx_resources()
430 vfree(rx_ring->rx_buffer_info); in ena_free_rx_resources()
431 rx_ring->rx_buffer_info = NULL; in ena_free_rx_resources()
433 vfree(rx_ring->free_ids); in ena_free_rx_resources()
434 rx_ring->free_ids = NULL; in ena_free_rx_resources()
437 /* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
446 for (i = 0; i < adapter->num_io_queues; i++) { in ena_setup_all_rx_resources()
456 netif_err(adapter, ifup, adapter->netdev, in ena_setup_all_rx_resources()
460 while (i--) in ena_setup_all_rx_resources()
465 /* ena_free_all_io_rx_resources - Free I/O Rx Resources for All Queues
474 for (i = 0; i < adapter->num_io_queues; i++) in ena_free_all_io_rx_resources()
488 ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, in ena_alloc_map_page()
489 &rx_ring->syncp); in ena_alloc_map_page()
490 return ERR_PTR(-ENOSPC); in ena_alloc_map_page()
493 /* To enable NIC-side port-mirroring, AKA SPAN port, in ena_alloc_map_page()
496 *dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE, in ena_alloc_map_page()
498 if (unlikely(dma_mapping_error(rx_ring->dev, *dma))) { in ena_alloc_map_page()
499 ena_increase_stat(&rx_ring->rx_stats.dma_mapping_err, 1, in ena_alloc_map_page()
500 &rx_ring->syncp); in ena_alloc_map_page()
502 return ERR_PTR(-EIO); in ena_alloc_map_page()
511 int headroom = rx_ring->rx_headroom; in ena_alloc_rx_buffer()
518 rx_info->buf_offset = headroom; in ena_alloc_rx_buffer()
521 if (unlikely(rx_info->page)) in ena_alloc_rx_buffer()
529 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_alloc_rx_buffer()
534 rx_info->page = page; in ena_alloc_rx_buffer()
535 rx_info->dma_addr = dma; in ena_alloc_rx_buffer()
536 rx_info->page_offset = 0; in ena_alloc_rx_buffer()
537 ena_buf = &rx_info->ena_buf; in ena_alloc_rx_buffer()
538 ena_buf->paddr = dma + headroom; in ena_alloc_rx_buffer()
539 ena_buf->len = ENA_PAGE_SIZE - headroom - tailroom; in ena_alloc_rx_buffer()
548 dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE, in ena_unmap_rx_buff_attrs()
555 struct page *page = rx_info->page; in ena_free_rx_page()
558 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_free_rx_page()
566 rx_info->page = NULL; in ena_free_rx_page()
569 static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) in ena_refill_rx_bufs() argument
575 next_to_use = rx_ring->next_to_use; in ena_refill_rx_bufs()
577 for (i = 0; i < num; i++) { in ena_refill_rx_bufs()
580 req_id = rx_ring->free_ids[next_to_use]; in ena_refill_rx_bufs()
582 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_refill_rx_bufs()
586 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_refill_rx_bufs()
588 rx_ring->qid); in ena_refill_rx_bufs()
591 rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, in ena_refill_rx_bufs()
592 &rx_info->ena_buf, in ena_refill_rx_bufs()
595 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_refill_rx_bufs()
597 rx_ring->qid); in ena_refill_rx_bufs()
601 rx_ring->ring_size); in ena_refill_rx_bufs()
604 if (unlikely(i < num)) { in ena_refill_rx_bufs()
605 ena_increase_stat(&rx_ring->rx_stats.refil_partial, 1, in ena_refill_rx_bufs()
606 &rx_ring->syncp); in ena_refill_rx_bufs()
607 netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_refill_rx_bufs()
609 rx_ring->qid, i, num); in ena_refill_rx_bufs()
614 ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq); in ena_refill_rx_bufs()
616 rx_ring->next_to_use = next_to_use; in ena_refill_rx_bufs()
624 struct ena_ring *rx_ring = &adapter->rx_ring[qid]; in ena_free_rx_bufs()
627 for (i = 0; i < rx_ring->ring_size; i++) { in ena_free_rx_bufs()
628 struct ena_rx_buffer *rx_info = &rx_ring->rx_buffer_info[i]; in ena_free_rx_bufs()
630 if (rx_info->page) in ena_free_rx_bufs()
635 /* ena_refill_all_rx_bufs - allocate all queues Rx buffers
643 for (i = 0; i < adapter->num_io_queues; i++) { in ena_refill_all_rx_bufs()
644 rx_ring = &adapter->rx_ring[i]; in ena_refill_all_rx_bufs()
645 bufs_num = rx_ring->ring_size - 1; in ena_refill_all_rx_bufs()
649 netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_refill_all_rx_bufs()
659 for (i = 0; i < adapter->num_io_queues; i++) in ena_free_all_rx_bufs()
670 ena_buf = tx_info->bufs; in ena_unmap_tx_buff()
671 cnt = tx_info->num_of_bufs; in ena_unmap_tx_buff()
676 if (tx_info->map_linear_data) { in ena_unmap_tx_buff()
677 dma_unmap_single(tx_ring->dev, in ena_unmap_tx_buff()
682 cnt--; in ena_unmap_tx_buff()
687 dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), in ena_unmap_tx_buff()
693 /* ena_free_tx_bufs - Free Tx Buffers per Queue
694 * @tx_ring: TX ring for which buffers will be freed
701 for (i = 0; i < tx_ring->ring_size; i++) { in ena_free_tx_bufs()
702 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; in ena_free_tx_bufs()
704 if (!tx_info->skb) in ena_free_tx_bufs()
708 netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev, in ena_free_tx_bufs()
709 "Free uncompleted tx skb qid %d idx 0x%x\n", in ena_free_tx_bufs()
710 tx_ring->qid, i); in ena_free_tx_bufs()
713 netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev, in ena_free_tx_bufs()
714 "Free uncompleted tx skb qid %d idx 0x%x\n", in ena_free_tx_bufs()
715 tx_ring->qid, i); in ena_free_tx_bufs()
720 dev_kfree_skb_any(tx_info->skb); in ena_free_tx_bufs()
722 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, in ena_free_tx_bufs()
723 tx_ring->qid)); in ena_free_tx_bufs()
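
The netdev_tx_reset_queue() call above clears byte-queue-limit (BQL) state for buffers that never completed; it pairs with netdev_tx_sent_queue() on the transmit path (seen later in ena_start_xmit()) and a netdev_tx_completed_queue() call on the completion path. A hedged sketch of that pairing in a hypothetical driver, not ENA's exact code:

    /* Hypothetical driver fragments showing the BQL accounting trio; only the
     * netdev_tx_*_queue() helpers are real kernel APIs, everything else is
     * illustrative.
     */
    #include <linux/netdevice.h>

    static void demo_on_xmit(struct netdev_queue *txq, unsigned int bytes)
    {
            netdev_tx_sent_queue(txq, bytes);            /* bytes handed to HW */
    }

    static void demo_on_tx_completion(struct netdev_queue *txq,
                                      unsigned int pkts, unsigned int bytes)
    {
            netdev_tx_completed_queue(txq, pkts, bytes); /* bytes HW finished */
    }

    static void demo_on_ring_teardown(struct netdev_queue *txq)
    {
            netdev_tx_reset_queue(txq);   /* forget in-flight bytes, as above */
    }
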
731 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) { in ena_free_all_tx_bufs()
732 tx_ring = &adapter->tx_ring[i]; in ena_free_all_tx_bufs()
742 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) { in ena_destroy_all_tx_queues()
744 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); in ena_destroy_all_tx_queues()
753 for (i = 0; i < adapter->num_io_queues; i++) { in ena_destroy_all_rx_queues()
755 cancel_work_sync(&adapter->ena_napi[i].dim.work); in ena_destroy_all_rx_queues()
756 ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]); in ena_destroy_all_rx_queues()
757 ena_com_destroy_io_queue(adapter->ena_dev, ena_qid); in ena_destroy_all_rx_queues()
771 netif_err(ring->adapter, in handle_invalid_req_id()
773 ring->netdev, in handle_invalid_req_id()
775 is_xdp ? "xdp frame" : "skb", ring->qid, req_id); in handle_invalid_req_id()
777 netif_err(ring->adapter, in handle_invalid_req_id()
779 ring->netdev, in handle_invalid_req_id()
781 req_id, ring->qid); in handle_invalid_req_id()
783 ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp); in handle_invalid_req_id()
784 ena_reset_device(ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID); in handle_invalid_req_id()
786 return -EFAULT; in handle_invalid_req_id()
793 tx_info = &tx_ring->tx_buffer_info[req_id]; in validate_tx_req_id()
794 if (likely(tx_info->skb)) in validate_tx_req_id()
811 next_to_clean = tx_ring->next_to_clean; in ena_clean_tx_irq()
812 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid); in ena_clean_tx_irq()
818 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, in ena_clean_tx_irq()
821 if (unlikely(rc == -EINVAL)) in ena_clean_tx_irq()
832 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_clean_tx_irq()
833 skb = tx_info->skb; in ena_clean_tx_irq()
836 prefetch(&skb->end); in ena_clean_tx_irq()
838 tx_info->skb = NULL; in ena_clean_tx_irq()
839 tx_info->last_jiffies = 0; in ena_clean_tx_irq()
843 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, in ena_clean_tx_irq()
844 "tx_poll: q %d skb %p completed\n", tx_ring->qid, in ena_clean_tx_irq()
847 tx_bytes += tx_info->total_tx_size; in ena_clean_tx_irq()
850 total_done += tx_info->tx_descs; in ena_clean_tx_irq()
852 tx_ring->free_ids[next_to_clean] = req_id; in ena_clean_tx_irq()
854 tx_ring->ring_size); in ena_clean_tx_irq()
857 tx_ring->next_to_clean = next_to_clean; in ena_clean_tx_irq()
858 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done); in ena_clean_tx_irq()
859 ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); in ena_clean_tx_irq()
863 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, in ena_clean_tx_irq()
865 tx_ring->qid, tx_pkts); in ena_clean_tx_irq()
872 above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_clean_tx_irq()
877 ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_clean_tx_irq()
880 test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) { in ena_clean_tx_irq()
882 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1, in ena_clean_tx_irq()
883 &tx_ring->syncp); in ena_clean_tx_irq()
896 skb = napi_alloc_skb(rx_ring->napi, len); in ena_alloc_skb()
901 ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1, in ena_alloc_skb()
902 &rx_ring->syncp); in ena_alloc_skb()
904 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_alloc_skb()
915 struct ena_com_buf *ena_buf = &rx_info->ena_buf; in ena_try_rx_buf_page_reuse()
920 if (SKB_DATA_ALIGN(len + pkt_offset) + ENA_MIN_RX_BUF_SIZE <= ena_buf->len) { in ena_try_rx_buf_page_reuse()
921 page_ref_inc(rx_info->page); in ena_try_rx_buf_page_reuse()
922 rx_info->page_offset += buf_len; in ena_try_rx_buf_page_reuse()
923 ena_buf->paddr += buf_len; in ena_try_rx_buf_page_reuse()
924 ena_buf->len -= buf_len; in ena_try_rx_buf_page_reuse()
952 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_rx_skb()
954 if (unlikely(!rx_info->page)) { in ena_rx_skb()
955 adapter = rx_ring->adapter; in ena_rx_skb()
956 netif_err(adapter, rx_err, rx_ring->netdev, in ena_rx_skb()
957 "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id); in ena_rx_skb()
958 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp); in ena_rx_skb()
963 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
965 rx_info, rx_info->page); in ena_rx_skb()
967 buf_offset = rx_info->buf_offset; in ena_rx_skb()
968 pkt_offset = buf_offset - rx_ring->rx_headroom; in ena_rx_skb()
969 page_offset = rx_info->page_offset; in ena_rx_skb()
970 buf_addr = page_address(rx_info->page) + page_offset; in ena_rx_skb()
972 if (len <= rx_ring->rx_copybreak) { in ena_rx_skb()
978 dma_sync_single_for_device(rx_ring->dev, in ena_rx_skb()
979 dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset, in ena_rx_skb()
984 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
985 "RX allocated small packet. len %d.\n", skb->len); in ena_rx_skb()
986 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ena_rx_skb()
987 rx_ring->free_ids[*next_to_clean] = req_id; in ena_rx_skb()
989 rx_ring->ring_size); in ena_rx_skb()
1009 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in ena_rx_skb()
1012 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_rx_skb()
1014 skb->len, skb->data_len); in ena_rx_skb()
1017 rx_info->page = NULL; in ena_rx_skb()
1019 rx_ring->free_ids[*next_to_clean] = req_id; in ena_rx_skb()
1022 rx_ring->ring_size); in ena_rx_skb()
1023 if (likely(--descs == 0)) in ena_rx_skb()
1030 rx_info = &rx_ring->rx_buffer_info[req_id]; in ena_rx_skb()
1032 /* rx_info->buf_offset includes rx_ring->rx_headroom */ in ena_rx_skb()
1033 buf_offset = rx_info->buf_offset; in ena_rx_skb()
1034 pkt_offset = buf_offset - rx_ring->rx_headroom; in ena_rx_skb()
1036 page_offset = rx_info->page_offset; in ena_rx_skb()
1038 pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr); in ena_rx_skb()
1043 dma_sync_single_for_cpu(rx_ring->dev, in ena_rx_skb()
1052 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page, in ena_rx_skb()
1060 /* ena_rx_checksum - indicate in skb if hw indicated a good cksum
1070 if (unlikely(!(rx_ring->netdev->features & NETIF_F_RXCSUM))) { in ena_rx_checksum()
1071 skb->ip_summed = CHECKSUM_NONE; in ena_rx_checksum()
1076 if (ena_rx_ctx->frag) { in ena_rx_checksum()
1077 skb->ip_summed = CHECKSUM_NONE; in ena_rx_checksum()
1082 if (unlikely((ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4) && in ena_rx_checksum()
1083 (ena_rx_ctx->l3_csum_err))) { in ena_rx_checksum()
1085 skb->ip_summed = CHECKSUM_NONE; in ena_rx_checksum()
1086 ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1, in ena_rx_checksum()
1087 &rx_ring->syncp); in ena_rx_checksum()
1088 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_rx_checksum()
1094 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || in ena_rx_checksum()
1095 (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) { in ena_rx_checksum()
1096 if (unlikely(ena_rx_ctx->l4_csum_err)) { in ena_rx_checksum()
1098 ena_increase_stat(&rx_ring->rx_stats.csum_bad, 1, in ena_rx_checksum()
1099 &rx_ring->syncp); in ena_rx_checksum()
1100 netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, in ena_rx_checksum()
1102 skb->ip_summed = CHECKSUM_NONE; in ena_rx_checksum()
1106 if (likely(ena_rx_ctx->l4_csum_checked)) { in ena_rx_checksum()
1107 skb->ip_summed = CHECKSUM_UNNECESSARY; in ena_rx_checksum()
1108 ena_increase_stat(&rx_ring->rx_stats.csum_good, 1, in ena_rx_checksum()
1109 &rx_ring->syncp); in ena_rx_checksum()
1111 ena_increase_stat(&rx_ring->rx_stats.csum_unchecked, 1, in ena_rx_checksum()
1112 &rx_ring->syncp); in ena_rx_checksum()
1113 skb->ip_summed = CHECKSUM_NONE; in ena_rx_checksum()
1116 skb->ip_summed = CHECKSUM_NONE; in ena_rx_checksum()
1128 if (likely(rx_ring->netdev->features & NETIF_F_RXHASH)) { in ena_set_rx_hash()
1129 if (likely((ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP) || in ena_set_rx_hash()
1130 (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP))) in ena_set_rx_hash()
1137 if (ena_rx_ctx->frag) in ena_set_rx_hash()
1140 skb_set_hash(skb, ena_rx_ctx->hash, hash_type); in ena_set_rx_hash()
1149 /* XDP multi-buffer packets not supported */ in ena_xdp_handle_buff()
1151 netdev_err_once(rx_ring->adapter->netdev, in ena_xdp_handle_buff()
1152 "xdp: dropped unsupported multi-buffer packets\n"); in ena_xdp_handle_buff()
1153 ena_increase_stat(&rx_ring->rx_stats.xdp_drop, 1, &rx_ring->syncp); in ena_xdp_handle_buff()
1157 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; in ena_xdp_handle_buff()
1158 xdp_prepare_buff(xdp, page_address(rx_info->page), in ena_xdp_handle_buff()
1159 rx_info->buf_offset, in ena_xdp_handle_buff()
1160 rx_ring->ena_bufs[0].len, false); in ena_xdp_handle_buff()
1166 rx_info->buf_offset = xdp->data - xdp->data_hard_start; in ena_xdp_handle_buff()
1167 rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data; in ena_xdp_handle_buff()
1173 /* ena_clean_rx_irq - Cleanup RX irq
1183 u16 next_to_clean = rx_ring->next_to_clean; in ena_clean_rx_irq()
1200 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_clean_rx_irq()
1201 "%s qid %d\n", __func__, rx_ring->qid); in ena_clean_rx_irq()
1203 xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq); in ena_clean_rx_irq()
1208 ena_rx_ctx.ena_bufs = rx_ring->ena_bufs; in ena_clean_rx_irq()
1209 ena_rx_ctx.max_bufs = rx_ring->sgl_size; in ena_clean_rx_irq()
1212 rc = ena_com_rx_pkt(rx_ring->ena_com_io_cq, in ena_clean_rx_irq()
1213 rx_ring->ena_com_io_sq, in ena_clean_rx_irq()
1222 rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]; in ena_clean_rx_irq()
1224 rx_info->buf_offset += pkt_offset; in ena_clean_rx_irq()
1226 netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, in ena_clean_rx_irq()
1228 rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto, in ena_clean_rx_irq()
1231 dma_sync_single_for_cpu(rx_ring->dev, in ena_clean_rx_irq()
1232 dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset, in ena_clean_rx_irq()
1233 rx_ring->ena_bufs[0].len, in ena_clean_rx_irq()
1242 rx_ring->ena_bufs, in ena_clean_rx_irq()
1248 int req_id = rx_ring->ena_bufs[i].req_id; in ena_clean_rx_irq()
1250 rx_ring->free_ids[next_to_clean] = req_id; in ena_clean_rx_irq()
1253 rx_ring->ring_size); in ena_clean_rx_irq()
1260 &rx_ring->rx_buffer_info[req_id], in ena_clean_rx_irq()
1262 rx_ring->rx_buffer_info[req_id].page = NULL; in ena_clean_rx_irq()
1268 res_budget--; in ena_clean_rx_irq()
1278 skb_record_rx_queue(skb, rx_ring->qid); in ena_clean_rx_irq()
1280 if (rx_ring->ena_bufs[0].len <= rx_ring->rx_copybreak) in ena_clean_rx_irq()
1283 total_len += skb->len; in ena_clean_rx_irq()
1287 res_budget--; in ena_clean_rx_irq()
1290 work_done = budget - res_budget; in ena_clean_rx_irq()
1291 rx_ring->per_napi_packets += work_done; in ena_clean_rx_irq()
1292 u64_stats_update_begin(&rx_ring->syncp); in ena_clean_rx_irq()
1293 rx_ring->rx_stats.bytes += total_len; in ena_clean_rx_irq()
1294 rx_ring->rx_stats.cnt += work_done; in ena_clean_rx_irq()
1295 rx_ring->rx_stats.rx_copybreak_pkt += rx_copybreak_pkt; in ena_clean_rx_irq()
1296 u64_stats_update_end(&rx_ring->syncp); in ena_clean_rx_irq()
1298 rx_ring->next_to_clean = next_to_clean; in ena_clean_rx_irq()
1300 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); in ena_clean_rx_irq()
1302 min_t(int, rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER, in ena_clean_rx_irq()
1307 ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq); in ena_clean_rx_irq()
1320 adapter = netdev_priv(rx_ring->netdev); in ena_clean_rx_irq()
1322 if (rc == -ENOSPC) { in ena_clean_rx_irq()
1323 ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, in ena_clean_rx_irq()
1324 &rx_ring->syncp); in ena_clean_rx_irq()
1327 ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, in ena_clean_rx_irq()
1328 &rx_ring->syncp); in ena_clean_rx_irq()
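
The refill threshold computed a few lines up in ena_clean_rx_irq() is the smaller of a ring-size fraction and a fixed packet cap; a worked example, assuming ENA_RX_REFILL_THRESH_DIVIDER is 8 and ENA_RX_REFILL_THRESH_PACKET is 256 (assumed ena_netdev.h values, not read from this listing):

    #include <stdio.h>

    #define RX_REFILL_THRESH_DIVIDER 8      /* assumed */
    #define RX_REFILL_THRESH_PACKET  256    /* assumed */

    int main(void)
    {
            int ring_size = 1024;
            int frac = ring_size / RX_REFILL_THRESH_DIVIDER;        /* 128 */
            int threshold = frac < RX_REFILL_THRESH_PACKET ?
                            frac : RX_REFILL_THRESH_PACKET;

            /* A 1024-entry ring refills once 128 descriptors are free;
             * a 4096-entry ring would be capped at 256 by the packet limit.
             */
            printf("refill threshold: %d\n", threshold);
            return 0;
    }
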
1338 net_dim_get_rx_moderation(dim->mode, dim->profile_ix); in ena_dim_work()
1341 ena_napi->rx_ring->smoothed_interval = cur_moder.usec; in ena_dim_work()
1342 dim->state = DIM_START_MEASURE; in ena_dim_work()
1348 struct ena_ring *rx_ring = ena_napi->rx_ring; in ena_adjust_adaptive_rx_intr_moderation()
1350 if (!rx_ring->per_napi_packets) in ena_adjust_adaptive_rx_intr_moderation()
1353 rx_ring->non_empty_napi_events++; in ena_adjust_adaptive_rx_intr_moderation()
1355 dim_update_sample(rx_ring->non_empty_napi_events, in ena_adjust_adaptive_rx_intr_moderation()
1356 rx_ring->rx_stats.cnt, in ena_adjust_adaptive_rx_intr_moderation()
1357 rx_ring->rx_stats.bytes, in ena_adjust_adaptive_rx_intr_moderation()
1360 net_dim(&ena_napi->dim, dim_sample); in ena_adjust_adaptive_rx_intr_moderation()
1362 rx_ring->per_napi_packets = 0; in ena_adjust_adaptive_rx_intr_moderation()
1368 u32 rx_interval = tx_ring->smoothed_interval; in ena_unmask_interrupt()
1371 /* Rx ring can be NULL for XDP tx queues which don't have an in ena_unmask_interrupt()
1375 rx_interval = ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev) ? in ena_unmask_interrupt()
1376 rx_ring->smoothed_interval : in ena_unmask_interrupt()
1377 ena_com_get_nonadaptive_moderation_interval_rx(rx_ring->ena_dev); in ena_unmask_interrupt()
1380 * tx intr delay and interrupt unmask in ena_unmask_interrupt()
1384 tx_ring->smoothed_interval, in ena_unmask_interrupt()
1387 ena_increase_stat(&tx_ring->tx_stats.unmask_interrupt, 1, in ena_unmask_interrupt()
1388 &tx_ring->syncp); in ena_unmask_interrupt()
1390 /* It is a shared MSI-X. in ena_unmask_interrupt()
1391 * Tx and Rx CQ have a pointer to it. in ena_unmask_interrupt()
1393 * The Tx ring is used because the rx_ring is NULL for XDP queues in ena_unmask_interrupt()
1395 ena_com_unmask_intr(tx_ring->ena_com_io_cq, &intr_reg); in ena_unmask_interrupt()
1405 if (likely(tx_ring->cpu == cpu)) in ena_update_ring_numa_node()
1408 tx_ring->cpu = cpu; in ena_update_ring_numa_node()
1410 rx_ring->cpu = cpu; in ena_update_ring_numa_node()
1414 if (likely(tx_ring->numa_node == numa_node)) in ena_update_ring_numa_node()
1420 ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node); in ena_update_ring_numa_node()
1421 tx_ring->numa_node = numa_node; in ena_update_ring_numa_node()
1423 rx_ring->numa_node = numa_node; in ena_update_ring_numa_node()
1424 ena_com_update_numa_node(rx_ring->ena_com_io_cq, in ena_update_ring_numa_node()
1444 tx_ring = ena_napi->tx_ring; in ena_io_poll()
1445 rx_ring = ena_napi->rx_ring; in ena_io_poll()
1447 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER; in ena_io_poll()
1449 if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || in ena_io_poll()
1450 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) { in ena_io_poll()
1457 * tx completions. in ena_io_poll()
1465 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || in ena_io_poll()
1466 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) { in ena_io_poll()
1477 READ_ONCE(ena_napi->interrupts_masked)) { in ena_io_poll()
1479 WRITE_ONCE(ena_napi->interrupts_masked, false); in ena_io_poll()
1481 * Tx uses static interrupt moderation. in ena_io_poll()
1483 if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev)) in ena_io_poll()
1495 u64_stats_update_begin(&tx_ring->syncp); in ena_io_poll()
1496 tx_ring->tx_stats.napi_comp += napi_comp_call; in ena_io_poll()
1497 tx_ring->tx_stats.tx_poll++; in ena_io_poll()
1498 u64_stats_update_end(&tx_ring->syncp); in ena_io_poll()
1500 tx_ring->tx_stats.last_napi_jiffies = jiffies; in ena_io_poll()
1509 ena_com_admin_q_comp_intr_handler(adapter->ena_dev); in ena_intr_msix_mgmnt()
1512 if (likely(test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))) in ena_intr_msix_mgmnt()
1513 ena_com_aenq_intr_handler(adapter->ena_dev, data); in ena_intr_msix_mgmnt()
1518 /* ena_intr_msix_io - MSI-X Interrupt Handler for Tx/Rx
1527 WRITE_ONCE(ena_napi->first_interrupt, true); in ena_intr_msix_io()
1529 WRITE_ONCE(ena_napi->interrupts_masked, true); in ena_intr_msix_io()
1532 napi_schedule_irqoff(&ena_napi->napi); in ena_intr_msix_io()
1537 /* Reserve a single MSI-X vector for management (admin + aenq).
1539 * the number of potential io queues is the minimum of what the device
1546 if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { in ena_enable_msix()
1547 netif_err(adapter, probe, adapter->netdev, in ena_enable_msix()
1548 "Error, MSI-X is already enabled\n"); in ena_enable_msix()
1549 return -EPERM; in ena_enable_msix()
1553 msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues); in ena_enable_msix()
1554 netif_dbg(adapter, probe, adapter->netdev, in ena_enable_msix()
1555 "Trying to enable MSI-X, vectors %d\n", msix_vecs); in ena_enable_msix()
1557 irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC, in ena_enable_msix()
1561 netif_err(adapter, probe, adapter->netdev, in ena_enable_msix()
1562 "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt); in ena_enable_msix()
1563 return -ENOSPC; in ena_enable_msix()
1567 netif_notice(adapter, probe, adapter->netdev, in ena_enable_msix()
1568 "Enable only %d MSI-X (out of %d), reduce the number of queues\n", in ena_enable_msix()
1570 adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC; in ena_enable_msix()
1574 netif_warn(adapter, probe, adapter->netdev, in ena_enable_msix()
1577 adapter->msix_vecs = irq_cnt; in ena_enable_msix()
1578 set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags); in ena_enable_msix()
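
As the comment above ena_enable_msix() says, one MSI-X vector is reserved for admin/AENQ and the rest map one per I/O queue; a small budgeting sketch, assuming ENA_ADMIN_MSIX_VEC is 1 and ENA_MAX_MSIX_VEC() simply adds it to the queue count (assumptions about ena_netdev.h):

    #include <stdio.h>

    #define ADMIN_MSIX_VEC          1                         /* assumed */
    #define MAX_MSIX_VEC(io_queues) (ADMIN_MSIX_VEC + (io_queues))

    int main(void)
    {
            int max_num_io_queues = 8;
            int requested = MAX_MSIX_VEC(max_num_io_queues);  /* 9 vectors */
            int granted = 5;    /* pretend the PCI core granted fewer */

            /* Fewer vectors than requested means fewer I/O queues. */
            int num_io_queues = granted - ADMIN_MSIX_VEC;     /* 4 queues */

            printf("requested %d, granted %d -> %d IO queues\n",
                   requested, granted, num_io_queues);
            return 0;
    }
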
1587 snprintf(adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].name, in ena_setup_mgmnt_intr()
1588 ENA_IRQNAME_SIZE, "ena-mgmnt@pci:%s", in ena_setup_mgmnt_intr()
1589 pci_name(adapter->pdev)); in ena_setup_mgmnt_intr()
1590 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].handler = in ena_setup_mgmnt_intr()
1592 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].data = adapter; in ena_setup_mgmnt_intr()
1593 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].vector = in ena_setup_mgmnt_intr()
1594 pci_irq_vector(adapter->pdev, ENA_MGMNT_IRQ_IDX); in ena_setup_mgmnt_intr()
1596 adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].cpu = cpu; in ena_setup_mgmnt_intr()
1598 &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX].affinity_hint_mask); in ena_setup_mgmnt_intr()
1607 netdev = adapter->netdev; in ena_setup_io_intr()
1608 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; in ena_setup_io_intr()
1614 snprintf(adapter->irq_tbl[irq_idx].name, ENA_IRQNAME_SIZE, in ena_setup_io_intr()
1615 "%s-Tx-Rx-%d", netdev->name, i); in ena_setup_io_intr()
1616 adapter->irq_tbl[irq_idx].handler = ena_intr_msix_io; in ena_setup_io_intr()
1617 adapter->irq_tbl[irq_idx].data = &adapter->ena_napi[i]; in ena_setup_io_intr()
1618 adapter->irq_tbl[irq_idx].vector = in ena_setup_io_intr()
1619 pci_irq_vector(adapter->pdev, irq_idx); in ena_setup_io_intr()
1620 adapter->irq_tbl[irq_idx].cpu = cpu; in ena_setup_io_intr()
1623 &adapter->irq_tbl[irq_idx].affinity_hint_mask); in ena_setup_io_intr()
1633 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; in ena_request_mgmnt_irq()
1634 rc = request_irq(irq->vector, irq->handler, flags, irq->name, in ena_request_mgmnt_irq()
1635 irq->data); in ena_request_mgmnt_irq()
1637 netif_err(adapter, probe, adapter->netdev, in ena_request_mgmnt_irq()
1642 netif_dbg(adapter, probe, adapter->netdev, in ena_request_mgmnt_irq()
1644 irq->affinity_hint_mask.bits[0], irq->vector); in ena_request_mgmnt_irq()
1646 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask); in ena_request_mgmnt_irq()
1653 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; in ena_request_io_irq()
1658 if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) { in ena_request_io_irq()
1659 netif_err(adapter, ifup, adapter->netdev, in ena_request_io_irq()
1660 "Failed to request I/O IRQ: MSI-X is not enabled\n"); in ena_request_io_irq()
1661 return -EINVAL; in ena_request_io_irq()
1665 irq = &adapter->irq_tbl[i]; in ena_request_io_irq()
1666 rc = request_irq(irq->vector, irq->handler, flags, irq->name, in ena_request_io_irq()
1667 irq->data); in ena_request_io_irq()
1669 netif_err(adapter, ifup, adapter->netdev, in ena_request_io_irq()
1675 netif_dbg(adapter, ifup, adapter->netdev, in ena_request_io_irq()
1677 i, irq->affinity_hint_mask.bits[0], irq->vector); in ena_request_io_irq()
1679 irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask); in ena_request_io_irq()
1686 irq = &adapter->irq_tbl[k]; in ena_request_io_irq()
1687 free_irq(irq->vector, irq->data); in ena_request_io_irq()
1697 irq = &adapter->irq_tbl[ENA_MGMNT_IRQ_IDX]; in ena_free_mgmnt_irq()
1698 synchronize_irq(irq->vector); in ena_free_mgmnt_irq()
1699 irq_set_affinity_hint(irq->vector, NULL); in ena_free_mgmnt_irq()
1700 free_irq(irq->vector, irq->data); in ena_free_mgmnt_irq()
1705 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; in ena_free_io_irq()
1710 if (adapter->msix_vecs >= 1) { in ena_free_io_irq()
1711 free_irq_cpu_rmap(adapter->netdev->rx_cpu_rmap); in ena_free_io_irq()
1712 adapter->netdev->rx_cpu_rmap = NULL; in ena_free_io_irq()
1717 irq = &adapter->irq_tbl[i]; in ena_free_io_irq()
1718 irq_set_affinity_hint(irq->vector, NULL); in ena_free_io_irq()
1719 free_irq(irq->vector, irq->data); in ena_free_io_irq()
1725 if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) in ena_disable_msix()
1726 pci_free_irq_vectors(adapter->pdev); in ena_disable_msix()
1731 u32 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; in ena_disable_io_intr_sync()
1734 if (!netif_running(adapter->netdev)) in ena_disable_io_intr_sync()
1738 synchronize_irq(adapter->irq_tbl[i].vector); in ena_disable_io_intr_sync()
1748 netif_napi_del(&adapter->ena_napi[i].napi); in ena_del_napi_in_range()
1751 adapter->ena_napi[i].rx_ring); in ena_del_napi_in_range()
1762 struct ena_napi *napi = &adapter->ena_napi[i]; in ena_init_napi_in_range()
1767 rx_ring = &adapter->rx_ring[i]; in ena_init_napi_in_range()
1768 tx_ring = &adapter->tx_ring[i]; in ena_init_napi_in_range()
1774 netif_napi_add(adapter->netdev, &napi->napi, napi_handler); in ena_init_napi_in_range()
1777 napi->rx_ring = rx_ring; in ena_init_napi_in_range()
1779 napi->tx_ring = tx_ring; in ena_init_napi_in_range()
1780 napi->qid = i; in ena_init_napi_in_range()
1791 napi_disable(&adapter->ena_napi[i].napi); in ena_napi_disable_in_range()
1801 napi_enable(&adapter->ena_napi[i].napi); in ena_napi_enable_in_range()
1807 struct ena_com_dev *ena_dev = adapter->ena_dev; in ena_rss_configure()
1811 if (!ena_dev->rss.tbl_log_size) { in ena_rss_configure()
1813 if (rc && (rc != -EOPNOTSUPP)) { in ena_rss_configure()
1814 netif_err(adapter, ifup, adapter->netdev, in ena_rss_configure()
1822 if (unlikely(rc && rc != -EOPNOTSUPP)) in ena_rss_configure()
1827 if (unlikely(rc && (rc != -EOPNOTSUPP))) in ena_rss_configure()
1832 if (unlikely(rc && (rc != -EOPNOTSUPP))) in ena_rss_configure()
1846 ena_change_mtu(adapter->netdev, adapter->netdev->mtu); in ena_up_complete()
1851 netif_tx_start_all_queues(adapter->netdev); in ena_up_complete()
1855 adapter->xdp_num_queues + adapter->num_io_queues); in ena_up_complete()
1869 ena_dev = adapter->ena_dev; in ena_create_io_tx_queue()
1871 tx_ring = &adapter->tx_ring[qid]; in ena_create_io_tx_queue()
1879 ctx.mem_queue_type = ena_dev->tx_mem_queue_type; in ena_create_io_tx_queue()
1881 ctx.queue_size = tx_ring->ring_size; in ena_create_io_tx_queue()
1882 ctx.numa_node = tx_ring->numa_node; in ena_create_io_tx_queue()
1886 netif_err(adapter, ifup, adapter->netdev, in ena_create_io_tx_queue()
1887 "Failed to create I/O TX queue num %d rc: %d\n", in ena_create_io_tx_queue()
1893 &tx_ring->ena_com_io_sq, in ena_create_io_tx_queue()
1894 &tx_ring->ena_com_io_cq); in ena_create_io_tx_queue()
1896 netif_err(adapter, ifup, adapter->netdev, in ena_create_io_tx_queue()
1897 "Failed to get TX queue handlers. TX queue num %d rc: %d\n", in ena_create_io_tx_queue()
1903 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); in ena_create_io_tx_queue()
1910 struct ena_com_dev *ena_dev = adapter->ena_dev; in ena_create_io_tx_queues_in_range()
1922 while (i-- > first_index) in ena_create_io_tx_queues_in_range()
1937 ena_dev = adapter->ena_dev; in ena_create_io_rx_queue()
1939 rx_ring = &adapter->rx_ring[qid]; in ena_create_io_rx_queue()
1949 ctx.queue_size = rx_ring->ring_size; in ena_create_io_rx_queue()
1950 ctx.numa_node = rx_ring->numa_node; in ena_create_io_rx_queue()
1954 netif_err(adapter, ifup, adapter->netdev, in ena_create_io_rx_queue()
1955 "Failed to create I/O RX queue num %d rc: %d\n", in ena_create_io_rx_queue()
1961 &rx_ring->ena_com_io_sq, in ena_create_io_rx_queue()
1962 &rx_ring->ena_com_io_cq); in ena_create_io_rx_queue()
1964 netif_err(adapter, ifup, adapter->netdev, in ena_create_io_rx_queue()
1965 "Failed to get RX queue handlers. RX queue num %d rc: %d\n", in ena_create_io_rx_queue()
1970 ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node); in ena_create_io_rx_queue()
1980 struct ena_com_dev *ena_dev = adapter->ena_dev; in ena_create_all_io_rx_queues()
1983 for (i = 0; i < adapter->num_io_queues; i++) { in ena_create_all_io_rx_queues()
1987 INIT_WORK(&adapter->ena_napi[i].dim.work, ena_dim_work); in ena_create_all_io_rx_queues()
1989 ena_xdp_register_rxq_info(&adapter->rx_ring[i]); in ena_create_all_io_rx_queues()
1995 while (i--) { in ena_create_all_io_rx_queues()
1996 ena_xdp_unregister_rxq_info(&adapter->rx_ring[i]); in ena_create_all_io_rx_queues()
1997 cancel_work_sync(&adapter->ena_napi[i].dim.work); in ena_create_all_io_rx_queues()
2010 for (i = 0; i < adapter->num_io_queues; i++) { in set_io_rings_size()
2011 adapter->tx_ring[i].ring_size = new_tx_size; in set_io_rings_size()
2012 adapter->rx_ring[i].ring_size = new_rx_size; in set_io_rings_size()
2017 * low on memory. If there is not enough memory to allocate io queues
2018 * the driver will try to allocate smaller queues.
2021 * 1. Try to allocate TX and RX and if successful.
2024 * 2. Divide by 2 the size of the larger of RX and TX queues (or both if their size is the same).
2026 * 3. If TX or RX is smaller than 256
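
A minimal sketch of the backoff these comments describe: halve the larger of the TX/RX ring sizes (both when they are equal) and stop retrying once both sit at the minimum; 256 is used as that minimum here purely as an assumption taken from step 3 above:

    #include <stdio.h>
    #include <stdbool.h>

    #define MIN_RING_SIZE 256    /* assumed floor, per step 3 of the comment */

    /* One backoff step; returns false when there is nothing left to shrink. */
    static bool shrink_rings(int *tx, int *rx)
    {
            int cur_tx = *tx, cur_rx = *rx;

            if (cur_tx <= MIN_RING_SIZE && cur_rx <= MIN_RING_SIZE)
                    return false;
            if (cur_tx >= cur_rx)
                    *tx = cur_tx / 2;
            if (cur_rx >= cur_tx)
                    *rx = cur_rx / 2;
            return true;
    }

    int main(void)
    {
            int tx = 2048, rx = 1024;

            /* 2048/1024 -> 1024/1024 -> 512/512 -> 256/256 -> give up */
            while (shrink_rings(&tx, &rx))
                    printf("retrying with TX=%d RX=%d\n", tx, rx);
            return 0;
    }
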
2039 set_io_rings_size(adapter, adapter->requested_tx_ring_size, in create_queues_with_size_backoff()
2040 adapter->requested_rx_ring_size); in create_queues_with_size_backoff()
2051 adapter->num_io_queues); in create_queues_with_size_backoff()
2057 adapter->num_io_queues); in create_queues_with_size_backoff()
2078 if (rc != -ENOMEM) { in create_queues_with_size_backoff()
2079 netif_err(adapter, ifup, adapter->netdev, in create_queues_with_size_backoff()
2085 cur_tx_ring_size = adapter->tx_ring[0].ring_size; in create_queues_with_size_backoff()
2086 cur_rx_ring_size = adapter->rx_ring[0].ring_size; in create_queues_with_size_backoff()
2088 netif_err(adapter, ifup, adapter->netdev, in create_queues_with_size_backoff()
2089 "Not enough memory to create queues with sizes TX=%d, RX=%d\n", in create_queues_with_size_backoff()
2105 netif_err(adapter, ifup, adapter->netdev, in create_queues_with_size_backoff()
2106 "Queue creation failed with the smallest possible queue size of %d for both queues. Not retrying with smaller queues\n", in create_queues_with_size_backoff()
2111 netif_err(adapter, ifup, adapter->netdev, in create_queues_with_size_backoff()
2112 "Retrying queue creation with sizes TX=%d, RX=%d\n", in create_queues_with_size_backoff()
2125 netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__); in ena_up()
2127 io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; in ena_up()
2149 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) in ena_up()
2150 netif_carrier_on(adapter->netdev); in ena_up()
2152 ena_increase_stat(&adapter->dev_stats.interface_up, 1, in ena_up()
2153 &adapter->syncp); in ena_up()
2155 set_bit(ENA_FLAG_DEV_UP, &adapter->flags); in ena_up()
2157 /* Enable completion queues interrupt */ in ena_up()
2158 for (i = 0; i < adapter->num_io_queues; i++) in ena_up()
2159 ena_unmask_interrupt(&adapter->tx_ring[i], in ena_up()
2160 &adapter->rx_ring[i]); in ena_up()
2166 napi_schedule(&adapter->ena_napi[i].napi); in ena_up()
2185 int io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues; in ena_down()
2187 netif_info(adapter, ifdown, adapter->netdev, "%s\n", __func__); in ena_down()
2189 clear_bit(ENA_FLAG_DEV_UP, &adapter->flags); in ena_down()
2191 ena_increase_stat(&adapter->dev_stats.interface_down, 1, in ena_down()
2192 &adapter->syncp); in ena_down()
2194 netif_carrier_off(adapter->netdev); in ena_down()
2195 netif_tx_disable(adapter->netdev); in ena_down()
2197 /* After this point the napi handler won't enable the tx queue */ in ena_down()
2202 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) { in ena_down()
2205 rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); in ena_down()
2207 netif_err(adapter, ifdown, adapter->netdev, in ena_down()
2210 ena_com_set_admin_running_state(adapter->ena_dev, false); in ena_down()
2225 /* ena_open - Called when a network interface is made active
2242 rc = netif_set_real_num_tx_queues(netdev, adapter->num_io_queues); in ena_open()
2244 netif_err(adapter, ifup, netdev, "Can't set num tx queues\n"); in ena_open()
2248 rc = netif_set_real_num_rx_queues(netdev, adapter->num_io_queues); in ena_open()
2250 netif_err(adapter, ifup, netdev, "Can't set num rx queues\n"); in ena_open()
2261 /* ena_close - Disables a network interface
2266 * The close entry point is called when an interface is de-activated
2277 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) in ena_close()
2280 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) in ena_close()
2285 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { in ena_close()
2286 netif_err(adapter, ifdown, adapter->netdev, in ena_close()
2305 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); in ena_update_queue_params()
2306 ena_close(adapter->netdev); in ena_update_queue_params()
2307 adapter->requested_tx_ring_size = new_tx_size; in ena_update_queue_params()
2308 adapter->requested_rx_ring_size = new_rx_size; in ena_update_queue_params()
2311 adapter->xdp_num_queues + in ena_update_queue_params()
2312 adapter->num_io_queues); in ena_update_queue_params()
2314 large_llq_changed = adapter->ena_dev->tx_mem_queue_type == in ena_update_queue_params()
2317 new_llq_header_len != adapter->ena_dev->tx_max_header_size; in ena_update_queue_params()
2321 adapter->large_llq_header_enabled = !adapter->large_llq_header_enabled; in ena_update_queue_params()
2335 if (rx_copybreak > min_t(u16, adapter->netdev->mtu, ENA_PAGE_SIZE)) in ena_set_rx_copybreak()
2336 return -EINVAL; in ena_set_rx_copybreak()
2338 adapter->rx_copybreak = rx_copybreak; in ena_set_rx_copybreak()
2340 for (i = 0; i < adapter->num_io_queues; i++) { in ena_set_rx_copybreak()
2341 rx_ring = &adapter->rx_ring[i]; in ena_set_rx_copybreak()
2342 rx_ring->rx_copybreak = rx_copybreak; in ena_set_rx_copybreak()
2350 struct ena_com_dev *ena_dev = adapter->ena_dev; in ena_update_queue_count()
2354 dev_was_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); in ena_update_queue_count()
2355 ena_close(adapter->netdev); in ena_update_queue_count()
2356 prev_channel_count = adapter->num_io_queues; in ena_update_queue_count()
2357 adapter->num_io_queues = new_channel_count; in ena_update_queue_count()
2360 adapter->xdp_first_ring = new_channel_count; in ena_update_queue_count()
2361 adapter->xdp_num_queues = new_channel_count; in ena_update_queue_count()
2369 adapter->xdp_bpf_prog, in ena_update_queue_count()
2380 adapter->xdp_num_queues + in ena_update_queue_count()
2381 adapter->num_io_queues); in ena_update_queue_count()
2382 return dev_was_up ? ena_open(adapter->netdev) : 0; in ena_update_queue_count()
2389 u32 mss = skb_shinfo(skb)->gso_size; in ena_tx_csum()
2390 struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta; in ena_tx_csum()
2393 if ((skb->ip_summed == CHECKSUM_PARTIAL) || mss) { in ena_tx_csum()
2394 ena_tx_ctx->l4_csum_enable = 1; in ena_tx_csum()
2396 ena_tx_ctx->tso_enable = 1; in ena_tx_csum()
2397 ena_meta->l4_hdr_len = tcp_hdr(skb)->doff; in ena_tx_csum()
2398 ena_tx_ctx->l4_csum_partial = 0; in ena_tx_csum()
2400 ena_tx_ctx->tso_enable = 0; in ena_tx_csum()
2401 ena_meta->l4_hdr_len = 0; in ena_tx_csum()
2402 ena_tx_ctx->l4_csum_partial = 1; in ena_tx_csum()
2405 switch (ip_hdr(skb)->version) { in ena_tx_csum()
2407 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV4; in ena_tx_csum()
2408 if (ip_hdr(skb)->frag_off & htons(IP_DF)) in ena_tx_csum()
2409 ena_tx_ctx->df = 1; in ena_tx_csum()
2411 ena_tx_ctx->l3_csum_enable = 1; in ena_tx_csum()
2412 l4_protocol = ip_hdr(skb)->protocol; in ena_tx_csum()
2415 ena_tx_ctx->l3_proto = ENA_ETH_IO_L3_PROTO_IPV6; in ena_tx_csum()
2416 l4_protocol = ipv6_hdr(skb)->nexthdr; in ena_tx_csum()
2423 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_TCP; in ena_tx_csum()
2425 ena_tx_ctx->l4_proto = ENA_ETH_IO_L4_PROTO_UDP; in ena_tx_csum()
2427 ena_meta->mss = mss; in ena_tx_csum()
2428 ena_meta->l3_hdr_len = skb_network_header_len(skb); in ena_tx_csum()
2429 ena_meta->l3_hdr_offset = skb_network_offset(skb); in ena_tx_csum()
2430 ena_tx_ctx->meta_valid = 1; in ena_tx_csum()
2433 ena_tx_ctx->meta_valid = 1; in ena_tx_csum()
2435 ena_tx_ctx->meta_valid = 0; in ena_tx_csum()
2444 num_frags = skb_shinfo(skb)->nr_frags; in ena_check_and_linearize_skb()
2447 if (num_frags < tx_ring->sgl_size) in ena_check_and_linearize_skb()
2450 if ((num_frags == tx_ring->sgl_size) && in ena_check_and_linearize_skb()
2451 (header_len < tx_ring->tx_max_header_size)) in ena_check_and_linearize_skb()
2454 ena_increase_stat(&tx_ring->tx_stats.linearize, 1, &tx_ring->syncp); in ena_check_and_linearize_skb()
2458 ena_increase_stat(&tx_ring->tx_stats.linearize_failed, 1, in ena_check_and_linearize_skb()
2459 &tx_ring->syncp); in ena_check_and_linearize_skb()
2471 struct ena_adapter *adapter = tx_ring->adapter; in ena_tx_map_skb()
2480 tx_info->skb = skb; in ena_tx_map_skb()
2481 ena_buf = tx_info->bufs; in ena_tx_map_skb()
2483 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { in ena_tx_map_skb()
2494 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size); in ena_tx_map_skb()
2496 tx_ring->push_buf_intermediate_buf); in ena_tx_map_skb()
2498 if (unlikely(skb->data != *push_hdr)) { in ena_tx_map_skb()
2499 ena_increase_stat(&tx_ring->tx_stats.llq_buffer_copy, 1, in ena_tx_map_skb()
2500 &tx_ring->syncp); in ena_tx_map_skb()
2502 delta = push_len - skb_head_len; in ena_tx_map_skb()
2507 tx_ring->tx_max_header_size); in ena_tx_map_skb()
2510 netif_dbg(adapter, tx_queued, adapter->netdev, in ena_tx_map_skb()
2511 "skb: %p header_buf->vaddr: %p push_len: %d\n", skb, in ena_tx_map_skb()
2515 dma = dma_map_single(tx_ring->dev, skb->data + push_len, in ena_tx_map_skb()
2516 skb_head_len - push_len, DMA_TO_DEVICE); in ena_tx_map_skb()
2517 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) in ena_tx_map_skb()
2520 ena_buf->paddr = dma; in ena_tx_map_skb()
2521 ena_buf->len = skb_head_len - push_len; in ena_tx_map_skb()
2524 tx_info->num_of_bufs++; in ena_tx_map_skb()
2525 tx_info->map_linear_data = 1; in ena_tx_map_skb()
2527 tx_info->map_linear_data = 0; in ena_tx_map_skb()
2530 last_frag = skb_shinfo(skb)->nr_frags; in ena_tx_map_skb()
2533 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in ena_tx_map_skb()
2538 delta -= frag_len; in ena_tx_map_skb()
2542 dma = skb_frag_dma_map(tx_ring->dev, frag, delta, in ena_tx_map_skb()
2543 frag_len - delta, DMA_TO_DEVICE); in ena_tx_map_skb()
2544 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) in ena_tx_map_skb()
2547 ena_buf->paddr = dma; in ena_tx_map_skb()
2548 ena_buf->len = frag_len - delta; in ena_tx_map_skb()
2550 tx_info->num_of_bufs++; in ena_tx_map_skb()
2557 ena_increase_stat(&tx_ring->tx_stats.dma_mapping_err, 1, in ena_tx_map_skb()
2558 &tx_ring->syncp); in ena_tx_map_skb()
2559 netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n"); in ena_tx_map_skb()
2561 tx_info->skb = NULL; in ena_tx_map_skb()
2563 tx_info->num_of_bufs += i; in ena_tx_map_skb()
2566 return -EINVAL; in ena_tx_map_skb()
2582 /* Determine which tx ring we will be placed on */ in ena_start_xmit()
2584 tx_ring = &adapter->tx_ring[qid]; in ena_start_xmit()
2593 next_to_use = tx_ring->next_to_use; in ena_start_xmit()
2594 req_id = tx_ring->free_ids[next_to_use]; in ena_start_xmit()
2595 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_start_xmit()
2596 tx_info->num_of_bufs = 0; in ena_start_xmit()
2598 WARN(tx_info->skb, "SKB isn't NULL req_id %d\n", req_id); in ena_start_xmit()
2605 ena_tx_ctx.ena_bufs = tx_info->bufs; in ena_start_xmit()
2607 ena_tx_ctx.num_bufs = tx_info->num_of_bufs; in ena_start_xmit()
2612 ena_tx_csum(&ena_tx_ctx, skb, tx_ring->disable_meta_caching); in ena_start_xmit()
2619 skb->len); in ena_start_xmit()
2623 netdev_tx_sent_queue(txq, skb->len); in ena_start_xmit()
2629 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_start_xmit()
2630 tx_ring->sgl_size + 2))) { in ena_start_xmit()
2635 ena_increase_stat(&tx_ring->tx_stats.queue_stop, 1, in ena_start_xmit()
2636 &tx_ring->syncp); in ena_start_xmit()
2648 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_start_xmit()
2651 ena_increase_stat(&tx_ring->tx_stats.queue_wakeup, 1, in ena_start_xmit()
2652 &tx_ring->syncp); in ena_start_xmit()
2666 tx_info->skb = NULL; in ena_start_xmit()
2677 /* we suspect that this is good for in-kernel network services that in ena_select_queue()
2678 * want to loop incoming skb rx to tx in normal user generated traffic, in ena_select_queue()
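
The comment above describes preferring the queue a forwarded skb was received on and otherwise letting the stack choose; a hedged sketch of that common ndo_select_queue pattern (not a verbatim copy of ena_select_queue()):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Sketch of an ndo_select_queue handler: reuse the recorded RX queue for
     * looped-back/forwarded skbs, else defer to the core's pick.
     */
    static u16 demo_select_queue(struct net_device *dev, struct sk_buff *skb,
                                 struct net_device *sb_dev)
    {
            if (skb_rx_queue_recorded(skb))
                    return skb_get_rx_queue(skb);

            return netdev_pick_tx(dev, skb, sb_dev);
    }
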
2691 struct device *dev = &pdev->dev; in ena_config_host_info()
2702 host_info = ena_dev->host_attr.host_info; in ena_config_host_info()
2704 host_info->bdf = pci_dev_id(pdev); in ena_config_host_info()
2705 host_info->os_type = ENA_ADMIN_OS_LINUX; in ena_config_host_info()
2706 host_info->kernel_ver = LINUX_VERSION_CODE; in ena_config_host_info()
2707 strscpy(host_info->kernel_ver_str, utsname()->version, in ena_config_host_info()
2708 sizeof(host_info->kernel_ver_str) - 1); in ena_config_host_info()
2709 host_info->os_dist = 0; in ena_config_host_info()
2710 strscpy(host_info->os_dist_str, utsname()->release, in ena_config_host_info()
2711 sizeof(host_info->os_dist_str)); in ena_config_host_info()
2712 host_info->driver_version = in ena_config_host_info()
2717 host_info->num_cpus = num_online_cpus(); in ena_config_host_info()
2719 host_info->driver_supported_features = in ena_config_host_info()
2728 if (rc == -EOPNOTSUPP) in ena_config_host_info()
2747 ss_count = ena_get_sset_count(adapter->netdev, ETH_SS_STATS); in ena_config_debug_area()
2749 netif_err(adapter, drv, adapter->netdev, in ena_config_debug_area()
2757 rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size); in ena_config_debug_area()
2759 netif_err(adapter, drv, adapter->netdev, in ena_config_debug_area()
2764 rc = ena_com_set_host_attributes(adapter->ena_dev); in ena_config_debug_area()
2766 if (rc == -EOPNOTSUPP) in ena_config_debug_area()
2767 netif_warn(adapter, drv, adapter->netdev, in ena_config_debug_area()
2770 netif_err(adapter, drv, adapter->netdev, in ena_config_debug_area()
2777 ena_com_delete_debug_area(adapter->ena_dev); in ena_config_debug_area()
2784 rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats); in ena_update_hw_stats()
2786 netdev_err(adapter->netdev, "Failed to get ENI stats\n"); in ena_update_hw_stats()
2804 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) in ena_get_stats64()
2807 for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) { in ena_get_stats64()
2810 tx_ring = &adapter->tx_ring[i]; in ena_get_stats64()
2813 start = u64_stats_fetch_begin(&tx_ring->syncp); in ena_get_stats64()
2814 packets = tx_ring->tx_stats.cnt; in ena_get_stats64()
2815 bytes = tx_ring->tx_stats.bytes; in ena_get_stats64()
2816 } while (u64_stats_fetch_retry(&tx_ring->syncp, start)); in ena_get_stats64()
2818 stats->tx_packets += packets; in ena_get_stats64()
2819 stats->tx_bytes += bytes; in ena_get_stats64()
2825 rx_ring = &adapter->rx_ring[i]; in ena_get_stats64()
2828 start = u64_stats_fetch_begin(&rx_ring->syncp); in ena_get_stats64()
2829 packets = rx_ring->rx_stats.cnt; in ena_get_stats64()
2830 bytes = rx_ring->rx_stats.bytes; in ena_get_stats64()
2831 xdp_rx_drops = rx_ring->rx_stats.xdp_drop; in ena_get_stats64()
2832 } while (u64_stats_fetch_retry(&rx_ring->syncp, start)); in ena_get_stats64()
2834 stats->rx_packets += packets; in ena_get_stats64()
2835 stats->rx_bytes += bytes; in ena_get_stats64()
2840 start = u64_stats_fetch_begin(&adapter->syncp); in ena_get_stats64()
2841 rx_drops = adapter->dev_stats.rx_drops; in ena_get_stats64()
2842 tx_drops = adapter->dev_stats.tx_drops; in ena_get_stats64()
2843 } while (u64_stats_fetch_retry(&adapter->syncp, start)); in ena_get_stats64()
2845 stats->rx_dropped = rx_drops + total_xdp_rx_drops; in ena_get_stats64()
2846 stats->tx_dropped = tx_drops; in ena_get_stats64()
2848 stats->multicast = 0; in ena_get_stats64()
2849 stats->collisions = 0; in ena_get_stats64()
2851 stats->rx_length_errors = 0; in ena_get_stats64()
2852 stats->rx_crc_errors = 0; in ena_get_stats64()
2853 stats->rx_frame_errors = 0; in ena_get_stats64()
2854 stats->rx_fifo_errors = 0; in ena_get_stats64()
2855 stats->rx_missed_errors = 0; in ena_get_stats64()
2856 stats->tx_window_errors = 0; in ena_get_stats64()
2858 stats->rx_errors = 0; in ena_get_stats64()
2859 stats->tx_errors = 0; in ena_get_stats64()
2879 struct ena_admin_feature_llq_desc *llq = &get_feat_ctx->llq; in ena_calc_io_queue_size()
2880 struct ena_com_dev *ena_dev = adapter->ena_dev; in ena_calc_io_queue_size()
2889 if (adapter->tx_ring->ring_size) in ena_calc_io_queue_size()
2890 tx_queue_size = adapter->tx_ring->ring_size; in ena_calc_io_queue_size()
2892 if (adapter->rx_ring->ring_size) in ena_calc_io_queue_size()
2893 rx_queue_size = adapter->rx_ring->ring_size; in ena_calc_io_queue_size()
2895 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { in ena_calc_io_queue_size()
2897 &get_feat_ctx->max_queue_ext.max_queue_ext; in ena_calc_io_queue_size()
2898 max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth, in ena_calc_io_queue_size()
2899 max_queue_ext->max_rx_sq_depth); in ena_calc_io_queue_size()
2900 max_tx_queue_size = max_queue_ext->max_tx_cq_depth; in ena_calc_io_queue_size()
2902 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) in ena_calc_io_queue_size()
2904 llq->max_llq_depth); in ena_calc_io_queue_size()
2907 max_queue_ext->max_tx_sq_depth); in ena_calc_io_queue_size()
2909 adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, in ena_calc_io_queue_size()
2910 max_queue_ext->max_per_packet_tx_descs); in ena_calc_io_queue_size()
2911 adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, in ena_calc_io_queue_size()
2912 max_queue_ext->max_per_packet_rx_descs); in ena_calc_io_queue_size()
2915 &get_feat_ctx->max_queues; in ena_calc_io_queue_size()
2916 max_rx_queue_size = min_t(u32, max_queues->max_cq_depth, in ena_calc_io_queue_size()
2917 max_queues->max_sq_depth); in ena_calc_io_queue_size()
2918 max_tx_queue_size = max_queues->max_cq_depth; in ena_calc_io_queue_size()
2920 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) in ena_calc_io_queue_size()
2922 llq->max_llq_depth); in ena_calc_io_queue_size()
2925 max_queues->max_sq_depth); in ena_calc_io_queue_size()
2927 adapter->max_tx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, in ena_calc_io_queue_size()
2928 max_queues->max_packet_tx_descs); in ena_calc_io_queue_size()
2929 adapter->max_rx_sgl_size = min_t(u16, ENA_PKT_MAX_BUFS, in ena_calc_io_queue_size()
2930 max_queues->max_packet_rx_descs); in ena_calc_io_queue_size()
2937 * the queue size by 2, leaving the amount of memory used by the queues unchanged. in ena_calc_io_queue_size()
2939 if (adapter->large_llq_header_enabled) { in ena_calc_io_queue_size()
2940 if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) && in ena_calc_io_queue_size()
2941 ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { in ena_calc_io_queue_size()
2943 dev_info(&adapter->pdev->dev, in ena_calc_io_queue_size()
2944 "Forcing large headers and decreasing maximum TX queue size to %d\n", in ena_calc_io_queue_size()
2947 dev_err(&adapter->pdev->dev, in ena_calc_io_queue_size()
2950 adapter->large_llq_header_enabled = false; in ena_calc_io_queue_size()
2962 adapter->max_tx_ring_size = max_tx_queue_size; in ena_calc_io_queue_size()
2963 adapter->max_rx_ring_size = max_rx_queue_size; in ena_calc_io_queue_size()
2964 adapter->requested_tx_ring_size = tx_queue_size; in ena_calc_io_queue_size()
2965 adapter->requested_rx_ring_size = rx_queue_size; in ena_calc_io_queue_size()
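
A minimal sketch of the TX depth clamping visible above: the depth starts from the completion-queue limit and is further capped by the LLQ depth when descriptors are pushed to device memory, or by the submission-queue limit otherwise. The my_max_tx_depth() helper and its parameters are illustrative, not the driver's API:

#include <linux/minmax.h>
#include <linux/types.h>

static u32 my_max_tx_depth(u32 max_cq_depth, u32 max_sq_depth,
			   u32 max_llq_depth, bool llq_enabled)
{
	u32 depth = max_cq_depth;

	if (llq_enabled)
		depth = min_t(u32, depth, max_llq_depth);
	else
		depth = min_t(u32, depth, max_sq_depth);

	return depth;
}
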
2971 struct net_device *netdev = adapter->netdev; in ena_device_validate_params()
2974 rc = ether_addr_equal(get_feat_ctx->dev_attr.mac_addr, in ena_device_validate_params()
2975 adapter->mac_addr); in ena_device_validate_params()
2979 return -EINVAL; in ena_device_validate_params()
2982 if (get_feat_ctx->dev_attr.max_mtu < netdev->mtu) { in ena_device_validate_params()
2985 return -EINVAL; in ena_device_validate_params()
2995 struct ena_com_dev *ena_dev = adapter->ena_dev; in set_default_llq_configurations()
2997 llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER; in set_default_llq_configurations()
2998 llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY; in set_default_llq_configurations()
2999 llq_config->llq_num_decs_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2; in set_default_llq_configurations()
3001 adapter->large_llq_header_supported = in set_default_llq_configurations()
3002 !!(ena_dev->supported_features & BIT(ENA_ADMIN_LLQ)); in set_default_llq_configurations()
3003 adapter->large_llq_header_supported &= in set_default_llq_configurations()
3004 !!(llq->entry_size_ctrl_supported & in set_default_llq_configurations()
3007 if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) && in set_default_llq_configurations()
3008 adapter->large_llq_header_enabled) { in set_default_llq_configurations()
3009 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_256B; in set_default_llq_configurations()
3010 llq_config->llq_ring_entry_size_value = 256; in set_default_llq_configurations()
3012 llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B; in set_default_llq_configurations()
3013 llq_config->llq_ring_entry_size_value = 128; in set_default_llq_configurations()
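
A minimal sketch of the entry-size decision above: 256B LLQ entries are chosen only when the device advertises ENA_ADMIN_LIST_ENTRY_SIZE_256B and large LLQ headers are enabled; otherwise the 128B default is kept. The helper name is illustrative:

/* Returns the LLQ ring entry size in bytes. */
static unsigned int my_llq_entry_size(bool dev_supports_256b,
				      bool large_headers_enabled)
{
	return (dev_supports_256b && large_headers_enabled) ? 256 : 128;
}
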
3026 if (!(ena_dev->supported_features & llq_feature_mask)) { in ena_set_queues_placement_policy()
3027 dev_warn(&pdev->dev, in ena_set_queues_placement_policy()
3029 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; in ena_set_queues_placement_policy()
3033 if (!ena_dev->mem_bar) { in ena_set_queues_placement_policy()
3034 netdev_err(ena_dev->net_device, in ena_set_queues_placement_policy()
3036 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; in ena_set_queues_placement_policy()
3042 dev_err(&pdev->dev, in ena_set_queues_placement_policy()
3044 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST; in ena_set_queues_placement_policy()
3058 ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev, in ena_map_llq_mem_bar()
3062 if (!ena_dev->mem_bar) in ena_map_llq_mem_bar()
3063 return -EFAULT; in ena_map_llq_mem_bar()
3072 struct ena_com_dev *ena_dev = adapter->ena_dev; in ena_device_init()
3074 struct device *dev = &pdev->dev; in ena_device_init()
3089 readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ); in ena_device_init()
3126 * of queues. So the driver uses polling mode to retrieve this in ena_device_init()
3147 aenq_groups &= get_feat_ctx->aenq.supported_groups; in ena_device_init()
3157 set_default_llq_configurations(adapter, &llq_config, &get_feat_ctx->llq); in ena_device_init()
3159 rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq, in ena_device_init()
3181 struct ena_com_dev *ena_dev = adapter->ena_dev; in ena_enable_msix_and_set_admin_interrupts()
3182 struct device *dev = &adapter->pdev->dev; in ena_enable_msix_and_set_admin_interrupts()
3213 struct net_device *netdev = adapter->netdev; in ena_destroy_device()
3214 struct ena_com_dev *ena_dev = adapter->ena_dev; in ena_destroy_device()
3217 if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) in ena_destroy_device()
3222 del_timer_sync(&adapter->timer_service); in ena_destroy_device()
3224 dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags); in ena_destroy_device()
3225 adapter->dev_up_before_reset = dev_up; in ena_destroy_device()
3229 if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) in ena_destroy_device()
3235 if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) in ena_destroy_device()
3236 ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); in ena_destroy_device()
3251 adapter->reset_reason = ENA_REGS_RESET_NORMAL; in ena_destroy_device()
3253 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); in ena_destroy_device()
3254 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); in ena_destroy_device()
3260 struct ena_com_dev *ena_dev = adapter->ena_dev; in ena_restore_device()
3261 struct pci_dev *pdev = adapter->pdev; in ena_restore_device()
3266 set_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); in ena_restore_device()
3267 rc = ena_device_init(adapter, adapter->pdev, &get_feat_ctx, &wd_state); in ena_restore_device()
3269 dev_err(&pdev->dev, "Can not initialize device\n"); in ena_restore_device()
3272 adapter->wd_state = wd_state; in ena_restore_device()
3274 count = adapter->xdp_num_queues + adapter->num_io_queues; in ena_restore_device()
3276 txr = &adapter->tx_ring[i]; in ena_restore_device()
3277 txr->tx_mem_queue_type = ena_dev->tx_mem_queue_type; in ena_restore_device()
3278 txr->tx_max_header_size = ena_dev->tx_max_header_size; in ena_restore_device()
3283 dev_err(&pdev->dev, "Validation of device parameters failed\n"); in ena_restore_device()
3289 dev_err(&pdev->dev, "Enable MSI-X failed\n"); in ena_restore_device()
3293 if (adapter->dev_up_before_reset) { in ena_restore_device()
3296 dev_err(&pdev->dev, "Failed to create I/O queues\n"); in ena_restore_device()
3301 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); in ena_restore_device()
3303 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); in ena_restore_device()
3304 if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) in ena_restore_device()
3305 netif_carrier_on(adapter->netdev); in ena_restore_device()
3307 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); in ena_restore_device()
3308 adapter->last_keep_alive_jiffies = jiffies; in ena_restore_device()
3321 clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); in ena_restore_device()
3322 clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); in ena_restore_device()
3323 dev_err(&pdev->dev, in ena_restore_device()
3336 if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { in ena_fw_reset_device()
3340 dev_err(&adapter->pdev->dev, "Device reset completed successfully\n"); in ena_fw_reset_device()
3349 struct ena_napi *ena_napi = container_of(rx_ring->napi, struct ena_napi, napi); in check_for_rx_interrupt_queue()
3351 if (likely(READ_ONCE(ena_napi->first_interrupt))) in check_for_rx_interrupt_queue()
3354 if (ena_com_cq_empty(rx_ring->ena_com_io_cq)) in check_for_rx_interrupt_queue()
3357 rx_ring->no_interrupt_event_cnt++; in check_for_rx_interrupt_queue()
3359 if (rx_ring->no_interrupt_event_cnt == ENA_MAX_NO_INTERRUPT_ITERATIONS) { in check_for_rx_interrupt_queue()
3360 netif_err(adapter, rx_err, adapter->netdev, in check_for_rx_interrupt_queue()
3362 rx_ring->qid); in check_for_rx_interrupt_queue()
3365 return -EIO; in check_for_rx_interrupt_queue()
3374 struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi); in check_missing_comp_in_tx_queue()
3383 for (i = 0; i < tx_ring->ring_size; i++) { in check_missing_comp_in_tx_queue()
3384 tx_buf = &tx_ring->tx_buffer_info[i]; in check_missing_comp_in_tx_queue()
3385 last_jiffies = tx_buf->last_jiffies; in check_missing_comp_in_tx_queue()
3388 /* no pending Tx at this location */ in check_missing_comp_in_tx_queue()
3392 2 * adapter->missing_tx_completion_to); in check_missing_comp_in_tx_queue()
3394 if (unlikely(!READ_ONCE(ena_napi->first_interrupt) && is_tx_comp_time_expired)) { in check_missing_comp_in_tx_queue()
3398 netif_err(adapter, tx_err, adapter->netdev, in check_missing_comp_in_tx_queue()
3399 "Potential MSIX issue on Tx side Queue = %d. Reset the device\n", in check_missing_comp_in_tx_queue()
3400 tx_ring->qid); in check_missing_comp_in_tx_queue()
3402 return -EIO; in check_missing_comp_in_tx_queue()
3406 adapter->missing_tx_completion_to); in check_missing_comp_in_tx_queue()
3409 if (!tx_buf->print_once) { in check_missing_comp_in_tx_queue()
3410 time_since_last_napi = jiffies_to_usecs(jiffies - tx_ring->tx_stats.last_napi_jiffies); in check_missing_comp_in_tx_queue()
3411 missing_tx_comp_to = jiffies_to_msecs(adapter->missing_tx_completion_to); in check_missing_comp_in_tx_queue()
3412 netif_notice(adapter, tx_err, adapter->netdev, in check_missing_comp_in_tx_queue()
3413 …"Found a Tx that wasn't completed on time, qid %d, index %d. %u usecs have passed since last napi … in check_missing_comp_in_tx_queue()
3414 tx_ring->qid, i, time_since_last_napi, missing_tx_comp_to); in check_missing_comp_in_tx_queue()
3417 tx_buf->print_once = 1; in check_missing_comp_in_tx_queue()
3422 if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) { in check_missing_comp_in_tx_queue()
3423 netif_err(adapter, tx_err, adapter->netdev, in check_missing_comp_in_tx_queue()
3424 "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n", in check_missing_comp_in_tx_queue()
3426 adapter->missing_tx_completion_threshold); in check_missing_comp_in_tx_queue()
3428 rc = -EIO; in check_missing_comp_in_tx_queue()
3431 ena_increase_stat(&tx_ring->tx_stats.missed_tx, missed_tx, in check_missing_comp_in_tx_queue()
3432 &tx_ring->syncp); in check_missing_comp_in_tx_queue()
3444 io_queue_count = adapter->xdp_num_queues + adapter->num_io_queues; in check_for_missing_completions()
3448 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) in check_for_missing_completions()
3451 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) in check_for_missing_completions()
3454 if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT) in check_for_missing_completions()
3459 for (i = adapter->last_monitored_tx_qid; i < io_queue_count; i++) { in check_for_missing_completions()
3460 tx_ring = &adapter->tx_ring[i]; in check_for_missing_completions()
3461 rx_ring = &adapter->rx_ring[i]; in check_for_missing_completions()
3472 budget--; in check_for_missing_completions()
3477 adapter->last_monitored_tx_qid = i % io_queue_count; in check_for_missing_completions()
3494 * When such a situation is detected - Reschedule napi
3501 if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) in check_for_empty_rx_ring()
3504 if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) in check_for_empty_rx_ring()
3507 for (i = 0; i < adapter->num_io_queues; i++) { in check_for_empty_rx_ring()
3508 rx_ring = &adapter->rx_ring[i]; in check_for_empty_rx_ring()
3510 refill_required = ena_com_free_q_entries(rx_ring->ena_com_io_sq); in check_for_empty_rx_ring()
3511 if (unlikely(refill_required == (rx_ring->ring_size - 1))) { in check_for_empty_rx_ring()
3512 rx_ring->empty_rx_queue++; in check_for_empty_rx_ring()
3514 if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) { in check_for_empty_rx_ring()
3515 ena_increase_stat(&rx_ring->rx_stats.empty_rx_ring, 1, in check_for_empty_rx_ring()
3516 &rx_ring->syncp); in check_for_empty_rx_ring()
3518 netif_err(adapter, drv, adapter->netdev, in check_for_empty_rx_ring()
3521 napi_schedule(rx_ring->napi); in check_for_empty_rx_ring()
3522 rx_ring->empty_rx_queue = 0; in check_for_empty_rx_ring()
3525 rx_ring->empty_rx_queue = 0; in check_for_empty_rx_ring()
3535 if (!adapter->wd_state) in check_for_missing_keep_alive()
3538 if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT) in check_for_missing_keep_alive()
3541 keep_alive_expired = adapter->last_keep_alive_jiffies + in check_for_missing_keep_alive()
3542 adapter->keep_alive_timeout; in check_for_missing_keep_alive()
3544 netif_err(adapter, drv, adapter->netdev, in check_for_missing_keep_alive()
3546 ena_increase_stat(&adapter->dev_stats.wd_expired, 1, in check_for_missing_keep_alive()
3547 &adapter->syncp); in check_for_missing_keep_alive()
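
A minimal sketch of the jiffies arithmetic behind the keep-alive watchdog above; last_keep_alive and timeout stand in for the corresponding adapter fields, and this is one way to express the expiry test, not the driver's exact code:

#include <linux/jiffies.h>
#include <linux/types.h>

/* True when the last keep-alive event is older than the allowed timeout. */
static bool my_keep_alive_expired(unsigned long last_keep_alive,
				  unsigned long timeout)
{
	return time_is_before_jiffies(last_keep_alive + timeout);
}
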
3554 if (unlikely(!ena_com_get_admin_running_state(adapter->ena_dev))) { in check_for_admin_com_state()
3555 netif_err(adapter, drv, adapter->netdev, in check_for_admin_com_state()
3557 ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1, in check_for_admin_com_state()
3558 &adapter->syncp); in check_for_admin_com_state()
3566 struct net_device *netdev = adapter->netdev; in ena_update_hints()
3568 if (hints->admin_completion_tx_timeout) in ena_update_hints()
3569 adapter->ena_dev->admin_queue.completion_timeout = in ena_update_hints()
3570 hints->admin_completion_tx_timeout * 1000; in ena_update_hints()
3572 if (hints->mmio_read_timeout) in ena_update_hints()
3574 adapter->ena_dev->mmio_read.reg_read_to = in ena_update_hints()
3575 hints->mmio_read_timeout * 1000; in ena_update_hints()
3577 if (hints->missed_tx_completion_count_threshold_to_reset) in ena_update_hints()
3578 adapter->missing_tx_completion_threshold = in ena_update_hints()
3579 hints->missed_tx_completion_count_threshold_to_reset; in ena_update_hints()
3581 if (hints->missing_tx_completion_timeout) { in ena_update_hints()
3582 if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT) in ena_update_hints()
3583 adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT; in ena_update_hints()
3585 adapter->missing_tx_completion_to = in ena_update_hints()
3586 msecs_to_jiffies(hints->missing_tx_completion_timeout); in ena_update_hints()
3589 if (hints->netdev_wd_timeout) in ena_update_hints()
3590 netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout); in ena_update_hints()
3592 if (hints->driver_watchdog_timeout) { in ena_update_hints()
3593 if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT) in ena_update_hints()
3594 adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT; in ena_update_hints()
3596 adapter->keep_alive_timeout = in ena_update_hints()
3597 msecs_to_jiffies(hints->driver_watchdog_timeout); in ena_update_hints()
3604 host_info->supported_network_features[0] = in ena_update_host_info()
3605 netdev->features & GENMASK_ULL(31, 0); in ena_update_host_info()
3606 host_info->supported_network_features[1] = in ena_update_host_info()
3607 (netdev->features & GENMASK_ULL(63, 32)) >> 32; in ena_update_host_info()
3613 u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr; in ena_timer_service()
3615 adapter->ena_dev->host_attr.host_info; in ena_timer_service()
3629 ena_update_host_info(host_info, adapter->netdev); in ena_timer_service()
3631 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { in ena_timer_service()
3632 netif_err(adapter, drv, adapter->netdev, in ena_timer_service()
3635 queue_work(ena_wq, &adapter->reset_task); in ena_timer_service()
3640 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); in ena_timer_service()
3649 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) { in ena_calc_max_io_queue_num()
3651 &get_feat_ctx->max_queue_ext.max_queue_ext; in ena_calc_max_io_queue_num()
3652 io_rx_num = min_t(u32, max_queue_ext->max_rx_sq_num, in ena_calc_max_io_queue_num()
3653 max_queue_ext->max_rx_cq_num); in ena_calc_max_io_queue_num()
3655 io_tx_sq_num = max_queue_ext->max_tx_sq_num; in ena_calc_max_io_queue_num()
3656 io_tx_cq_num = max_queue_ext->max_tx_cq_num; in ena_calc_max_io_queue_num()
3659 &get_feat_ctx->max_queues; in ena_calc_max_io_queue_num()
3660 io_tx_sq_num = max_queues->max_sq_num; in ena_calc_max_io_queue_num()
3661 io_tx_cq_num = max_queues->max_cq_num; in ena_calc_max_io_queue_num()
3665 /* In case of LLQ use the llq fields for the tx SQ/CQ */ in ena_calc_max_io_queue_num()
3666 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) in ena_calc_max_io_queue_num()
3667 io_tx_sq_num = get_feat_ctx->llq.max_llq_num; in ena_calc_max_io_queue_num()
3674 max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1); in ena_calc_max_io_queue_num()
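
A minimal sketch of the queue-count clamping above: among the limits visible here, the I/O queue count is bounded by the RX queue limit, the TX SQ/CQ limits, and the MSI-X vector count minus the one vector kept for the management interrupt. The my_max_io_queues() helper is illustrative:

#include <linux/minmax.h>
#include <linux/types.h>

static u32 my_max_io_queues(u32 io_rx_num, u32 io_tx_sq_num, u32 io_tx_cq_num,
			    int msix_vec_count)
{
	u32 n = min3(io_rx_num, io_tx_sq_num, io_tx_cq_num);

	return min_t(u32, n, msix_vec_count - 1);
}
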
3685 if (feat->offload.tx & in ena_set_dev_offloads()
3689 if (feat->offload.tx & in ena_set_dev_offloads()
3693 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK) in ena_set_dev_offloads()
3696 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV6_MASK) in ena_set_dev_offloads()
3699 if (feat->offload.tx & ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_ECN_MASK) in ena_set_dev_offloads()
3702 if (feat->offload.rx_supported & in ena_set_dev_offloads()
3706 if (feat->offload.rx_supported & in ena_set_dev_offloads()
3710 netdev->features = in ena_set_dev_offloads()
3716 netdev->hw_features |= netdev->features; in ena_set_dev_offloads()
3717 netdev->vlan_features |= netdev->features; in ena_set_dev_offloads()
3723 struct net_device *netdev = adapter->netdev; in ena_set_conf_feat_params()
3726 if (!is_valid_ether_addr(feat->dev_attr.mac_addr)) { in ena_set_conf_feat_params()
3728 ether_addr_copy(adapter->mac_addr, netdev->dev_addr); in ena_set_conf_feat_params()
3730 ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr); in ena_set_conf_feat_params()
3731 eth_hw_addr_set(netdev, adapter->mac_addr); in ena_set_conf_feat_params()
3737 adapter->max_mtu = feat->dev_attr.max_mtu; in ena_set_conf_feat_params()
3738 netdev->max_mtu = adapter->max_mtu; in ena_set_conf_feat_params()
3739 netdev->min_mtu = ENA_MIN_MTU; in ena_set_conf_feat_params()
3744 struct ena_com_dev *ena_dev = adapter->ena_dev; in ena_rss_init_default()
3745 struct device *dev = &adapter->pdev->dev; in ena_rss_init_default()
3756 val = ethtool_rxfh_indir_default(i, adapter->num_io_queues); in ena_rss_init_default()
3767 if (unlikely(rc && (rc != -EOPNOTSUPP))) { in ena_rss_init_default()
3773 if (unlikely(rc && (rc != -EOPNOTSUPP))) { in ena_rss_init_default()
3794 /* ena_probe - Device Initialization Routine
3815 dev_dbg(&pdev->dev, "%s\n", __func__); in ena_probe()
3819 dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n"); in ena_probe()
3823 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(ENA_MAX_PHYS_ADDR_SIZE_BITS)); in ena_probe()
3825 dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", rc); in ena_probe()
3833 rc = -ENOMEM; in ena_probe()
3840 dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n", in ena_probe()
3845 ena_dev->reg_bar = devm_ioremap(&pdev->dev, in ena_probe()
3848 if (!ena_dev->reg_bar) { in ena_probe()
3849 dev_err(&pdev->dev, "Failed to remap regs bar\n"); in ena_probe()
3850 rc = -EFAULT; in ena_probe()
3854 ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US; in ena_probe()
3856 ena_dev->dmadev = &pdev->dev; in ena_probe()
3860 dev_err(&pdev->dev, "alloc_etherdev_mq failed\n"); in ena_probe()
3861 rc = -ENOMEM; in ena_probe()
3865 SET_NETDEV_DEV(netdev, &pdev->dev); in ena_probe()
3867 adapter->ena_dev = ena_dev; in ena_probe()
3868 adapter->netdev = netdev; in ena_probe()
3869 adapter->pdev = pdev; in ena_probe()
3870 adapter->msg_enable = DEFAULT_MSG_ENABLE; in ena_probe()
3872 ena_dev->net_device = netdev; in ena_probe()
3878 dev_err(&pdev->dev, "ENA LLQ bar mapping failed\n"); in ena_probe()
3884 dev_err(&pdev->dev, "ENA device init failed\n"); in ena_probe()
3885 if (rc == -ETIME) in ena_probe()
3886 rc = -EPROBE_DEFER; in ena_probe()
3890 /* Initial TX and RX interrupt delay. Assumes 1 usec granularity. in ena_probe()
3893 ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS; in ena_probe()
3894 ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS; in ena_probe()
3895 ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION; in ena_probe()
3898 rc = -EFAULT; in ena_probe()
3904 adapter->reset_reason = ENA_REGS_RESET_NORMAL; in ena_probe()
3906 adapter->num_io_queues = max_num_io_queues; in ena_probe()
3907 adapter->max_num_io_queues = max_num_io_queues; in ena_probe()
3908 adapter->last_monitored_tx_qid = 0; in ena_probe()
3910 adapter->xdp_first_ring = 0; in ena_probe()
3911 adapter->xdp_num_queues = 0; in ena_probe()
3913 adapter->rx_copybreak = ENA_DEFAULT_RX_COPYBREAK; in ena_probe()
3914 if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) in ena_probe()
3915 adapter->disable_meta_caching = in ena_probe()
3919 adapter->wd_state = wd_state; in ena_probe()
3921 snprintf(adapter->name, ENA_NAME_MAX_LEN, "ena_%d", adapters_found); in ena_probe()
3923 rc = ena_com_init_interrupt_moderation(adapter->ena_dev); in ena_probe()
3925 dev_err(&pdev->dev, in ena_probe()
3932 adapter->xdp_num_queues + in ena_probe()
3933 adapter->num_io_queues); in ena_probe()
3935 netdev->netdev_ops = &ena_netdev_ops; in ena_probe()
3936 netdev->watchdog_timeo = TX_TIMEOUT; in ena_probe()
3939 netdev->priv_flags |= IFF_UNICAST_FLT; in ena_probe()
3941 u64_stats_init(&adapter->syncp); in ena_probe()
3945 dev_err(&pdev->dev, in ena_probe()
3950 if (rc && (rc != -EOPNOTSUPP)) { in ena_probe()
3951 dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc); in ena_probe()
3957 if (ena_xdp_legal_queue_count(adapter, adapter->num_io_queues)) in ena_probe()
3958 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | in ena_probe()
3961 memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len); in ena_probe()
3967 dev_err(&pdev->dev, "Cannot register net device\n"); in ena_probe()
3971 INIT_WORK(&adapter->reset_task, ena_fw_reset_device); in ena_probe()
3973 adapter->last_keep_alive_jiffies = jiffies; in ena_probe()
3974 adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT; in ena_probe()
3975 adapter->missing_tx_completion_to = TX_TIMEOUT; in ena_probe()
3976 adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS; in ena_probe()
3980 timer_setup(&adapter->timer_service, ena_timer_service, 0); in ena_probe()
3981 mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); in ena_probe()
3983 dev_info(&pdev->dev, in ena_probe()
3986 netdev->dev_addr); in ena_probe()
3988 set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); in ena_probe()
4004 del_timer(&adapter->timer_service); in ena_probe()
4021 /* __ena_shutoff - Helper used in both PCI remove/shutdown routines
4035 ena_dev = adapter->ena_dev; in __ena_shutoff()
4036 netdev = adapter->netdev; in __ena_shutoff()
4039 if ((adapter->msix_vecs >= 1) && (netdev->rx_cpu_rmap)) { in __ena_shutoff()
4040 free_irq_cpu_rmap(netdev->rx_cpu_rmap); in __ena_shutoff()
4041 netdev->rx_cpu_rmap = NULL; in __ena_shutoff()
4048 del_timer_sync(&adapter->timer_service); in __ena_shutoff()
4049 cancel_work_sync(&adapter->reset_task); in __ena_shutoff()
4051 rtnl_lock(); /* lock released inside the below if-else block */ in __ena_shutoff()
4052 adapter->reset_reason = ENA_REGS_RESET_SHUTDOWN; in __ena_shutoff()
4078 /* ena_remove - Device Removal Routine
4090 /* ena_shutdown - Device Shutdown Routine
4102 /* ena_suspend - PM suspend callback
4110 ena_increase_stat(&adapter->dev_stats.suspend, 1, &adapter->syncp); in ena_suspend()
4113 if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) { in ena_suspend()
4114 dev_err(&pdev->dev, in ena_suspend()
4116 clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); in ena_suspend()
4123 /* ena_resume - PM resume callback
4131 ena_increase_stat(&adapter->dev_stats.resume, 1, &adapter->syncp); in ena_resume()
4158 return -ENOMEM; in ena_init()
4190 int status = aenq_desc->flags & in ena_update_on_link_change()
4194 netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__); in ena_update_on_link_change()
4195 set_bit(ENA_FLAG_LINK_UP, &adapter->flags); in ena_update_on_link_change()
4196 if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags)) in ena_update_on_link_change()
4197 netif_carrier_on(adapter->netdev); in ena_update_on_link_change()
4199 clear_bit(ENA_FLAG_LINK_UP, &adapter->flags); in ena_update_on_link_change()
4200 netif_carrier_off(adapter->netdev); in ena_update_on_link_change()
4213 adapter->last_keep_alive_jiffies = jiffies; in ena_keep_alive_wd()
4215 rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low; in ena_keep_alive_wd()
4216 tx_drops = ((u64)desc->tx_drops_high << 32) | desc->tx_drops_low; in ena_keep_alive_wd()
4218 u64_stats_update_begin(&adapter->syncp); in ena_keep_alive_wd()
4222 adapter->dev_stats.rx_drops = rx_drops; in ena_keep_alive_wd()
4223 adapter->dev_stats.tx_drops = tx_drops; in ena_keep_alive_wd()
4224 u64_stats_update_end(&adapter->syncp); in ena_keep_alive_wd()
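
A minimal sketch of the 64-bit reassembly used above for the drop counters carried as high/low halves in the keep-alive descriptor:

#include <linux/types.h>

static u64 my_combine_halves(u32 high, u32 low)
{
	return ((u64)high << 32) | low;
}
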
4233 WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, in ena_notification()
4235 aenq_e->aenq_common_desc.group, in ena_notification()
4238 switch (aenq_e->aenq_common_desc.syndrome) { in ena_notification()
4241 (&aenq_e->inline_data_w4); in ena_notification()
4245 netif_err(adapter, drv, adapter->netdev, in ena_notification()
4247 aenq_e->aenq_common_desc.syndrome); in ena_notification()
4257 netif_err(adapter, drv, adapter->netdev, in unimplemented_aenq_handler()