Lines Matching +full:queue +full:- +full:rx

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */
6 * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time
10 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
14 * - www.embedded-experts.at/tsn
15 * - www.engleder-embedded.com
36 #define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
38 /* XSK buffer shall store at least a Q-in-Q frame */
52 ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)
76 iowrite32(mask, adapter->addr + ECM_INT_ENABLE); in tsnep_enable_irq()
82 iowrite32(mask, adapter->addr + ECM_INT_ENABLE); in tsnep_disable_irq()
88 u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE); in tsnep_irq()
92 iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE); in tsnep_irq()
96 phy_mac_interrupt(adapter->netdev->phydev); in tsnep_irq()
98 /* handle TX/RX queue 0 interrupt */ in tsnep_irq()
99 if ((active & adapter->queue[0].irq_mask) != 0) { in tsnep_irq()
100 if (napi_schedule_prep(&adapter->queue[0].napi)) { in tsnep_irq()
101 tsnep_disable_irq(adapter, adapter->queue[0].irq_mask); in tsnep_irq()
103 __napi_schedule(&adapter->queue[0].napi); in tsnep_irq()
112 struct tsnep_queue *queue = arg; in tsnep_irq_txrx() local
114 /* handle TX/RX queue interrupt */ in tsnep_irq_txrx()
115 if (napi_schedule_prep(&queue->napi)) { in tsnep_irq_txrx()
116 tsnep_disable_irq(queue->adapter, queue->irq_mask); in tsnep_irq_txrx()
118 __napi_schedule(&queue->napi); in tsnep_irq_txrx()
124 int tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs) in tsnep_set_irq_coalesce() argument
127 return -ERANGE; in tsnep_set_irq_coalesce()
133 queue->irq_delay &= ~ECM_INT_DELAY_MASK; in tsnep_set_irq_coalesce()
134 queue->irq_delay |= usecs; in tsnep_set_irq_coalesce()
135 iowrite8(queue->irq_delay, queue->irq_delay_addr); in tsnep_set_irq_coalesce()
140 u32 tsnep_get_irq_coalesce(struct tsnep_queue *queue) in tsnep_get_irq_coalesce() argument
144 usecs = (queue->irq_delay & ECM_INT_DELAY_MASK); in tsnep_get_irq_coalesce()
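
For reference, a minimal userspace model of the read-modify-write pattern visible in tsnep_set_irq_coalesce()/tsnep_get_irq_coalesce() above: clamp the requested value, clear the delay field in the shared 8-bit register, merge the new value, and read it back through the same mask. The register layout and the DELAY_* constants below are invented for illustration; only the clamp/mask/merge flow mirrors the listing.

#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define DELAY_FIELD_MASK  0xFCu   /* hypothetical field bits inside the 8-bit register */
#define DELAY_MAX         0xFCu   /* largest encodable value for this toy layout */

static uint8_t delay_reg;         /* stands in for the memory-mapped delay byte */

static int set_coalesce(unsigned int usecs)
{
        if (usecs > DELAY_MAX)
                return -ERANGE;                        /* reject out-of-range requests, as the driver does */

        delay_reg &= (uint8_t)~DELAY_FIELD_MASK;       /* clear only the delay field */
        delay_reg |= (uint8_t)(usecs & DELAY_FIELD_MASK); /* keep the other bits untouched */
        return 0;
}

static unsigned int get_coalesce(void)
{
        return delay_reg & DELAY_FIELD_MASK;           /* read back only the delay field */
}

int main(void)
{
        assert(set_coalesce(0x40) == 0);
        assert(get_coalesce() == 0x40);
        assert(set_coalesce(0x1000) == -ERANGE);       /* too large, register left unchanged */
        printf("coalesce field: 0x%02x\n", get_coalesce());
        return 0;
}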
153 struct tsnep_adapter *adapter = bus->priv; in tsnep_mdiobus_read()
158 if (!adapter->suppress_preamble) in tsnep_mdiobus_read()
162 iowrite32(md, adapter->addr + ECM_MD_CONTROL); in tsnep_mdiobus_read()
163 retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md, in tsnep_mdiobus_read()
174 struct tsnep_adapter *adapter = bus->priv; in tsnep_mdiobus_write()
179 if (!adapter->suppress_preamble) in tsnep_mdiobus_write()
184 iowrite32(md, adapter->addr + ECM_MD_CONTROL); in tsnep_mdiobus_write()
185 retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md, in tsnep_mdiobus_write()
197 switch (adapter->phydev->speed) { in tsnep_set_link_mode()
208 iowrite32(mode, adapter->addr + ECM_STATUS); in tsnep_set_link_mode()
214 struct phy_device *phydev = netdev->phydev; in tsnep_phy_link_status_change()
216 if (phydev->link) in tsnep_phy_link_status_change()
219 phy_print_status(netdev->phydev); in tsnep_phy_link_status_change()
226 retval = phy_loopback(adapter->phydev, enable); in tsnep_phy_loopback()
244 retval = phy_connect_direct(adapter->netdev, adapter->phydev, in tsnep_phy_open()
246 adapter->phy_mode); in tsnep_phy_open()
249 phydev = adapter->netdev->phydev; in tsnep_phy_open()
261 phy_ethtool_set_eee(adapter->phydev, &ethtool_eee); in tsnep_phy_open()
263 adapter->phydev->irq = PHY_MAC_INTERRUPT; in tsnep_phy_open()
264 phy_start(adapter->phydev); in tsnep_phy_open()
271 phy_stop(adapter->netdev->phydev); in tsnep_phy_close()
272 phy_disconnect(adapter->netdev->phydev); in tsnep_phy_close()
277 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_cleanup()
280 memset(tx->entry, 0, sizeof(tx->entry)); in tsnep_tx_ring_cleanup()
283 if (tx->page[i]) { in tsnep_tx_ring_cleanup()
284 dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i], in tsnep_tx_ring_cleanup()
285 tx->page_dma[i]); in tsnep_tx_ring_cleanup()
286 tx->page[i] = NULL; in tsnep_tx_ring_cleanup()
287 tx->page_dma[i] = 0; in tsnep_tx_ring_cleanup()
294 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_create()
301 tx->page[i] = in tsnep_tx_ring_create()
302 dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i], in tsnep_tx_ring_create()
304 if (!tx->page[i]) { in tsnep_tx_ring_create()
305 retval = -ENOMEM; in tsnep_tx_ring_create()
309 entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; in tsnep_tx_ring_create()
310 entry->desc_wb = (struct tsnep_tx_desc_wb *) in tsnep_tx_ring_create()
311 (((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j); in tsnep_tx_ring_create()
312 entry->desc = (struct tsnep_tx_desc *) in tsnep_tx_ring_create()
313 (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET); in tsnep_tx_ring_create()
314 entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j; in tsnep_tx_ring_create()
315 entry->owner_user_flag = false; in tsnep_tx_ring_create()
319 entry = &tx->entry[i]; in tsnep_tx_ring_create()
320 next_entry = &tx->entry[(i + 1) & TSNEP_RING_MASK]; in tsnep_tx_ring_create()
321 entry->desc->next = __cpu_to_le64(next_entry->desc_dma); in tsnep_tx_ring_create()
335 dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; in tsnep_tx_init()
336 iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW); in tsnep_tx_init()
337 iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH); in tsnep_tx_init()
338 tx->write = 0; in tsnep_tx_init()
339 tx->read = 0; in tsnep_tx_init()
340 tx->owner_counter = 1; in tsnep_tx_init()
341 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_init()
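
The two register writes in tsnep_tx_init() above split one 64-bit descriptor ring base address across LOW/HIGH 32-bit registers. A standalone sketch of that split, assuming an illustrative register struct and flag bit (names are not the driver's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ADDR_LOW(a)   ((uint32_t)((a) & 0xFFFFFFFFu))
#define ADDR_HIGH(a)  ((uint32_t)((a) >> 32))
#define RESET_OWNER_COUNTER  0x1u   /* hypothetical low-order flag ORed into the address */

struct regs {                       /* stands in for the memory-mapped queue registers */
        uint32_t desc_addr_low;
        uint32_t desc_addr_high;
};

static void program_ring_base(struct regs *r, uint64_t first_desc_dma)
{
        uint64_t dma = first_desc_dma | RESET_OWNER_COUNTER;

        r->desc_addr_low = ADDR_LOW(dma);    /* iowrite32(DMA_ADDR_LOW(dma), ...) */
        r->desc_addr_high = ADDR_HIGH(dma);  /* iowrite32(DMA_ADDR_HIGH(dma), ...) */
}

int main(void)
{
        struct regs r;

        program_ring_base(&r, 0x0000000123456000ull);
        assert(r.desc_addr_low == 0x23456001u);
        assert(r.desc_addr_high == 0x1u);
        printf("low=0x%08x high=0x%08x\n", (unsigned)r.desc_addr_low, (unsigned)r.desc_addr_high);
        return 0;
}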
348 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_enable()
360 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_disable()
367 readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val, in tsnep_tx_disable()
372 while (READ_ONCE(tx->read) != tx->write) { in tsnep_tx_disable()
381 struct tsnep_tx_entry *entry = &tx->entry[index]; in tsnep_tx_activate()
383 entry->properties = 0; in tsnep_tx_activate()
385 if (entry->skb) { in tsnep_tx_activate()
386 entry->properties = length & TSNEP_DESC_LENGTH_MASK; in tsnep_tx_activate()
387 entry->properties |= TSNEP_DESC_INTERRUPT_FLAG; in tsnep_tx_activate()
388 if ((entry->type & TSNEP_TX_TYPE_SKB) && in tsnep_tx_activate()
389 (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)) in tsnep_tx_activate()
390 entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG; in tsnep_tx_activate()
410 entry->owner_user_flag = !entry->owner_user_flag; in tsnep_tx_activate()
413 entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG; in tsnep_tx_activate()
414 if (index == tx->increment_owner_counter) { in tsnep_tx_activate()
415 tx->owner_counter++; in tsnep_tx_activate()
416 if (tx->owner_counter == 4) in tsnep_tx_activate()
417 tx->owner_counter = 1; in tsnep_tx_activate()
418 tx->increment_owner_counter--; in tsnep_tx_activate()
419 if (tx->increment_owner_counter < 0) in tsnep_tx_activate()
420 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_activate()
422 entry->properties |= in tsnep_tx_activate()
423 (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & in tsnep_tx_activate()
425 if (entry->owner_user_flag) in tsnep_tx_activate()
426 entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG; in tsnep_tx_activate()
427 entry->desc->more_properties = in tsnep_tx_activate()
428 __cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK); in tsnep_tx_activate()
429 if (entry->type & TSNEP_TX_TYPE_INLINE) in tsnep_tx_activate()
430 entry->properties |= TSNEP_TX_DESC_DATA_AFTER_DESC_FLAG; in tsnep_tx_activate()
437 entry->desc->properties = __cpu_to_le32(entry->properties); in tsnep_tx_activate()
442 if (tx->read <= tx->write) in tsnep_tx_desc_available()
443 return TSNEP_RING_SIZE - tx->write + tx->read - 1; in tsnep_tx_desc_available()
445 return tx->read - tx->write - 1; in tsnep_tx_desc_available()
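
A self-contained model of the free-slot arithmetic in tsnep_tx_desc_available() above (the RX side later uses the identical computation): the ring keeps one slot unused so read == write unambiguously means "empty". RING_SIZE here is an arbitrary power of two, chosen only so the masked (& (RING_SIZE - 1)) indexing used elsewhere in the listing would work.

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 256

static int desc_available(int read, int write)
{
        if (read <= write)
                return RING_SIZE - write + read - 1;

        return read - write - 1;
}

int main(void)
{
        assert(desc_available(0, 0) == RING_SIZE - 1);   /* empty ring */
        assert(desc_available(5, 10) == RING_SIZE - 6);  /* five descriptors in flight */
        assert(desc_available(10, 9) == 0);              /* full: write is one behind read */
        printf("free when empty: %d\n", desc_available(0, 0));
        return 0;
}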
458 return -ENOMEM; in tsnep_tx_map_frag()
459 entry->type = TSNEP_TX_TYPE_SKB_FRAG_MAP_PAGE; in tsnep_tx_map_frag()
465 memcpy(&entry->desc->tx, fragdata, len); in tsnep_tx_map_frag()
470 memcpy(&entry->desc->tx, fragdata + skb_frag_off(frag), in tsnep_tx_map_frag()
474 entry->type = TSNEP_TX_TYPE_SKB_FRAG_INLINE; in tsnep_tx_map_frag()
483 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_map()
491 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; in tsnep_tx_map()
496 dma = dma_map_single(dmadev, skb->data, len, in tsnep_tx_map()
499 return -ENOMEM; in tsnep_tx_map()
500 entry->type = TSNEP_TX_TYPE_SKB_MAP; in tsnep_tx_map()
503 memcpy(&entry->desc->tx, skb->data, len); in tsnep_tx_map()
504 entry->type = TSNEP_TX_TYPE_SKB_INLINE; in tsnep_tx_map()
508 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; in tsnep_tx_map()
516 entry->len = len; in tsnep_tx_map()
519 entry->desc->tx = __cpu_to_le64(dma); in tsnep_tx_map()
530 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_unmap()
536 entry = &tx->entry[(index + i) & TSNEP_RING_MASK]; in tsnep_tx_unmap()
538 if (entry->len) { in tsnep_tx_unmap()
539 if (entry->type & TSNEP_TX_TYPE_MAP) in tsnep_tx_unmap()
544 else if (entry->type & TSNEP_TX_TYPE_MAP_PAGE) in tsnep_tx_unmap()
549 map_len += entry->len; in tsnep_tx_unmap()
550 entry->len = 0; in tsnep_tx_unmap()
566 if (skb_shinfo(skb)->nr_frags > 0) in tsnep_xmit_frame_ring()
567 count += skb_shinfo(skb)->nr_frags; in tsnep_xmit_frame_ring()
570 /* ring full, shall not happen because queue is stopped if full in tsnep_xmit_frame_ring()
573 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); in tsnep_xmit_frame_ring()
578 entry = &tx->entry[tx->write]; in tsnep_xmit_frame_ring()
579 entry->skb = skb; in tsnep_xmit_frame_ring()
583 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xmit_frame_ring()
584 dev_kfree_skb_any(entry->skb); in tsnep_xmit_frame_ring()
585 entry->skb = NULL; in tsnep_xmit_frame_ring()
587 tx->dropped++; in tsnep_xmit_frame_ring()
593 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) in tsnep_xmit_frame_ring()
594 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in tsnep_xmit_frame_ring()
597 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, in tsnep_xmit_frame_ring()
598 i == count - 1); in tsnep_xmit_frame_ring()
599 tx->write = (tx->write + count) & TSNEP_RING_MASK; in tsnep_xmit_frame_ring()
606 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xmit_frame_ring()
610 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); in tsnep_xmit_frame_ring()
619 struct device *dmadev = tx->adapter->dmadev; in tsnep_xdp_tx_map()
630 len = xdpf->len; in tsnep_xdp_tx_map()
632 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; in tsnep_xdp_tx_map()
635 xdpf->data; in tsnep_xdp_tx_map()
638 return -ENOMEM; in tsnep_xdp_tx_map()
640 entry->type = TSNEP_TX_TYPE_XDP_NDO_MAP_PAGE; in tsnep_xdp_tx_map()
643 virt_to_page(xdpf->data); in tsnep_xdp_tx_map()
648 dma += sizeof(*xdpf) + xdpf->headroom; in tsnep_xdp_tx_map()
652 entry->type = TSNEP_TX_TYPE_XDP_TX; in tsnep_xdp_tx_map()
655 entry->len = len; in tsnep_xdp_tx_map()
658 entry->desc->tx = __cpu_to_le64(dma); in tsnep_xdp_tx_map()
663 frag = &shinfo->frags[i]; in tsnep_xdp_tx_map()
681 count += shinfo->nr_frags; in tsnep_xdp_xmit_frame_ring()
684 * will be available for normal TX path and queue is stopped there if in tsnep_xdp_xmit_frame_ring()
690 entry = &tx->entry[tx->write]; in tsnep_xdp_xmit_frame_ring()
691 entry->xdpf = xdpf; in tsnep_xdp_xmit_frame_ring()
695 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xdp_xmit_frame_ring()
696 entry->xdpf = NULL; in tsnep_xdp_xmit_frame_ring()
698 tx->dropped++; in tsnep_xdp_xmit_frame_ring()
705 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, in tsnep_xdp_xmit_frame_ring()
706 i == count - 1); in tsnep_xdp_xmit_frame_ring()
707 tx->write = (tx->write + count) & TSNEP_RING_MASK; in tsnep_xdp_xmit_frame_ring()
717 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xdp_xmit_flush()
742 /* Avoid transmit queue timeout since we share it with the slow path */ in tsnep_xdp_xmit_back()
756 entry = &tx->entry[tx->write]; in tsnep_xdp_tx_map_zc()
757 entry->zc = true; in tsnep_xdp_tx_map_zc()
759 dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr); in tsnep_xdp_tx_map_zc()
760 xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len); in tsnep_xdp_tx_map_zc()
762 entry->type = TSNEP_TX_TYPE_XSK; in tsnep_xdp_tx_map_zc()
763 entry->len = xdpd->len; in tsnep_xdp_tx_map_zc()
765 entry->desc->tx = __cpu_to_le64(dma); in tsnep_xdp_tx_map_zc()
767 return xdpd->len; in tsnep_xdp_tx_map_zc()
777 tsnep_tx_activate(tx, tx->write, length, true); in tsnep_xdp_xmit_frame_ring_zc()
778 tx->write = (tx->write + 1) & TSNEP_RING_MASK; in tsnep_xdp_xmit_frame_ring_zc()
784 struct xdp_desc *descs = tx->xsk_pool->tx_descs; in tsnep_xdp_xmit_zc()
788 * will be available for normal TX path and queue is stopped there if in tsnep_xdp_xmit_zc()
793 desc_available -= MAX_SKB_FRAGS + 1; in tsnep_xdp_xmit_zc()
795 batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available); in tsnep_xdp_xmit_zc()
818 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_poll()
822 if (tx->read == tx->write) in tsnep_tx_poll()
825 entry = &tx->entry[tx->read]; in tsnep_tx_poll()
826 if ((__le32_to_cpu(entry->desc_wb->properties) & in tsnep_tx_poll()
828 (entry->properties & TSNEP_TX_DESC_OWNER_MASK)) in tsnep_tx_poll()
837 if ((entry->type & TSNEP_TX_TYPE_SKB) && in tsnep_tx_poll()
838 skb_shinfo(entry->skb)->nr_frags > 0) in tsnep_tx_poll()
839 count += skb_shinfo(entry->skb)->nr_frags; in tsnep_tx_poll()
840 else if ((entry->type & TSNEP_TX_TYPE_XDP) && in tsnep_tx_poll()
841 xdp_frame_has_frags(entry->xdpf)) in tsnep_tx_poll()
842 count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags; in tsnep_tx_poll()
844 length = tsnep_tx_unmap(tx, tx->read, count); in tsnep_tx_poll()
846 if ((entry->type & TSNEP_TX_TYPE_SKB) && in tsnep_tx_poll()
847 (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) && in tsnep_tx_poll()
848 (__le32_to_cpu(entry->desc_wb->properties) & in tsnep_tx_poll()
853 if (skb_shinfo(entry->skb)->tx_flags & in tsnep_tx_poll()
856 __le64_to_cpu(entry->desc_wb->counter); in tsnep_tx_poll()
859 __le64_to_cpu(entry->desc_wb->timestamp); in tsnep_tx_poll()
864 skb_tstamp_tx(entry->skb, &hwtstamps); in tsnep_tx_poll()
867 if (entry->type & TSNEP_TX_TYPE_SKB) in tsnep_tx_poll()
868 napi_consume_skb(entry->skb, napi_budget); in tsnep_tx_poll()
869 else if (entry->type & TSNEP_TX_TYPE_XDP) in tsnep_tx_poll()
870 xdp_return_frame_rx_napi(entry->xdpf); in tsnep_tx_poll()
874 entry->skb = NULL; in tsnep_tx_poll()
876 tx->read = (tx->read + count) & TSNEP_RING_MASK; in tsnep_tx_poll()
878 tx->packets++; in tsnep_tx_poll()
879 tx->bytes += length + ETH_FCS_LEN; in tsnep_tx_poll()
881 budget--; in tsnep_tx_poll()
884 if (tx->xsk_pool) { in tsnep_tx_poll()
886 xsk_tx_completed(tx->xsk_pool, xsk_frames); in tsnep_tx_poll()
887 if (xsk_uses_need_wakeup(tx->xsk_pool)) in tsnep_tx_poll()
888 xsk_set_tx_need_wakeup(tx->xsk_pool); in tsnep_tx_poll()
908 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_pending()
911 if (tx->read != tx->write) { in tsnep_tx_pending()
912 entry = &tx->entry[tx->read]; in tsnep_tx_pending()
913 if ((__le32_to_cpu(entry->desc_wb->properties) & in tsnep_tx_pending()
915 (entry->properties & TSNEP_TX_DESC_OWNER_MASK)) in tsnep_tx_pending()
942 static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx) in tsnep_rx_ring_cleanup() argument
944 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_ring_cleanup()
949 entry = &rx->entry[i]; in tsnep_rx_ring_cleanup()
950 if (!rx->xsk_pool && entry->page) in tsnep_rx_ring_cleanup()
951 page_pool_put_full_page(rx->page_pool, entry->page, in tsnep_rx_ring_cleanup()
953 if (rx->xsk_pool && entry->xdp) in tsnep_rx_ring_cleanup()
954 xsk_buff_free(entry->xdp); in tsnep_rx_ring_cleanup()
956 entry->page = NULL; in tsnep_rx_ring_cleanup()
959 if (rx->page_pool) in tsnep_rx_ring_cleanup()
960 page_pool_destroy(rx->page_pool); in tsnep_rx_ring_cleanup()
962 memset(rx->entry, 0, sizeof(rx->entry)); in tsnep_rx_ring_cleanup()
965 if (rx->page[i]) { in tsnep_rx_ring_cleanup()
966 dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i], in tsnep_rx_ring_cleanup()
967 rx->page_dma[i]); in tsnep_rx_ring_cleanup()
968 rx->page[i] = NULL; in tsnep_rx_ring_cleanup()
969 rx->page_dma[i] = 0; in tsnep_rx_ring_cleanup()
974 static int tsnep_rx_ring_create(struct tsnep_rx *rx) in tsnep_rx_ring_create() argument
976 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_ring_create()
984 rx->page[i] = in tsnep_rx_ring_create()
985 dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i], in tsnep_rx_ring_create()
987 if (!rx->page[i]) { in tsnep_rx_ring_create()
988 retval = -ENOMEM; in tsnep_rx_ring_create()
992 entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; in tsnep_rx_ring_create()
993 entry->desc_wb = (struct tsnep_rx_desc_wb *) in tsnep_rx_ring_create()
994 (((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j); in tsnep_rx_ring_create()
995 entry->desc = (struct tsnep_rx_desc *) in tsnep_rx_ring_create()
996 (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET); in tsnep_rx_ring_create()
997 entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j; in tsnep_rx_ring_create()
1009 rx->page_pool = page_pool_create(&pp_params); in tsnep_rx_ring_create()
1010 if (IS_ERR(rx->page_pool)) { in tsnep_rx_ring_create()
1011 retval = PTR_ERR(rx->page_pool); in tsnep_rx_ring_create()
1012 rx->page_pool = NULL; in tsnep_rx_ring_create()
1017 entry = &rx->entry[i]; in tsnep_rx_ring_create()
1018 next_entry = &rx->entry[(i + 1) & TSNEP_RING_MASK]; in tsnep_rx_ring_create()
1019 entry->desc->next = __cpu_to_le64(next_entry->desc_dma); in tsnep_rx_ring_create()
1025 tsnep_rx_ring_cleanup(rx); in tsnep_rx_ring_create()
1029 static void tsnep_rx_init(struct tsnep_rx *rx) in tsnep_rx_init() argument
1033 dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; in tsnep_rx_init()
1034 iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW); in tsnep_rx_init()
1035 iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH); in tsnep_rx_init()
1036 rx->write = 0; in tsnep_rx_init()
1037 rx->read = 0; in tsnep_rx_init()
1038 rx->owner_counter = 1; in tsnep_rx_init()
1039 rx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_rx_init()
1042 static void tsnep_rx_enable(struct tsnep_rx *rx) in tsnep_rx_enable() argument
1047 iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL); in tsnep_rx_enable()
1050 static void tsnep_rx_disable(struct tsnep_rx *rx) in tsnep_rx_disable() argument
1054 iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL); in tsnep_rx_disable()
1055 readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val, in tsnep_rx_disable()
1060 static int tsnep_rx_desc_available(struct tsnep_rx *rx) in tsnep_rx_desc_available() argument
1062 if (rx->read <= rx->write) in tsnep_rx_desc_available()
1063 return TSNEP_RING_SIZE - rx->write + rx->read - 1; in tsnep_rx_desc_available()
1065 return rx->read - rx->write - 1; in tsnep_rx_desc_available()
1068 static void tsnep_rx_free_page_buffer(struct tsnep_rx *rx) in tsnep_rx_free_page_buffer() argument
1075 page = rx->page_buffer; in tsnep_rx_free_page_buffer()
1077 page_pool_put_full_page(rx->page_pool, *page, false); in tsnep_rx_free_page_buffer()
1083 static int tsnep_rx_alloc_page_buffer(struct tsnep_rx *rx) in tsnep_rx_alloc_page_buffer() argument
1090 for (i = 0; i < TSNEP_RING_SIZE - 1; i++) { in tsnep_rx_alloc_page_buffer()
1091 rx->page_buffer[i] = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_alloc_page_buffer()
1092 if (!rx->page_buffer[i]) { in tsnep_rx_alloc_page_buffer()
1093 tsnep_rx_free_page_buffer(rx); in tsnep_rx_alloc_page_buffer()
1095 return -ENOMEM; in tsnep_rx_alloc_page_buffer()
1102 static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry, in tsnep_rx_set_page() argument
1105 entry->page = page; in tsnep_rx_set_page()
1106 entry->len = TSNEP_MAX_RX_BUF_SIZE; in tsnep_rx_set_page()
1107 entry->dma = page_pool_get_dma_addr(entry->page); in tsnep_rx_set_page()
1108 entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET); in tsnep_rx_set_page()
1111 static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index) in tsnep_rx_alloc_buffer() argument
1113 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_alloc_buffer()
1116 page = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_alloc_buffer()
1118 return -ENOMEM; in tsnep_rx_alloc_buffer()
1119 tsnep_rx_set_page(rx, entry, page); in tsnep_rx_alloc_buffer()
1124 static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index) in tsnep_rx_reuse_buffer() argument
1126 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_reuse_buffer()
1127 struct tsnep_rx_entry *read = &rx->entry[rx->read]; in tsnep_rx_reuse_buffer()
1129 tsnep_rx_set_page(rx, entry, read->page); in tsnep_rx_reuse_buffer()
1130 read->page = NULL; in tsnep_rx_reuse_buffer()
1133 static void tsnep_rx_activate(struct tsnep_rx *rx, int index) in tsnep_rx_activate() argument
1135 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_activate()
1138 entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK; in tsnep_rx_activate()
1139 entry->properties |= TSNEP_DESC_INTERRUPT_FLAG; in tsnep_rx_activate()
1140 if (index == rx->increment_owner_counter) { in tsnep_rx_activate()
1141 rx->owner_counter++; in tsnep_rx_activate()
1142 if (rx->owner_counter == 4) in tsnep_rx_activate()
1143 rx->owner_counter = 1; in tsnep_rx_activate()
1144 rx->increment_owner_counter--; in tsnep_rx_activate()
1145 if (rx->increment_owner_counter < 0) in tsnep_rx_activate()
1146 rx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_rx_activate()
1148 entry->properties |= in tsnep_rx_activate()
1149 (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & in tsnep_rx_activate()
1157 entry->desc->properties = __cpu_to_le32(entry->properties); in tsnep_rx_activate()
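
A small simulation of the owner-counter scheme shared by tsnep_tx_activate() and tsnep_rx_activate() above: a generation counter cycling through 1..3 is stamped into each activated descriptor, and the ring position at which the counter advances walks back by one slot per lap. The tiny ring size below is only to make the pattern visible.

#include <stdio.h>

#define RING_SIZE 8

static int owner_counter = 1;
static int increment_owner_counter = RING_SIZE - 1;

static int activate(int index)
{
        if (index == increment_owner_counter) {
                owner_counter++;
                if (owner_counter == 4)
                        owner_counter = 1;
                increment_owner_counter--;
                if (increment_owner_counter < 0)
                        increment_owner_counter = RING_SIZE - 1;
        }
        return owner_counter;   /* value the driver shifts into the properties word */
}

int main(void)
{
        int write = 0;

        for (int lap = 0; lap < 3; lap++) {
                for (int i = 0; i < RING_SIZE; i++) {
                        printf("%d ", activate(write));
                        write = (write + 1) % RING_SIZE;
                }
                printf("\n");   /* each lap shows where the generation changes */
        }
        return 0;
}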
1160 static int tsnep_rx_alloc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_alloc() argument
1166 index = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc()
1168 if (unlikely(tsnep_rx_alloc_buffer(rx, index))) { in tsnep_rx_alloc()
1169 rx->alloc_failed++; in tsnep_rx_alloc()
1174 tsnep_rx_reuse_buffer(rx, index); in tsnep_rx_alloc()
1179 tsnep_rx_activate(rx, index); in tsnep_rx_alloc()
1183 rx->write = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc()
1188 static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_refill() argument
1192 desc_refilled = tsnep_rx_alloc(rx, count, reuse); in tsnep_rx_refill()
1194 tsnep_rx_enable(rx); in tsnep_rx_refill()
1199 static void tsnep_rx_set_xdp(struct tsnep_rx *rx, struct tsnep_rx_entry *entry, in tsnep_rx_set_xdp() argument
1202 entry->xdp = xdp; in tsnep_rx_set_xdp()
1203 entry->len = TSNEP_XSK_RX_BUF_SIZE; in tsnep_rx_set_xdp()
1204 entry->dma = xsk_buff_xdp_get_dma(entry->xdp); in tsnep_rx_set_xdp()
1205 entry->desc->rx = __cpu_to_le64(entry->dma); in tsnep_rx_set_xdp()
1208 static void tsnep_rx_reuse_buffer_zc(struct tsnep_rx *rx, int index) in tsnep_rx_reuse_buffer_zc() argument
1210 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_reuse_buffer_zc()
1211 struct tsnep_rx_entry *read = &rx->entry[rx->read]; in tsnep_rx_reuse_buffer_zc()
1213 tsnep_rx_set_xdp(rx, entry, read->xdp); in tsnep_rx_reuse_buffer_zc()
1214 read->xdp = NULL; in tsnep_rx_reuse_buffer_zc()
1217 static int tsnep_rx_alloc_zc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_alloc_zc() argument
1222 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count); in tsnep_rx_alloc_zc()
1224 int index = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc_zc()
1225 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_alloc_zc()
1227 tsnep_rx_set_xdp(rx, entry, rx->xdp_batch[i]); in tsnep_rx_alloc_zc()
1228 tsnep_rx_activate(rx, index); in tsnep_rx_alloc_zc()
1231 rx->alloc_failed++; in tsnep_rx_alloc_zc()
1234 tsnep_rx_reuse_buffer_zc(rx, rx->write); in tsnep_rx_alloc_zc()
1235 tsnep_rx_activate(rx, rx->write); in tsnep_rx_alloc_zc()
1240 rx->write = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc_zc()
1245 static void tsnep_rx_free_zc(struct tsnep_rx *rx) in tsnep_rx_free_zc() argument
1250 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_free_zc()
1252 if (entry->xdp) in tsnep_rx_free_zc()
1253 xsk_buff_free(entry->xdp); in tsnep_rx_free_zc()
1254 entry->xdp = NULL; in tsnep_rx_free_zc()
1258 static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_refill_zc() argument
1262 desc_refilled = tsnep_rx_alloc_zc(rx, count, reuse); in tsnep_rx_refill_zc()
1264 tsnep_rx_enable(rx); in tsnep_rx_refill_zc()
1269 static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog, in tsnep_xdp_run_prog() argument
1277 length = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM; in tsnep_xdp_run_prog()
1284 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false)) in tsnep_xdp_run_prog()
1289 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0) in tsnep_xdp_run_prog()
1294 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog()
1298 trace_xdp_exception(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog()
1304 sync = xdp->data_end - xdp->data_hard_start - in tsnep_xdp_run_prog()
1307 page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data), in tsnep_xdp_run_prog()
1313 static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog, in tsnep_xdp_run_prog_zc() argument
1322 /* XDP_REDIRECT is the main action for zero-copy */ in tsnep_xdp_run_prog_zc()
1324 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0) in tsnep_xdp_run_prog_zc()
1334 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true)) in tsnep_xdp_run_prog_zc()
1339 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog_zc()
1343 trace_xdp_exception(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog_zc()
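
Both tsnep_xdp_run_prog() and the zero-copy variant above reduce the program verdict to one driver action. A toy dispatcher modelling that structure follows; the enum merely mirrors the XDP verdict names and all helpers are local to this sketch, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>

enum verdict { V_ABORTED, V_DROP, V_PASS, V_TX, V_REDIRECT };

/* Return false for PASS so the caller hands the frame to the regular stack;
 * return true when the buffer was consumed (transmitted, redirected, dropped). */
static bool run_prog(enum verdict act)
{
        switch (act) {
        case V_PASS:
                return false;
        case V_TX:
                printf("queue frame on the paired TX ring\n");
                return true;
        case V_REDIRECT:
                printf("hand frame to another device/socket\n");
                return true;
        default:
                printf("warn about an invalid verdict\n");
                /* fallthrough */
        case V_ABORTED:
                printf("trace the exception\n");
                /* fallthrough */
        case V_DROP:
                printf("recycle the buffer\n");
                return true;
        }
}

int main(void)
{
        printf("consumed: %d\n", run_prog(V_PASS));
        printf("consumed: %d\n", run_prog(V_DROP));
        return 0;
}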
1364 static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page, in tsnep_build_skb() argument
1375 __skb_put(skb, length - ETH_FCS_LEN); in tsnep_build_skb()
1377 if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) { in tsnep_build_skb()
1383 skb_shinfo(skb)->tx_flags |= in tsnep_build_skb()
1386 hwtstamps->netdev_data = rx_inline; in tsnep_build_skb()
1389 skb_record_rx_queue(skb, rx->queue_index); in tsnep_build_skb()
1390 skb->protocol = eth_type_trans(skb, rx->adapter->netdev); in tsnep_build_skb()
1395 static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_page() argument
1400 skb = tsnep_build_skb(rx, page, length); in tsnep_rx_page()
1404 rx->packets++; in tsnep_rx_page()
1405 rx->bytes += length; in tsnep_rx_page()
1406 if (skb->pkt_type == PACKET_MULTICAST) in tsnep_rx_page()
1407 rx->multicast++; in tsnep_rx_page()
1411 page_pool_recycle_direct(rx->page_pool, page); in tsnep_rx_page()
1413 rx->dropped++; in tsnep_rx_page()
1417 static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_poll() argument
1420 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_poll()
1432 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_poll()
1433 dma_dir = page_pool_get_dma_dir(rx->page_pool); in tsnep_rx_poll()
1434 prog = READ_ONCE(rx->adapter->xdp_prog); in tsnep_rx_poll()
1436 tx_nq = netdev_get_tx_queue(rx->adapter->netdev, in tsnep_rx_poll()
1437 rx->tx_queue_index); in tsnep_rx_poll()
1438 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll()
1440 xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq); in tsnep_rx_poll()
1443 while (likely(done < budget) && (rx->read != rx->write)) { in tsnep_rx_poll()
1444 entry = &rx->entry[rx->read]; in tsnep_rx_poll()
1445 if ((__le32_to_cpu(entry->desc_wb->properties) & in tsnep_rx_poll()
1447 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK)) in tsnep_rx_poll()
1454 desc_available -= tsnep_rx_refill(rx, desc_available, in tsnep_rx_poll()
1456 if (!entry->page) { in tsnep_rx_poll()
1458 * empty RX ring, thus buffer cannot be used for in tsnep_rx_poll()
1459 * RX processing in tsnep_rx_poll()
1461 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll()
1464 rx->dropped++; in tsnep_rx_poll()
1475 prefetch(page_address(entry->page) + TSNEP_RX_OFFSET); in tsnep_rx_poll()
1476 length = __le32_to_cpu(entry->desc_wb->properties) & in tsnep_rx_poll()
1478 dma_sync_single_range_for_cpu(dmadev, entry->dma, in tsnep_rx_poll()
1481 /* RX metadata with timestamps is in front of actual data, in tsnep_rx_poll()
1483 * consider metadata size as offset of actual data during RX in tsnep_rx_poll()
1486 length -= TSNEP_RX_INLINE_METADATA_SIZE; in tsnep_rx_poll()
1488 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll()
1494 xdp_prepare_buff(&xdp, page_address(entry->page), in tsnep_rx_poll()
1496 length - ETH_FCS_LEN, false); in tsnep_rx_poll()
1498 consume = tsnep_xdp_run_prog(rx, prog, &xdp, in tsnep_rx_poll()
1501 rx->packets++; in tsnep_rx_poll()
1502 rx->bytes += length; in tsnep_rx_poll()
1504 entry->page = NULL; in tsnep_rx_poll()
1510 tsnep_rx_page(rx, napi, entry->page, length); in tsnep_rx_poll()
1511 entry->page = NULL; in tsnep_rx_poll()
1515 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll()
1518 tsnep_rx_refill(rx, desc_available, false); in tsnep_rx_poll()
1523 static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_poll_zc() argument
1536 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_poll_zc()
1537 prog = READ_ONCE(rx->adapter->xdp_prog); in tsnep_rx_poll_zc()
1539 tx_nq = netdev_get_tx_queue(rx->adapter->netdev, in tsnep_rx_poll_zc()
1540 rx->tx_queue_index); in tsnep_rx_poll_zc()
1541 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll_zc()
1544 while (likely(done < budget) && (rx->read != rx->write)) { in tsnep_rx_poll_zc()
1545 entry = &rx->entry[rx->read]; in tsnep_rx_poll_zc()
1546 if ((__le32_to_cpu(entry->desc_wb->properties) & in tsnep_rx_poll_zc()
1548 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK)) in tsnep_rx_poll_zc()
1555 desc_available -= tsnep_rx_refill_zc(rx, desc_available, in tsnep_rx_poll_zc()
1557 if (!entry->xdp) { in tsnep_rx_poll_zc()
1559 * empty RX ring, thus buffer cannot be used for in tsnep_rx_poll_zc()
1560 * RX processing in tsnep_rx_poll_zc()
1562 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll_zc()
1565 rx->dropped++; in tsnep_rx_poll_zc()
1576 prefetch(entry->xdp->data); in tsnep_rx_poll_zc()
1577 length = __le32_to_cpu(entry->desc_wb->properties) & in tsnep_rx_poll_zc()
1579 xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN); in tsnep_rx_poll_zc()
1580 xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool); in tsnep_rx_poll_zc()
1582 /* RX metadata with timestamps is in front of actual data, in tsnep_rx_poll_zc()
1584 * consider metadata size as offset of actual data during RX in tsnep_rx_poll_zc()
1587 length -= TSNEP_RX_INLINE_METADATA_SIZE; in tsnep_rx_poll_zc()
1589 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll_zc()
1595 entry->xdp->data += TSNEP_RX_INLINE_METADATA_SIZE; in tsnep_rx_poll_zc()
1596 entry->xdp->data_meta += TSNEP_RX_INLINE_METADATA_SIZE; in tsnep_rx_poll_zc()
1598 consume = tsnep_xdp_run_prog_zc(rx, prog, entry->xdp, in tsnep_rx_poll_zc()
1601 rx->packets++; in tsnep_rx_poll_zc()
1602 rx->bytes += length; in tsnep_rx_poll_zc()
1604 entry->xdp = NULL; in tsnep_rx_poll_zc()
1610 page = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_poll_zc()
1613 entry->xdp->data - TSNEP_RX_INLINE_METADATA_SIZE, in tsnep_rx_poll_zc()
1615 tsnep_rx_page(rx, napi, page, length); in tsnep_rx_poll_zc()
1617 rx->dropped++; in tsnep_rx_poll_zc()
1619 xsk_buff_free(entry->xdp); in tsnep_rx_poll_zc()
1620 entry->xdp = NULL; in tsnep_rx_poll_zc()
1624 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll_zc()
1627 desc_available -= tsnep_rx_refill_zc(rx, desc_available, false); in tsnep_rx_poll_zc()
1629 if (xsk_uses_need_wakeup(rx->xsk_pool)) { in tsnep_rx_poll_zc()
1631 xsk_set_rx_need_wakeup(rx->xsk_pool); in tsnep_rx_poll_zc()
1633 xsk_clear_rx_need_wakeup(rx->xsk_pool); in tsnep_rx_poll_zc()
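
The RX poll paths above subtract TSNEP_RX_INLINE_METADATA_SIZE from the hardware-reported length and step past the in-band metadata before handing the frame on. The sketch below assumes a hypothetical 16-byte metadata block (counter plus timestamp, matching the two fields read elsewhere in the listing); only the "skip metadata, shrink length" handling mirrors the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rx_inline {                 /* hypothetical in-band metadata layout */
        uint64_t counter;
        uint64_t timestamp;
};

#define RX_INLINE_METADATA_SIZE  sizeof(struct rx_inline)

static void process_buffer(const uint8_t *buf, size_t hw_length)
{
        struct rx_inline meta;
        const uint8_t *frame;
        size_t frame_len;

        memcpy(&meta, buf, sizeof(meta));          /* metadata sits in front of the frame */
        frame = buf + RX_INLINE_METADATA_SIZE;     /* actual packet starts after it */
        frame_len = hw_length - RX_INLINE_METADATA_SIZE;

        printf("ts=%llu len=%zu first byte=0x%02x\n",
               (unsigned long long)meta.timestamp, frame_len, (unsigned)frame[0]);
}

int main(void)
{
        uint8_t buf[64] = { 0 };
        struct rx_inline meta = { .counter = 1, .timestamp = 123456789 };

        memcpy(buf, &meta, sizeof(meta));
        buf[RX_INLINE_METADATA_SIZE] = 0xff;       /* pretend the destination MAC starts here */
        process_buffer(buf, sizeof(buf));
        return 0;
}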
1641 static bool tsnep_rx_pending(struct tsnep_rx *rx) in tsnep_rx_pending() argument
1645 if (rx->read != rx->write) { in tsnep_rx_pending()
1646 entry = &rx->entry[rx->read]; in tsnep_rx_pending()
1647 if ((__le32_to_cpu(entry->desc_wb->properties) & in tsnep_rx_pending()
1649 (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK)) in tsnep_rx_pending()
1656 static int tsnep_rx_open(struct tsnep_rx *rx) in tsnep_rx_open() argument
1661 retval = tsnep_rx_ring_create(rx); in tsnep_rx_open()
1665 tsnep_rx_init(rx); in tsnep_rx_open()
1667 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_open()
1668 if (rx->xsk_pool) in tsnep_rx_open()
1669 retval = tsnep_rx_alloc_zc(rx, desc_available, false); in tsnep_rx_open()
1671 retval = tsnep_rx_alloc(rx, desc_available, false); in tsnep_rx_open()
1673 retval = -ENOMEM; in tsnep_rx_open()
1681 if (rx->xsk_pool) { in tsnep_rx_open()
1682 retval = tsnep_rx_alloc_page_buffer(rx); in tsnep_rx_open()
1690 tsnep_rx_ring_cleanup(rx); in tsnep_rx_open()
1694 static void tsnep_rx_close(struct tsnep_rx *rx) in tsnep_rx_close() argument
1696 if (rx->xsk_pool) in tsnep_rx_close()
1697 tsnep_rx_free_page_buffer(rx); in tsnep_rx_close()
1699 tsnep_rx_ring_cleanup(rx); in tsnep_rx_close()
1702 static void tsnep_rx_reopen(struct tsnep_rx *rx) in tsnep_rx_reopen() argument
1704 struct page **page = rx->page_buffer; in tsnep_rx_reopen()
1707 tsnep_rx_init(rx); in tsnep_rx_reopen()
1710 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_reopen()
1715 entry->desc->properties = 0; in tsnep_rx_reopen()
1716 entry->desc_wb->properties = 0; in tsnep_rx_reopen()
1720 tsnep_rx_set_page(rx, entry, *page); in tsnep_rx_reopen()
1721 tsnep_rx_activate(rx, rx->write); in tsnep_rx_reopen()
1722 rx->write++; in tsnep_rx_reopen()
1730 static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx) in tsnep_rx_reopen_xsk() argument
1732 struct page **page = rx->page_buffer; in tsnep_rx_reopen_xsk()
1736 tsnep_rx_init(rx); in tsnep_rx_reopen_xsk()
1742 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, in tsnep_rx_reopen_xsk()
1743 TSNEP_RING_SIZE - 1); in tsnep_rx_reopen_xsk()
1746 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_reopen_xsk()
1751 if (entry->page) { in tsnep_rx_reopen_xsk()
1752 *page = entry->page; in tsnep_rx_reopen_xsk()
1753 entry->page = NULL; in tsnep_rx_reopen_xsk()
1761 entry->desc->properties = 0; in tsnep_rx_reopen_xsk()
1762 entry->desc_wb->properties = 0; in tsnep_rx_reopen_xsk()
1765 tsnep_rx_set_xdp(rx, entry, in tsnep_rx_reopen_xsk()
1766 rx->xdp_batch[allocated - 1]); in tsnep_rx_reopen_xsk()
1767 tsnep_rx_activate(rx, rx->write); in tsnep_rx_reopen_xsk()
1768 rx->write++; in tsnep_rx_reopen_xsk()
1770 allocated--; in tsnep_rx_reopen_xsk()
1778 if (xsk_uses_need_wakeup(rx->xsk_pool)) { in tsnep_rx_reopen_xsk()
1779 int desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_reopen_xsk()
1782 xsk_set_rx_need_wakeup(rx->xsk_pool); in tsnep_rx_reopen_xsk()
1784 xsk_clear_rx_need_wakeup(rx->xsk_pool); in tsnep_rx_reopen_xsk()
1788 static bool tsnep_pending(struct tsnep_queue *queue) in tsnep_pending() argument
1790 if (queue->tx && tsnep_tx_pending(queue->tx)) in tsnep_pending()
1793 if (queue->rx && tsnep_rx_pending(queue->rx)) in tsnep_pending()
1801 struct tsnep_queue *queue = container_of(napi, struct tsnep_queue, in tsnep_poll() local
1806 if (queue->tx) in tsnep_poll()
1807 complete = tsnep_tx_poll(queue->tx, budget); in tsnep_poll()
1813 if (queue->rx) { in tsnep_poll()
1814 done = queue->rx->xsk_pool ? in tsnep_poll()
1815 tsnep_rx_poll_zc(queue->rx, napi, budget) : in tsnep_poll()
1816 tsnep_rx_poll(queue->rx, napi, budget); in tsnep_poll()
1826 tsnep_enable_irq(queue->adapter, queue->irq_mask); in tsnep_poll()
1832 if (tsnep_pending(queue)) { in tsnep_poll()
1833 tsnep_disable_irq(queue->adapter, queue->irq_mask); in tsnep_poll()
1838 return min(done, budget - 1); in tsnep_poll()
1841 static int tsnep_request_irq(struct tsnep_queue *queue, bool first) in tsnep_request_irq() argument
1843 const char *name = netdev_name(queue->adapter->netdev); in tsnep_request_irq()
1849 sprintf(queue->name, "%s-mac", name); in tsnep_request_irq()
1851 dev = queue->adapter; in tsnep_request_irq()
1853 if (queue->tx && queue->rx) in tsnep_request_irq()
1854 snprintf(queue->name, sizeof(queue->name), "%s-txrx-%d", in tsnep_request_irq()
1855 name, queue->rx->queue_index); in tsnep_request_irq()
1856 else if (queue->tx) in tsnep_request_irq()
1857 snprintf(queue->name, sizeof(queue->name), "%s-tx-%d", in tsnep_request_irq()
1858 name, queue->tx->queue_index); in tsnep_request_irq()
1860 snprintf(queue->name, sizeof(queue->name), "%s-rx-%d", in tsnep_request_irq()
1861 name, queue->rx->queue_index); in tsnep_request_irq()
1863 dev = queue; in tsnep_request_irq()
1866 retval = request_irq(queue->irq, handler, 0, queue->name, dev); in tsnep_request_irq()
1869 memset(queue->name, 0, sizeof(queue->name)); in tsnep_request_irq()
1875 static void tsnep_free_irq(struct tsnep_queue *queue, bool first) in tsnep_free_irq() argument
1879 if (!strlen(queue->name)) in tsnep_free_irq()
1883 dev = queue->adapter; in tsnep_free_irq()
1885 dev = queue; in tsnep_free_irq()
1887 free_irq(queue->irq, dev); in tsnep_free_irq()
1888 memset(queue->name, 0, sizeof(queue->name)); in tsnep_free_irq()
1891 static void tsnep_queue_close(struct tsnep_queue *queue, bool first) in tsnep_queue_close() argument
1893 struct tsnep_rx *rx = queue->rx; in tsnep_queue_close() local
1895 tsnep_free_irq(queue, first); in tsnep_queue_close()
1897 if (rx) { in tsnep_queue_close()
1898 if (xdp_rxq_info_is_reg(&rx->xdp_rxq)) in tsnep_queue_close()
1899 xdp_rxq_info_unreg(&rx->xdp_rxq); in tsnep_queue_close()
1900 if (xdp_rxq_info_is_reg(&rx->xdp_rxq_zc)) in tsnep_queue_close()
1901 xdp_rxq_info_unreg(&rx->xdp_rxq_zc); in tsnep_queue_close()
1904 netif_napi_del(&queue->napi); in tsnep_queue_close()
1908 struct tsnep_queue *queue, bool first) in tsnep_queue_open() argument
1910 struct tsnep_rx *rx = queue->rx; in tsnep_queue_open() local
1911 struct tsnep_tx *tx = queue->tx; in tsnep_queue_open()
1914 netif_napi_add(adapter->netdev, &queue->napi, tsnep_poll); in tsnep_queue_open()
1916 if (rx) { in tsnep_queue_open()
1917 /* choose TX queue for XDP_TX */ in tsnep_queue_open()
1919 rx->tx_queue_index = tx->queue_index; in tsnep_queue_open()
1920 else if (rx->queue_index < adapter->num_tx_queues) in tsnep_queue_open()
1921 rx->tx_queue_index = rx->queue_index; in tsnep_queue_open()
1923 rx->tx_queue_index = 0; in tsnep_queue_open()
1929 retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev, in tsnep_queue_open()
1930 rx->queue_index, queue->napi.napi_id); in tsnep_queue_open()
1933 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, in tsnep_queue_open()
1935 rx->page_pool); in tsnep_queue_open()
1938 retval = xdp_rxq_info_reg(&rx->xdp_rxq_zc, adapter->netdev, in tsnep_queue_open()
1939 rx->queue_index, queue->napi.napi_id); in tsnep_queue_open()
1942 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc, in tsnep_queue_open()
1947 if (rx->xsk_pool) in tsnep_queue_open()
1948 xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc); in tsnep_queue_open()
1951 retval = tsnep_request_irq(queue, first); in tsnep_queue_open()
1953 netif_err(adapter, drv, adapter->netdev, in tsnep_queue_open()
1954 "can't get assigned irq %d.\n", queue->irq); in tsnep_queue_open()
1961 tsnep_queue_close(queue, first); in tsnep_queue_open()
1966 static void tsnep_queue_enable(struct tsnep_queue *queue) in tsnep_queue_enable() argument
1968 napi_enable(&queue->napi); in tsnep_queue_enable()
1969 tsnep_enable_irq(queue->adapter, queue->irq_mask); in tsnep_queue_enable()
1971 if (queue->tx) in tsnep_queue_enable()
1972 tsnep_tx_enable(queue->tx); in tsnep_queue_enable()
1974 if (queue->rx) in tsnep_queue_enable()
1975 tsnep_rx_enable(queue->rx); in tsnep_queue_enable()
1978 static void tsnep_queue_disable(struct tsnep_queue *queue) in tsnep_queue_disable() argument
1980 if (queue->tx) in tsnep_queue_disable()
1981 tsnep_tx_disable(queue->tx, &queue->napi); in tsnep_queue_disable()
1983 napi_disable(&queue->napi); in tsnep_queue_disable()
1984 tsnep_disable_irq(queue->adapter, queue->irq_mask); in tsnep_queue_disable()
1986 /* disable RX after NAPI polling has been disabled, because RX can be in tsnep_queue_disable()
1989 if (queue->rx) in tsnep_queue_disable()
1990 tsnep_rx_disable(queue->rx); in tsnep_queue_disable()
1998 for (i = 0; i < adapter->num_queues; i++) { in tsnep_netdev_open()
1999 if (adapter->queue[i].tx) { in tsnep_netdev_open()
2000 retval = tsnep_tx_open(adapter->queue[i].tx); in tsnep_netdev_open()
2004 if (adapter->queue[i].rx) { in tsnep_netdev_open()
2005 retval = tsnep_rx_open(adapter->queue[i].rx); in tsnep_netdev_open()
2010 retval = tsnep_queue_open(adapter, &adapter->queue[i], i == 0); in tsnep_netdev_open()
2015 retval = netif_set_real_num_tx_queues(adapter->netdev, in tsnep_netdev_open()
2016 adapter->num_tx_queues); in tsnep_netdev_open()
2019 retval = netif_set_real_num_rx_queues(adapter->netdev, in tsnep_netdev_open()
2020 adapter->num_rx_queues); in tsnep_netdev_open()
2029 for (i = 0; i < adapter->num_queues; i++) in tsnep_netdev_open()
2030 tsnep_queue_enable(&adapter->queue[i]); in tsnep_netdev_open()
2037 for (i = 0; i < adapter->num_queues; i++) { in tsnep_netdev_open()
2038 tsnep_queue_close(&adapter->queue[i], i == 0); in tsnep_netdev_open()
2040 if (adapter->queue[i].rx) in tsnep_netdev_open()
2041 tsnep_rx_close(adapter->queue[i].rx); in tsnep_netdev_open()
2042 if (adapter->queue[i].tx) in tsnep_netdev_open()
2043 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_open()
2056 for (i = 0; i < adapter->num_queues; i++) { in tsnep_netdev_close()
2057 tsnep_queue_disable(&adapter->queue[i]); in tsnep_netdev_close()
2059 tsnep_queue_close(&adapter->queue[i], i == 0); in tsnep_netdev_close()
2061 if (adapter->queue[i].rx) in tsnep_netdev_close()
2062 tsnep_rx_close(adapter->queue[i].rx); in tsnep_netdev_close()
2063 if (adapter->queue[i].tx) in tsnep_netdev_close()
2064 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_close()
2070 int tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool) in tsnep_enable_xsk() argument
2072 bool running = netif_running(queue->adapter->netdev); in tsnep_enable_xsk()
2077 return -EOPNOTSUPP; in tsnep_enable_xsk()
2079 queue->rx->page_buffer = kcalloc(TSNEP_RING_SIZE, in tsnep_enable_xsk()
2080 sizeof(*queue->rx->page_buffer), in tsnep_enable_xsk()
2082 if (!queue->rx->page_buffer) in tsnep_enable_xsk()
2083 return -ENOMEM; in tsnep_enable_xsk()
2084 queue->rx->xdp_batch = kcalloc(TSNEP_RING_SIZE, in tsnep_enable_xsk()
2085 sizeof(*queue->rx->xdp_batch), in tsnep_enable_xsk()
2087 if (!queue->rx->xdp_batch) { in tsnep_enable_xsk()
2088 kfree(queue->rx->page_buffer); in tsnep_enable_xsk()
2089 queue->rx->page_buffer = NULL; in tsnep_enable_xsk()
2091 return -ENOMEM; in tsnep_enable_xsk()
2094 xsk_pool_set_rxq_info(pool, &queue->rx->xdp_rxq_zc); in tsnep_enable_xsk()
2097 tsnep_queue_disable(queue); in tsnep_enable_xsk()
2099 queue->tx->xsk_pool = pool; in tsnep_enable_xsk()
2100 queue->rx->xsk_pool = pool; in tsnep_enable_xsk()
2103 tsnep_rx_reopen_xsk(queue->rx); in tsnep_enable_xsk()
2104 tsnep_queue_enable(queue); in tsnep_enable_xsk()
2110 void tsnep_disable_xsk(struct tsnep_queue *queue) in tsnep_disable_xsk() argument
2112 bool running = netif_running(queue->adapter->netdev); in tsnep_disable_xsk()
2115 tsnep_queue_disable(queue); in tsnep_disable_xsk()
2117 tsnep_rx_free_zc(queue->rx); in tsnep_disable_xsk()
2119 queue->rx->xsk_pool = NULL; in tsnep_disable_xsk()
2120 queue->tx->xsk_pool = NULL; in tsnep_disable_xsk()
2123 tsnep_rx_reopen(queue->rx); in tsnep_disable_xsk()
2124 tsnep_queue_enable(queue); in tsnep_disable_xsk()
2127 kfree(queue->rx->xdp_batch); in tsnep_disable_xsk()
2128 queue->rx->xdp_batch = NULL; in tsnep_disable_xsk()
2129 kfree(queue->rx->page_buffer); in tsnep_disable_xsk()
2130 queue->rx->page_buffer = NULL; in tsnep_disable_xsk()
2139 if (queue_mapping >= adapter->num_tx_queues) in tsnep_netdev_xmit_frame()
2142 return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]); in tsnep_netdev_xmit_frame()
2149 return -EINVAL; in tsnep_netdev_ioctl()
2152 return phy_mii_ioctl(netdev->phydev, ifr, cmd); in tsnep_netdev_ioctl()
2162 if (netdev->flags & IFF_PROMISC) { in tsnep_netdev_set_multicast()
2165 } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) { in tsnep_netdev_set_multicast()
2168 iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER); in tsnep_netdev_set_multicast()
2179 for (i = 0; i < adapter->num_tx_queues; i++) { in tsnep_netdev_get_stats64()
2180 stats->tx_packets += adapter->tx[i].packets; in tsnep_netdev_get_stats64()
2181 stats->tx_bytes += adapter->tx[i].bytes; in tsnep_netdev_get_stats64()
2182 stats->tx_dropped += adapter->tx[i].dropped; in tsnep_netdev_get_stats64()
2184 for (i = 0; i < adapter->num_rx_queues; i++) { in tsnep_netdev_get_stats64()
2185 stats->rx_packets += adapter->rx[i].packets; in tsnep_netdev_get_stats64()
2186 stats->rx_bytes += adapter->rx[i].bytes; in tsnep_netdev_get_stats64()
2187 stats->rx_dropped += adapter->rx[i].dropped; in tsnep_netdev_get_stats64()
2188 stats->multicast += adapter->rx[i].multicast; in tsnep_netdev_get_stats64()
2190 reg = ioread32(adapter->addr + TSNEP_QUEUE(i) + in tsnep_netdev_get_stats64()
2194 stats->rx_dropped += val; in tsnep_netdev_get_stats64()
2197 stats->rx_dropped += val; in tsnep_netdev_get_stats64()
2200 stats->rx_errors += val; in tsnep_netdev_get_stats64()
2201 stats->rx_fifo_errors += val; in tsnep_netdev_get_stats64()
2204 stats->rx_errors += val; in tsnep_netdev_get_stats64()
2205 stats->rx_frame_errors += val; in tsnep_netdev_get_stats64()
2208 reg = ioread32(adapter->addr + ECM_STAT); in tsnep_netdev_get_stats64()
2210 stats->rx_errors += val; in tsnep_netdev_get_stats64()
2212 stats->rx_errors += val; in tsnep_netdev_get_stats64()
2213 stats->rx_crc_errors += val; in tsnep_netdev_get_stats64()
2215 stats->rx_errors += val; in tsnep_netdev_get_stats64()
2220 iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW); in tsnep_mac_set_address()
2222 adapter->addr + TSNEP_MAC_ADDRESS_HIGH); in tsnep_mac_set_address()
2224 ether_addr_copy(adapter->mac_address, addr); in tsnep_mac_set_address()
2225 netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n", in tsnep_mac_set_address()
2238 eth_hw_addr_set(netdev, sock_addr->sa_data); in tsnep_netdev_set_mac_address()
2239 tsnep_mac_set_address(adapter, sock_addr->sa_data); in tsnep_netdev_set_mac_address()
2248 netdev_features_t changed = netdev->features ^ features; in tsnep_netdev_set_features()
2264 struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data; in tsnep_netdev_get_tstamp()
2268 timestamp = __le64_to_cpu(rx_inline->counter); in tsnep_netdev_get_tstamp()
2270 timestamp = __le64_to_cpu(rx_inline->timestamp); in tsnep_netdev_get_tstamp()
2279 switch (bpf->command) { in tsnep_netdev_bpf()
2281 return tsnep_xdp_setup_prog(adapter, bpf->prog, bpf->extack); in tsnep_netdev_bpf()
2283 return tsnep_xdp_setup_pool(adapter, bpf->xsk.pool, in tsnep_netdev_bpf()
2284 bpf->xsk.queue_id); in tsnep_netdev_bpf()
2286 return -EOPNOTSUPP; in tsnep_netdev_bpf()
2293 cpu &= TSNEP_MAX_QUEUES - 1; in tsnep_xdp_get_tx()
2295 while (cpu >= adapter->num_tx_queues) in tsnep_xdp_get_tx()
2296 cpu -= adapter->num_tx_queues; in tsnep_xdp_get_tx()
2298 return &adapter->tx[cpu]; in tsnep_xdp_get_tx()
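
A standalone model of the queue selection in tsnep_xdp_get_tx() above: mask the CPU id to a power-of-two upper bound, then fold it into the configured number of TX queues. MAX_QUEUES below is an illustrative value, not the driver constant.

#include <assert.h>
#include <stdio.h>

#define MAX_QUEUES 8

static int pick_tx_queue(unsigned int cpu, unsigned int num_tx_queues)
{
        cpu &= MAX_QUEUES - 1;              /* bound the CPU id */

        while (cpu >= num_tx_queues)        /* fold into the available queues */
                cpu -= num_tx_queues;

        return cpu;
}

int main(void)
{
        assert(pick_tx_queue(0, 2) == 0);
        assert(pick_tx_queue(5, 2) == 1);   /* 5 & 7 = 5, minus 2 twice -> 1 */
        assert(pick_tx_queue(9, 2) == 1);   /* 9 & 7 = 1 */
        printf("cpu 5 -> queue %d\n", pick_tx_queue(5, 2));
        return 0;
}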
2312 return -EINVAL; in tsnep_netdev_xdp_xmit()
2315 nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index); in tsnep_netdev_xdp_xmit()
2325 /* avoid transmit queue timeout since we share it with the slow in tsnep_netdev_xdp_xmit()
2343 struct tsnep_queue *queue; in tsnep_netdev_xsk_wakeup() local
2345 if (queue_id >= adapter->num_rx_queues || in tsnep_netdev_xsk_wakeup()
2346 queue_id >= adapter->num_tx_queues) in tsnep_netdev_xsk_wakeup()
2347 return -EINVAL; in tsnep_netdev_xsk_wakeup()
2349 queue = &adapter->queue[queue_id]; in tsnep_netdev_xsk_wakeup()
2351 if (!napi_if_scheduled_mark_missed(&queue->napi)) in tsnep_netdev_xsk_wakeup()
2352 napi_schedule(&queue->napi); in tsnep_netdev_xsk_wakeup()
2377 /* initialize RX filtering, at least configured MAC address and in tsnep_mac_init()
2380 iowrite16(0, adapter->addr + TSNEP_RX_FILTER); in tsnep_mac_init()
2383 * - device tree in tsnep_mac_init()
2384 * - valid MAC address already set in tsnep_mac_init()
2385 * - MAC address register if valid in tsnep_mac_init()
2386 * - random MAC address in tsnep_mac_init()
2388 retval = of_get_mac_address(adapter->pdev->dev.of_node, in tsnep_mac_init()
2389 adapter->mac_address); in tsnep_mac_init()
2390 if (retval == -EPROBE_DEFER) in tsnep_mac_init()
2392 if (retval && !is_valid_ether_addr(adapter->mac_address)) { in tsnep_mac_init()
2393 *(u32 *)adapter->mac_address = in tsnep_mac_init()
2394 ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW); in tsnep_mac_init()
2395 *(u16 *)(adapter->mac_address + sizeof(u32)) = in tsnep_mac_init()
2396 ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH); in tsnep_mac_init()
2397 if (!is_valid_ether_addr(adapter->mac_address)) in tsnep_mac_init()
2398 eth_random_addr(adapter->mac_address); in tsnep_mac_init()
2401 tsnep_mac_set_address(adapter, adapter->mac_address); in tsnep_mac_init()
2402 eth_hw_addr_set(adapter->netdev, adapter->mac_address); in tsnep_mac_init()
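
The comment above tsnep_mac_init() lists the MAC address fallback order: device tree, then an already valid address, then the address registers, then a random one. A userspace sketch of that selection under the usual validity rule (non-zero, unicast); all helper names are local to this sketch.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static bool valid_ether_addr(const unsigned char *a)
{
        static const unsigned char zero[6];

        /* usable unicast address: not all-zero and multicast bit cleared */
        return memcmp(a, zero, 6) != 0 && !(a[0] & 0x01);
}

static void random_ether_addr(unsigned char *a)
{
        for (int i = 0; i < 6; i++)
                a[i] = (unsigned char)rand();
        a[0] &= ~0x01;  /* clear multicast bit */
        a[0] |= 0x02;   /* set locally administered bit */
}

/* dt_ok mimics a successful device tree lookup; reg_addr mimics the value
 * read back from the MAC address registers. */
static void select_mac(bool dt_ok, const unsigned char *dt_addr,
                       const unsigned char *reg_addr, unsigned char *out)
{
        if (dt_ok) {
                memcpy(out, dt_addr, 6);
                return;
        }
        if (valid_ether_addr(out))
                return;                     /* keep an address that was already set */
        memcpy(out, reg_addr, 6);
        if (!valid_ether_addr(out))
                random_ether_addr(out);     /* last resort */
}

int main(void)
{
        unsigned char out[6] = { 0 };
        unsigned char reg[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };

        select_mac(false, NULL, reg, out);
        printf("selected %02x:%02x:%02x:%02x:%02x:%02x\n",
               out[0], out[1], out[2], out[3], out[4], out[5]);
        return 0;
}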
2409 struct device_node *np = adapter->pdev->dev.of_node; in tsnep_mdio_init()
2417 adapter->suppress_preamble = in tsnep_mdio_init()
2418 of_property_read_bool(np, "suppress-preamble"); in tsnep_mdio_init()
2421 adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev); in tsnep_mdio_init()
2422 if (!adapter->mdiobus) { in tsnep_mdio_init()
2423 retval = -ENOMEM; in tsnep_mdio_init()
2428 adapter->mdiobus->priv = (void *)adapter; in tsnep_mdio_init()
2429 adapter->mdiobus->parent = &adapter->pdev->dev; in tsnep_mdio_init()
2430 adapter->mdiobus->read = tsnep_mdiobus_read; in tsnep_mdio_init()
2431 adapter->mdiobus->write = tsnep_mdiobus_write; in tsnep_mdio_init()
2432 adapter->mdiobus->name = TSNEP "-mdiobus"; in tsnep_mdio_init()
2433 snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s", in tsnep_mdio_init()
2434 adapter->pdev->name); in tsnep_mdio_init()
2437 adapter->mdiobus->phy_mask = 0x0000001; in tsnep_mdio_init()
2439 retval = of_mdiobus_register(adapter->mdiobus, np); in tsnep_mdio_init()
2452 retval = of_get_phy_mode(adapter->pdev->dev.of_node, in tsnep_phy_init()
2453 &adapter->phy_mode); in tsnep_phy_init()
2455 adapter->phy_mode = PHY_INTERFACE_MODE_GMII; in tsnep_phy_init()
2457 phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle", in tsnep_phy_init()
2459 adapter->phydev = of_phy_find_device(phy_node); in tsnep_phy_init()
2461 if (!adapter->phydev && adapter->mdiobus) in tsnep_phy_init()
2462 adapter->phydev = phy_find_first(adapter->mdiobus); in tsnep_phy_init()
2463 if (!adapter->phydev) in tsnep_phy_init()
2464 return -EIO; in tsnep_phy_init()
2476 /* one TX/RX queue pair for netdev is mandatory */ in tsnep_queue_init()
2477 if (platform_irq_count(adapter->pdev) == 1) in tsnep_queue_init()
2478 retval = platform_get_irq(adapter->pdev, 0); in tsnep_queue_init()
2480 retval = platform_get_irq_byname(adapter->pdev, "mac"); in tsnep_queue_init()
2483 adapter->num_tx_queues = 1; in tsnep_queue_init()
2484 adapter->num_rx_queues = 1; in tsnep_queue_init()
2485 adapter->num_queues = 1; in tsnep_queue_init()
2486 adapter->queue[0].adapter = adapter; in tsnep_queue_init()
2487 adapter->queue[0].irq = retval; in tsnep_queue_init()
2488 adapter->queue[0].tx = &adapter->tx[0]; in tsnep_queue_init()
2489 adapter->queue[0].tx->adapter = adapter; in tsnep_queue_init()
2490 adapter->queue[0].tx->addr = adapter->addr + TSNEP_QUEUE(0); in tsnep_queue_init()
2491 adapter->queue[0].tx->queue_index = 0; in tsnep_queue_init()
2492 adapter->queue[0].rx = &adapter->rx[0]; in tsnep_queue_init()
2493 adapter->queue[0].rx->adapter = adapter; in tsnep_queue_init()
2494 adapter->queue[0].rx->addr = adapter->addr + TSNEP_QUEUE(0); in tsnep_queue_init()
2495 adapter->queue[0].rx->queue_index = 0; in tsnep_queue_init()
2496 adapter->queue[0].irq_mask = irq_mask; in tsnep_queue_init()
2497 adapter->queue[0].irq_delay_addr = adapter->addr + ECM_INT_DELAY; in tsnep_queue_init()
2498 retval = tsnep_set_irq_coalesce(&adapter->queue[0], in tsnep_queue_init()
2503 adapter->netdev->irq = adapter->queue[0].irq; in tsnep_queue_init()
2505 /* add additional TX/RX queue pairs only if dedicated interrupt is in tsnep_queue_init()
2509 sprintf(name, "txrx-%d", i); in tsnep_queue_init()
2510 retval = platform_get_irq_byname_optional(adapter->pdev, name); in tsnep_queue_init()
2514 adapter->num_tx_queues++; in tsnep_queue_init()
2515 adapter->num_rx_queues++; in tsnep_queue_init()
2516 adapter->num_queues++; in tsnep_queue_init()
2517 adapter->queue[i].adapter = adapter; in tsnep_queue_init()
2518 adapter->queue[i].irq = retval; in tsnep_queue_init()
2519 adapter->queue[i].tx = &adapter->tx[i]; in tsnep_queue_init()
2520 adapter->queue[i].tx->adapter = adapter; in tsnep_queue_init()
2521 adapter->queue[i].tx->addr = adapter->addr + TSNEP_QUEUE(i); in tsnep_queue_init()
2522 adapter->queue[i].tx->queue_index = i; in tsnep_queue_init()
2523 adapter->queue[i].rx = &adapter->rx[i]; in tsnep_queue_init()
2524 adapter->queue[i].rx->adapter = adapter; in tsnep_queue_init()
2525 adapter->queue[i].rx->addr = adapter->addr + TSNEP_QUEUE(i); in tsnep_queue_init()
2526 adapter->queue[i].rx->queue_index = i; in tsnep_queue_init()
2527 adapter->queue[i].irq_mask = in tsnep_queue_init()
2529 adapter->queue[i].irq_delay_addr = in tsnep_queue_init()
2530 adapter->addr + ECM_INT_DELAY + ECM_INT_DELAY_OFFSET * i; in tsnep_queue_init()
2531 retval = tsnep_set_irq_coalesce(&adapter->queue[i], in tsnep_queue_init()
2551 netdev = devm_alloc_etherdev_mqs(&pdev->dev, in tsnep_probe()
2555 return -ENODEV; in tsnep_probe()
2556 SET_NETDEV_DEV(netdev, &pdev->dev); in tsnep_probe()
2559 adapter->pdev = pdev; in tsnep_probe()
2560 adapter->dmadev = &pdev->dev; in tsnep_probe()
2561 adapter->netdev = netdev; in tsnep_probe()
2562 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE | in tsnep_probe()
2566 netdev->min_mtu = ETH_MIN_MTU; in tsnep_probe()
2567 netdev->max_mtu = TSNEP_MAX_FRAME_SIZE; in tsnep_probe()
2569 mutex_init(&adapter->gate_control_lock); in tsnep_probe()
2570 mutex_init(&adapter->rxnfc_lock); in tsnep_probe()
2571 INIT_LIST_HEAD(&adapter->rxnfc_rules); in tsnep_probe()
2574 adapter->addr = devm_ioremap_resource(&pdev->dev, io); in tsnep_probe()
2575 if (IS_ERR(adapter->addr)) in tsnep_probe()
2576 return PTR_ERR(adapter->addr); in tsnep_probe()
2577 netdev->mem_start = io->start; in tsnep_probe()
2578 netdev->mem_end = io->end; in tsnep_probe()
2580 type = ioread32(adapter->addr + ECM_TYPE); in tsnep_probe()
2584 adapter->gate_control = type & ECM_GATE_CONTROL; in tsnep_probe()
2585 adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT; in tsnep_probe()
2593 retval = dma_set_mask_and_coherent(&adapter->pdev->dev, in tsnep_probe()
2596 dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n"); in tsnep_probe()
2624 netdev->netdev_ops = &tsnep_netdev_ops; in tsnep_probe()
2625 netdev->ethtool_ops = &tsnep_ethtool_ops; in tsnep_probe()
2626 netdev->features = NETIF_F_SG; in tsnep_probe()
2627 netdev->hw_features = netdev->features | NETIF_F_LOOPBACK; in tsnep_probe()
2629 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in tsnep_probe()
2641 dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version, in tsnep_probe()
2643 if (adapter->gate_control) in tsnep_probe()
2644 dev_info(&adapter->pdev->dev, "gate control detected\n"); in tsnep_probe()
2656 if (adapter->mdiobus) in tsnep_probe()
2657 mdiobus_unregister(adapter->mdiobus); in tsnep_probe()
2666 unregister_netdev(adapter->netdev); in tsnep_remove()
2674 if (adapter->mdiobus) in tsnep_remove()
2675 mdiobus_unregister(adapter->mdiobus); in tsnep_remove()
2696 MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");