Lines Matching full:tx
10 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
98 /* handle TX/RX queue 0 interrupt */ in tsnep_irq()
114 /* handle TX/RX queue interrupt */ in tsnep_irq_txrx()
275 static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx) in tsnep_tx_ring_cleanup() argument
277 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_cleanup()
280 memset(tx->entry, 0, sizeof(tx->entry)); in tsnep_tx_ring_cleanup()
283 if (tx->page[i]) { in tsnep_tx_ring_cleanup()
284 dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i], in tsnep_tx_ring_cleanup()
285 tx->page_dma[i]); in tsnep_tx_ring_cleanup()
286 tx->page[i] = NULL; in tsnep_tx_ring_cleanup()
287 tx->page_dma[i] = 0; in tsnep_tx_ring_cleanup()
292 static int tsnep_tx_ring_create(struct tsnep_tx *tx) in tsnep_tx_ring_create() argument
294 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_create()
301 tx->page[i] = in tsnep_tx_ring_create()
302 dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i], in tsnep_tx_ring_create()
304 if (!tx->page[i]) { in tsnep_tx_ring_create()
309 entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; in tsnep_tx_ring_create()
311 (((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j); in tsnep_tx_ring_create()
314 entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j; in tsnep_tx_ring_create()
319 entry = &tx->entry[i]; in tsnep_tx_ring_create()
320 next_entry = &tx->entry[(i + 1) & TSNEP_RING_MASK]; in tsnep_tx_ring_create()
327 tsnep_tx_ring_cleanup(tx); in tsnep_tx_ring_create()
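
tsnep_tx_ring_create() builds the descriptor ring out of DMA-coherent pages and carves TSNEP_RING_ENTRIES_PER_PAGE fixed-size descriptors from each page at TSNEP_DESC_SIZE offsets, keeping a CPU pointer and the matching device address per entry. Below is a minimal sketch of that carving step; the constants and the entry layout are illustrative assumptions, not the driver's real definitions.

#include <linux/mm.h>
#include <linux/types.h>

/* Illustrative values only; the driver defines its own sizes. */
#define EXAMPLE_DESC_SIZE		256
#define EXAMPLE_ENTRIES_PER_PAGE	(PAGE_SIZE / EXAMPLE_DESC_SIZE)

struct example_entry {
	void *desc;		/* CPU pointer into the coherent page */
	dma_addr_t desc_dma;	/* matching device address of the descriptor */
};

/* Carve fixed-size descriptors out of one DMA-coherent page, mirroring the
 * inner loop of tsnep_tx_ring_create() above.
 */
static void example_carve_page(struct example_entry *entry, void *page,
			       dma_addr_t page_dma)
{
	int j;

	for (j = 0; j < EXAMPLE_ENTRIES_PER_PAGE; j++) {
		entry[j].desc = (u8 *)page + EXAMPLE_DESC_SIZE * j;
		entry[j].desc_dma = page_dma + EXAMPLE_DESC_SIZE * j;
	}
}
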
331 static void tsnep_tx_init(struct tsnep_tx *tx) in tsnep_tx_init() argument
335 dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; in tsnep_tx_init()
336 iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW); in tsnep_tx_init()
337 iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH); in tsnep_tx_init()
338 tx->write = 0; in tsnep_tx_init()
339 tx->read = 0; in tsnep_tx_init()
340 tx->owner_counter = 1; in tsnep_tx_init()
341 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_init()
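
tsnep_tx_init() programs the 64-bit address of the first descriptor as two 32-bit register writes and resets the ring indices. A hedged sketch of the address split, assuming DMA_ADDR_LOW()/DMA_ADDR_HIGH() are plain shift-and-mask helpers and using made-up register offsets:

#include <linux/io.h>
#include <linux/types.h>

/* Assumed helpers: split a 64-bit DMA address into 32-bit halves. */
#define EXAMPLE_ADDR_LOW(addr)		((u32)((addr) & 0xffffffffULL))
#define EXAMPLE_ADDR_HIGH(addr)		((u32)(((addr) >> 32) & 0xffffffffULL))

/* Hypothetical register offsets, for illustration only. */
#define EXAMPLE_TX_DESC_ADDR_LOW	0x10
#define EXAMPLE_TX_DESC_ADDR_HIGH	0x14

static void example_write_desc_base(void __iomem *addr, dma_addr_t dma)
{
	iowrite32(EXAMPLE_ADDR_LOW(dma), addr + EXAMPLE_TX_DESC_ADDR_LOW);
	iowrite32(EXAMPLE_ADDR_HIGH(dma), addr + EXAMPLE_TX_DESC_ADDR_HIGH);
}
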
344 static void tsnep_tx_enable(struct tsnep_tx *tx) in tsnep_tx_enable() argument
348 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_enable()
355 static void tsnep_tx_disable(struct tsnep_tx *tx, struct napi_struct *napi) in tsnep_tx_disable() argument
360 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_disable()
366 /* wait until TX is done in hardware */ in tsnep_tx_disable()
367 readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val, in tsnep_tx_disable()
371 /* wait until TX is also done in software */ in tsnep_tx_disable()
372 while (READ_ONCE(tx->read) != tx->write) { in tsnep_tx_disable()
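
tsnep_tx_disable() waits in two stages: it polls a control register until the hardware reports TX as done, then spins until the software read index catches up with the write index. A sketch of that pattern, with assumed bit masks and timeout values:

#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>

/* Hypothetical register layout; the real control bits and timeouts differ. */
#define EXAMPLE_CONTROL		0x00
#define EXAMPLE_CONTROL_TX_BUSY	BIT(0)

static void example_tx_drain(void __iomem *addr, const int *read,
			     const int *write)
{
	u32 val;

	/* Stage 1: wait until the hardware has finished transmitting. */
	readx_poll_timeout(ioread32, addr + EXAMPLE_CONTROL, val,
			   !(val & EXAMPLE_CONTROL_TX_BUSY), 10000, 1000000);

	/* Stage 2: wait until software completion consumed every descriptor. */
	while (READ_ONCE(*read) != READ_ONCE(*write))
		usleep_range(1000, 2000);
}
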
378 static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length, in tsnep_tx_activate() argument
381 struct tsnep_tx_entry *entry = &tx->entry[index]; in tsnep_tx_activate()
414 if (index == tx->increment_owner_counter) { in tsnep_tx_activate()
415 tx->owner_counter++; in tsnep_tx_activate()
416 if (tx->owner_counter == 4) in tsnep_tx_activate()
417 tx->owner_counter = 1; in tsnep_tx_activate()
418 tx->increment_owner_counter--; in tsnep_tx_activate()
419 if (tx->increment_owner_counter < 0) in tsnep_tx_activate()
420 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_activate()
423 (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & in tsnep_tx_activate()
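
tsnep_tx_activate() stamps each descriptor with an owner counter that cycles through the values 1..3; the counter is bumped once per ring wrap, and the slot at which the bump happens walks backwards by one entry each time. A standalone sketch of just that rotation, with TSNEP_RING_SIZE replaced by an assumed constant:

/* Assumed ring size for illustration (the real value is the driver's). */
#define EXAMPLE_RING_SIZE 256

struct example_owner_state {
	int owner_counter;		/* cycles 1, 2, 3, 1, ... */
	int increment_owner_counter;	/* slot at which the next bump occurs */
};

/* Return the owner counter to stamp into the descriptor at 'index',
 * mirroring the bookkeeping in tsnep_tx_activate() above.
 */
static int example_owner_for_index(struct example_owner_state *s, int index)
{
	if (index == s->increment_owner_counter) {
		s->owner_counter++;
		if (s->owner_counter == 4)
			s->owner_counter = 1;
		s->increment_owner_counter--;
		if (s->increment_owner_counter < 0)
			s->increment_owner_counter = EXAMPLE_RING_SIZE - 1;
	}

	return s->owner_counter;
}
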
440 static int tsnep_tx_desc_available(struct tsnep_tx *tx) in tsnep_tx_desc_available() argument
442 if (tx->read <= tx->write) in tsnep_tx_desc_available()
443 return TSNEP_RING_SIZE - tx->write + tx->read - 1; in tsnep_tx_desc_available()
445 return tx->read - tx->write - 1; in tsnep_tx_desc_available()
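
tsnep_tx_desc_available() is the standard ring free-space computation: one slot is always kept unused so that read == write can only mean an empty ring. For instance, with a 256-entry ring, read = 5 and write = 10 leave 250 free descriptors. A standalone sketch, assuming a 256-entry ring:

#define EXAMPLE_RING_SIZE 256	/* assumed ring size, power of two */

/* Number of descriptors that can still be queued; one slot stays unused
 * so an empty ring (read == write) is distinguishable from a full one.
 */
static int example_desc_available(int read, int write)
{
	if (read <= write)
		return EXAMPLE_RING_SIZE - write + read - 1;

	return read - write - 1;
}
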
465 memcpy(&entry->desc->tx, fragdata, len); in tsnep_tx_map_frag()
470 memcpy(&entry->desc->tx, fragdata + skb_frag_off(frag), in tsnep_tx_map_frag()
481 static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count) in tsnep_tx_map() argument
483 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_map()
491 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; in tsnep_tx_map()
503 memcpy(&entry->desc->tx, skb->data, len); in tsnep_tx_map()
519 entry->desc->tx = __cpu_to_le64(dma); in tsnep_tx_map()
528 static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count) in tsnep_tx_unmap() argument
530 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_unmap()
536 entry = &tx->entry[(index + i) & TSNEP_RING_MASK]; in tsnep_tx_unmap()
558 struct tsnep_tx *tx) in tsnep_xmit_frame_ring() argument
569 if (tsnep_tx_desc_available(tx) < count) { in tsnep_xmit_frame_ring()
573 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); in tsnep_xmit_frame_ring()
578 entry = &tx->entry[tx->write]; in tsnep_xmit_frame_ring()
581 retval = tsnep_tx_map(skb, tx, count); in tsnep_xmit_frame_ring()
583 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xmit_frame_ring()
587 tx->dropped++; in tsnep_xmit_frame_ring()
597 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, in tsnep_xmit_frame_ring()
599 tx->write = (tx->write + count) & TSNEP_RING_MASK; in tsnep_xmit_frame_ring()
606 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xmit_frame_ring()
608 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) { in tsnep_xmit_frame_ring()
610 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); in tsnep_xmit_frame_ring()
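
tsnep_xmit_frame_ring() stops the subqueue proactively: if fewer than MAX_SKB_FRAGS + 1 descriptors remain after queuing a frame, the next worst-case skb (linear part plus MAX_SKB_FRAGS fragments) might not fit, so the stack is told to stop feeding this queue. A hedged sketch of that check, with the availability helper sketched above assumed:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Stop the subqueue while the ring cannot hold a worst-case skb.
 * Returns true if the queue was stopped.
 */
static bool example_maybe_stop_queue(struct net_device *netdev,
				     u16 queue_index, int desc_available)
{
	if (desc_available < MAX_SKB_FRAGS + 1) {
		netif_stop_subqueue(netdev, queue_index);
		return true;
	}

	return false;
}
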
616 static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx, in tsnep_xdp_tx_map() argument
619 struct device *dmadev = tx->adapter->dmadev; in tsnep_xdp_tx_map()
632 entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; in tsnep_xdp_tx_map()
658 entry->desc->tx = __cpu_to_le64(dma); in tsnep_xdp_tx_map()
673 struct tsnep_tx *tx, u32 type) in tsnep_xdp_xmit_frame_ring() argument
683 /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS in tsnep_xdp_xmit_frame_ring()
684 * will be available for normal TX path and queue is stopped there if in tsnep_xdp_xmit_frame_ring()
687 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count)) in tsnep_xdp_xmit_frame_ring()
690 entry = &tx->entry[tx->write]; in tsnep_xdp_xmit_frame_ring()
693 retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type); in tsnep_xdp_xmit_frame_ring()
695 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xdp_xmit_frame_ring()
698 tx->dropped++; in tsnep_xdp_xmit_frame_ring()
705 tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, in tsnep_xdp_xmit_frame_ring()
707 tx->write = (tx->write + count) & TSNEP_RING_MASK; in tsnep_xdp_xmit_frame_ring()
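
tsnep_xdp_xmit_frame_ring() only queues an XDP frame if MAX_SKB_FRAGS + 1 descriptors would still be left afterwards, so XDP can never consume the reserve that lets the normal TX path stop its own queue cleanly. A minimal sketch of that admission check:

#include <linux/skbuff.h>

/* Allow an XDP transmit needing 'count' descriptors only if the reserve
 * kept for the regular TX path stays intact.
 */
static bool example_xdp_may_transmit(int desc_available, int count)
{
	return desc_available >= MAX_SKB_FRAGS + 1 + count;
}
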
715 static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx) in tsnep_xdp_xmit_flush() argument
717 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xdp_xmit_flush()
722 struct netdev_queue *tx_nq, struct tsnep_tx *tx, in tsnep_xdp_xmit_back() argument
740 xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, type); in tsnep_xdp_xmit_back()
751 static int tsnep_xdp_tx_map_zc(struct xdp_desc *xdpd, struct tsnep_tx *tx) in tsnep_xdp_tx_map_zc() argument
756 entry = &tx->entry[tx->write]; in tsnep_xdp_tx_map_zc()
759 dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr); in tsnep_xdp_tx_map_zc()
760 xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len); in tsnep_xdp_tx_map_zc()
765 entry->desc->tx = __cpu_to_le64(dma); in tsnep_xdp_tx_map_zc()
771 struct tsnep_tx *tx) in tsnep_xdp_xmit_frame_ring_zc() argument
775 length = tsnep_xdp_tx_map_zc(xdpd, tx); in tsnep_xdp_xmit_frame_ring_zc()
777 tsnep_tx_activate(tx, tx->write, length, true); in tsnep_xdp_xmit_frame_ring_zc()
778 tx->write = (tx->write + 1) & TSNEP_RING_MASK; in tsnep_xdp_xmit_frame_ring_zc()
781 static void tsnep_xdp_xmit_zc(struct tsnep_tx *tx) in tsnep_xdp_xmit_zc() argument
783 int desc_available = tsnep_tx_desc_available(tx); in tsnep_xdp_xmit_zc()
784 struct xdp_desc *descs = tx->xsk_pool->tx_descs; in tsnep_xdp_xmit_zc()
787 /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS in tsnep_xdp_xmit_zc()
788 * will be available for normal TX path and queue is stopped there if in tsnep_xdp_xmit_zc()
795 batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available); in tsnep_xdp_xmit_zc()
797 tsnep_xdp_xmit_frame_ring_zc(&descs[i], tx); in tsnep_xdp_xmit_zc()
805 tsnep_xdp_xmit_flush(tx); in tsnep_xdp_xmit_zc()
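
tsnep_xdp_xmit_zc() peeks a batch of descriptors from the AF_XDP pool, translates each descriptor's address to its pre-mapped DMA address, syncs it for the device, and queues it; the hardware is kicked once for the whole batch. The sketch below shows that loop with the standard pool API; example_queue_one() is a placeholder for the driver's activate/write-index handling:

#include <net/xdp_sock_drv.h>

/* Placeholder for the per-descriptor ring handling in the driver. */
void example_queue_one(dma_addr_t dma, u32 len);

static void example_xsk_xmit_batch(struct xsk_buff_pool *pool, u32 budget)
{
	struct xdp_desc *descs = pool->tx_descs;
	u32 batch, i;

	/* Grab up to 'budget' descriptors from the pool's TX ring. */
	batch = xsk_tx_peek_release_desc_batch(pool, budget);
	for (i = 0; i < batch; i++) {
		dma_addr_t dma = xsk_buff_raw_get_dma(pool, descs[i].addr);

		xsk_buff_raw_dma_sync_for_device(pool, dma, descs[i].len);
		example_queue_one(dma, descs[i].len);
	}
}
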
809 static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) in tsnep_tx_poll() argument
818 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_poll()
822 if (tx->read == tx->write) in tsnep_tx_poll()
825 entry = &tx->entry[tx->read]; in tsnep_tx_poll()
844 length = tsnep_tx_unmap(tx, tx->read, count); in tsnep_tx_poll()
876 tx->read = (tx->read + count) & TSNEP_RING_MASK; in tsnep_tx_poll()
878 tx->packets++; in tsnep_tx_poll()
879 tx->bytes += length + ETH_FCS_LEN; in tsnep_tx_poll()
884 if (tx->xsk_pool) { in tsnep_tx_poll()
886 xsk_tx_completed(tx->xsk_pool, xsk_frames); in tsnep_tx_poll()
887 if (xsk_uses_need_wakeup(tx->xsk_pool)) in tsnep_tx_poll()
888 xsk_set_tx_need_wakeup(tx->xsk_pool); in tsnep_tx_poll()
889 tsnep_xdp_xmit_zc(tx); in tsnep_tx_poll()
892 if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) && in tsnep_tx_poll()
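
tsnep_tx_poll() consumes completed descriptors from the read index, unmaps them, updates the packet and byte counters, and wakes the subqueue again once at least (MAX_SKB_FRAGS + 1) * 2 descriptors are free, a higher threshold than the stop watermark, which adds hysteresis. A sketch of the wake-up side:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Re-enable a stopped TX queue once enough descriptors have completed;
 * the doubled threshold mirrors the check in tsnep_tx_poll() above.
 */
static void example_tx_completion_wake(struct net_device *netdev,
				       u16 queue_index, int desc_available)
{
	struct netdev_queue *nq = netdev_get_tx_queue(netdev, queue_index);

	if (desc_available >= (MAX_SKB_FRAGS + 1) * 2 &&
	    netif_tx_queue_stopped(nq))
		netif_tx_wake_queue(nq);
}
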
902 static bool tsnep_tx_pending(struct tsnep_tx *tx) in tsnep_tx_pending() argument
908 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_pending()
911 if (tx->read != tx->write) { in tsnep_tx_pending()
912 entry = &tx->entry[tx->read]; in tsnep_tx_pending()
924 static int tsnep_tx_open(struct tsnep_tx *tx) in tsnep_tx_open() argument
928 retval = tsnep_tx_ring_create(tx); in tsnep_tx_open()
932 tsnep_tx_init(tx); in tsnep_tx_open()
937 static void tsnep_tx_close(struct tsnep_tx *tx) in tsnep_tx_close() argument
939 tsnep_tx_ring_cleanup(tx); in tsnep_tx_close()
1271 struct netdev_queue *tx_nq, struct tsnep_tx *tx) in tsnep_xdp_run_prog() argument
1284 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false)) in tsnep_xdp_run_prog()
1316 struct tsnep_tx *tx) in tsnep_xdp_run_prog_zc() argument
1334 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true)) in tsnep_xdp_run_prog_zc()
1352 struct netdev_queue *tx_nq, struct tsnep_tx *tx) in tsnep_finalize_xdp() argument
1356 tsnep_xdp_xmit_flush(tx); in tsnep_finalize_xdp()
1426 struct tsnep_tx *tx; in tsnep_rx_poll() local
1438 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll()
1499 &xdp_status, tx_nq, tx); in tsnep_rx_poll()
1515 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll()
1529 struct tsnep_tx *tx; in tsnep_rx_poll_zc() local
1541 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll_zc()
1599 &xdp_status, tx_nq, tx); in tsnep_rx_poll_zc()
1624 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll_zc()
1790 if (queue->tx && tsnep_tx_pending(queue->tx)) in tsnep_pending()
1806 if (queue->tx) in tsnep_poll()
1807 complete = tsnep_tx_poll(queue->tx, budget); in tsnep_poll()
1853 if (queue->tx && queue->rx) in tsnep_request_irq()
1856 else if (queue->tx) in tsnep_request_irq()
1857 snprintf(queue->name, sizeof(queue->name), "%s-tx-%d", in tsnep_request_irq()
1858 name, queue->tx->queue_index); in tsnep_request_irq()
1911 struct tsnep_tx *tx = queue->tx; in tsnep_queue_open() local
1917 /* choose TX queue for XDP_TX */ in tsnep_queue_open()
1918 if (tx) in tsnep_queue_open()
1919 rx->tx_queue_index = tx->queue_index; in tsnep_queue_open()
1971 if (queue->tx) in tsnep_queue_enable()
1972 tsnep_tx_enable(queue->tx); in tsnep_queue_enable()
1980 if (queue->tx) in tsnep_queue_disable()
1981 tsnep_tx_disable(queue->tx, &queue->napi); in tsnep_queue_disable()
1999 if (adapter->queue[i].tx) { in tsnep_netdev_open()
2000 retval = tsnep_tx_open(adapter->queue[i].tx); in tsnep_netdev_open()
2042 if (adapter->queue[i].tx) in tsnep_netdev_open()
2043 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_open()
2063 if (adapter->queue[i].tx) in tsnep_netdev_close()
2064 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_close()
2099 queue->tx->xsk_pool = pool; in tsnep_enable_xsk()
2120 queue->tx->xsk_pool = NULL; in tsnep_disable_xsk()
2142 return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]); in tsnep_netdev_xmit_frame()
2180 stats->tx_packets += adapter->tx[i].packets; in tsnep_netdev_get_stats64()
2181 stats->tx_bytes += adapter->tx[i].bytes; in tsnep_netdev_get_stats64()
2182 stats->tx_dropped += adapter->tx[i].dropped; in tsnep_netdev_get_stats64()
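
tsnep_netdev_get_stats64() simply sums the per-queue software counters into the device-wide stats. A minimal sketch over an assumed per-queue counter struct:

#include <linux/netdevice.h>

/* Assumed per-queue TX counters, mirroring the fields used above. */
struct example_tx_counters {
	u64 packets;
	u64 bytes;
	u64 dropped;
};

static void example_fill_tx_stats(struct rtnl_link_stats64 *stats,
				  const struct example_tx_counters *tx,
				  int queue_count)
{
	int i;

	for (i = 0; i < queue_count; i++) {
		stats->tx_packets += tx[i].packets;
		stats->tx_bytes += tx[i].bytes;
		stats->tx_dropped += tx[i].dropped;
	}
}
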
2298 return &adapter->tx[cpu]; in tsnep_xdp_get_tx()
2307 struct tsnep_tx *tx; in tsnep_netdev_xdp_xmit() local
2314 tx = tsnep_xdp_get_tx(adapter, cpu); in tsnep_netdev_xdp_xmit()
2315 nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index); in tsnep_netdev_xdp_xmit()
2320 xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx, in tsnep_netdev_xdp_xmit()
2332 tsnep_xdp_xmit_flush(tx); in tsnep_netdev_xdp_xmit()
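
tsnep_netdev_xdp_xmit() selects a TX ring based on the current CPU, takes that ring's netdev queue lock, queues as many frames as fit, and flushes the hardware once at the end. The sketch below shows that batching pattern only; the signature is simplified (the real ndo_xdp_xmit callback takes a flags argument, not a queue index), and the per-frame transmit and doorbell helpers are placeholders, not the driver's functions:

#include <linux/netdevice.h>
#include <linux/smp.h>
#include <net/xdp.h>

/* Placeholders for the driver's per-frame map/activate and doorbell write. */
bool example_xmit_one(struct xdp_frame *xdpf, u16 queue_index);
void example_flush(u16 queue_index);

static int example_xdp_xmit_many(struct net_device *netdev, int n,
				 struct xdp_frame **frames, u16 queue_index)
{
	struct netdev_queue *nq = netdev_get_tx_queue(netdev, queue_index);
	int nxmit;

	__netif_tx_lock(nq, smp_processor_id());

	for (nxmit = 0; nxmit < n; nxmit++) {
		if (!example_xmit_one(frames[nxmit], queue_index))
			break;
	}

	/* One doorbell write for the whole batch. */
	example_flush(queue_index);

	__netif_tx_unlock(nq);

	return nxmit;
}
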
2476 /* one TX/RX queue pair for netdev is mandatory */ in tsnep_queue_init()
2488 adapter->queue[0].tx = &adapter->tx[0]; in tsnep_queue_init()
2489 adapter->queue[0].tx->adapter = adapter; in tsnep_queue_init()
2490 adapter->queue[0].tx->addr = adapter->addr + TSNEP_QUEUE(0); in tsnep_queue_init()
2491 adapter->queue[0].tx->queue_index = 0; in tsnep_queue_init()
2505 /* add additional TX/RX queue pairs only if dedicated interrupt is in tsnep_queue_init()
2519 adapter->queue[i].tx = &adapter->tx[i]; in tsnep_queue_init()
2520 adapter->queue[i].tx->adapter = adapter; in tsnep_queue_init()
2521 adapter->queue[i].tx->addr = adapter->addr + TSNEP_QUEUE(i); in tsnep_queue_init()
2522 adapter->queue[i].tx->queue_index = i; in tsnep_queue_init()
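
tsnep_queue_init() gives every additional TX/RX queue pair its own register block; the per-queue MMIO base is just the adapter base plus a TSNEP_QUEUE(i) offset. A short sketch under an assumed register stride:

#include <linux/io.h>
#include <linux/types.h>

/* Assumed stride between per-queue register blocks. */
#define EXAMPLE_QUEUE_STRIDE	0x100
#define EXAMPLE_QUEUE(i)	((i) * EXAMPLE_QUEUE_STRIDE)

struct example_tx_queue {
	void __iomem *addr;	/* this queue's register block */
	int queue_index;
};

static void example_init_tx_queue(struct example_tx_queue *tx,
				  void __iomem *adapter_base, int index)
{
	tx->addr = adapter_base + EXAMPLE_QUEUE(index);
	tx->queue_index = index;
}
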