Lines matching full:rx in drivers/net/ethernet/engleder/tsnep_main.c (tsnep TSN endpoint Ethernet driver)

10  * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
100 /* handle TX/RX queue 0 interrupt */ in tsnep_irq()
116 /* handle TX/RX queue interrupt */ in tsnep_irq_txrx()
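Both interrupt paths defer the actual TX/RX work to NAPI. A hedged sketch of the per-queue handler, assuming the driver's tsnep_disable_irq() masking helper and an irq_mask field on the queue (the handler body itself is not among the matches):

    static irqreturn_t tsnep_irq_txrx_sketch(int irq, void *arg)
    {
            struct tsnep_queue *queue = arg;

            /* mask this queue's interrupt; it is re-enabled once the
             * NAPI poll has drained the rings
             */
            tsnep_disable_irq(queue->adapter, queue->irq_mask);
            napi_schedule(&queue->napi);

            return IRQ_HANDLED;
    }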
951 static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx) in tsnep_rx_ring_cleanup() argument
953 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_ring_cleanup()
958 entry = &rx->entry[i]; in tsnep_rx_ring_cleanup()
959 if (!rx->xsk_pool && entry->page) in tsnep_rx_ring_cleanup()
960 page_pool_put_full_page(rx->page_pool, entry->page, in tsnep_rx_ring_cleanup()
962 if (rx->xsk_pool && entry->xdp) in tsnep_rx_ring_cleanup()
968 if (rx->page_pool) in tsnep_rx_ring_cleanup()
969 page_pool_destroy(rx->page_pool); in tsnep_rx_ring_cleanup()
971 memset(rx->entry, 0, sizeof(rx->entry)); in tsnep_rx_ring_cleanup()
974 if (rx->page[i]) { in tsnep_rx_ring_cleanup()
975 dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i], in tsnep_rx_ring_cleanup()
976 rx->page_dma[i]); in tsnep_rx_ring_cleanup()
977 rx->page[i] = NULL; in tsnep_rx_ring_cleanup()
978 rx->page_dma[i] = 0; in tsnep_rx_ring_cleanup()
983 static int tsnep_rx_ring_create(struct tsnep_rx *rx) in tsnep_rx_ring_create() argument
985 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_ring_create()
993 rx->page[i] = in tsnep_rx_ring_create()
994 dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i], in tsnep_rx_ring_create()
996 if (!rx->page[i]) { in tsnep_rx_ring_create()
1001 entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; in tsnep_rx_ring_create()
1003 (((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j); in tsnep_rx_ring_create()
1006 entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j; in tsnep_rx_ring_create()
1018 rx->page_pool = page_pool_create(&pp_params); in tsnep_rx_ring_create()
1019 if (IS_ERR(rx->page_pool)) { in tsnep_rx_ring_create()
1020 retval = PTR_ERR(rx->page_pool); in tsnep_rx_ring_create()
1021 rx->page_pool = NULL; in tsnep_rx_ring_create()
1026 entry = &rx->entry[i]; in tsnep_rx_ring_create()
1027 next_entry = &rx->entry[(i + 1) & TSNEP_RING_MASK]; in tsnep_rx_ring_create()
1034 tsnep_rx_ring_cleanup(rx); in tsnep_rx_ring_create()
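tsnep_rx_ring_create() spreads the descriptor ring across DMA-coherent pages rather than one large allocation. A minimal standalone sketch of the index arithmetic, with hypothetical sizes standing in for the tsnep.h constants:

    #include <stdint.h>
    #include <stddef.h>

    #define DESC_SIZE        256                 /* hypothetical; TSNEP_DESC_SIZE */
    #define ENTRIES_PER_PAGE (4096 / DESC_SIZE)  /* TSNEP_RING_ENTRIES_PER_PAGE */

    /* Entry j of DMA-coherent page i sits at ring index
     * i * ENTRIES_PER_PAGE + j; the CPU and DMA views of a descriptor
     * share the same byte offset into the page (lines 1001-1006).
     */
    static void locate_desc(uint8_t *page_cpu, uint64_t page_dma, int j,
                            uint8_t **desc_cpu, uint64_t *desc_dma)
    {
            *desc_cpu = page_cpu + (size_t)DESC_SIZE * j;
            *desc_dma = page_dma + (uint64_t)DESC_SIZE * j;
    }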
1038 static void tsnep_rx_init(struct tsnep_rx *rx) in tsnep_rx_init() argument
1042 dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; in tsnep_rx_init()
1043 iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW); in tsnep_rx_init()
1044 iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH); in tsnep_rx_init()
1045 rx->write = 0; in tsnep_rx_init()
1046 rx->read = 0; in tsnep_rx_init()
1047 rx->owner_counter = 1; in tsnep_rx_init()
1048 rx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_rx_init()
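tsnep_rx_init() programs the 64-bit base address of descriptor 0 as two 32-bit register writes, with the owner-counter reset flag carried in the low word (line 1042), then zeroes the software ring state. The splitting helpers presumably have this shape (assumed; the real definitions live in tsnep.h):

    #define DMA_ADDR_LOW(addr)  ((u32)((addr) & 0xFFFFFFFFULL))
    #define DMA_ADDR_HIGH(addr) ((u32)(((addr) >> 32) & 0xFFFFFFFFULL))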
1051 static void tsnep_rx_enable(struct tsnep_rx *rx) in tsnep_rx_enable() argument
1056 iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL); in tsnep_rx_enable()
1059 static void tsnep_rx_disable(struct tsnep_rx *rx) in tsnep_rx_disable() argument
1063 iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL); in tsnep_rx_disable()
1064 readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val, in tsnep_rx_disable()
1069 static int tsnep_rx_desc_available(struct tsnep_rx *rx) in tsnep_rx_desc_available() argument
1071 if (rx->read <= rx->write) in tsnep_rx_desc_available()
1072 return TSNEP_RING_SIZE - rx->write + rx->read - 1; in tsnep_rx_desc_available()
1074 return rx->read - rx->write - 1; in tsnep_rx_desc_available()
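tsnep_rx_desc_available() is classic ring-buffer accounting: one slot is sacrificed so that read == write always means empty. The same math as a standalone function (RING_SIZE is a hypothetical stand-in for TSNEP_RING_SIZE):

    #define RING_SIZE 256   /* hypothetical power-of-two ring size */

    static int ring_desc_available(int read, int write)
    {
            /* one slot stays empty, so a full ring holds RING_SIZE - 1 */
            if (read <= write)
                    return RING_SIZE - write + read - 1;
            return read - write - 1;
    }

With read == write this returns RING_SIZE - 1, the maximum fill level; a return value of 0 means the ring is full.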
1077 static void tsnep_rx_free_page_buffer(struct tsnep_rx *rx) in tsnep_rx_free_page_buffer() argument
1084 page = rx->page_buffer; in tsnep_rx_free_page_buffer()
1086 page_pool_put_full_page(rx->page_pool, *page, false); in tsnep_rx_free_page_buffer()
1092 static int tsnep_rx_alloc_page_buffer(struct tsnep_rx *rx) in tsnep_rx_alloc_page_buffer() argument
1100 rx->page_buffer[i] = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_alloc_page_buffer()
1101 if (!rx->page_buffer[i]) { in tsnep_rx_alloc_page_buffer()
1102 tsnep_rx_free_page_buffer(rx); in tsnep_rx_alloc_page_buffer()
1111 static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry, in tsnep_rx_set_page() argument
1117 entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET); in tsnep_rx_set_page()
1120 static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index) in tsnep_rx_alloc_buffer() argument
1122 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_alloc_buffer()
1125 page = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_alloc_buffer()
1128 tsnep_rx_set_page(rx, entry, page); in tsnep_rx_alloc_buffer()
1133 static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index) in tsnep_rx_reuse_buffer() argument
1135 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_reuse_buffer()
1136 struct tsnep_rx_entry *read = &rx->entry[rx->read]; in tsnep_rx_reuse_buffer()
1138 tsnep_rx_set_page(rx, entry, read->page); in tsnep_rx_reuse_buffer()
1142 static void tsnep_rx_activate(struct tsnep_rx *rx, int index) in tsnep_rx_activate() argument
1144 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_activate()
1149 if (index == rx->increment_owner_counter) { in tsnep_rx_activate()
1150 rx->owner_counter++; in tsnep_rx_activate()
1151 if (rx->owner_counter == 4) in tsnep_rx_activate()
1152 rx->owner_counter = 1; in tsnep_rx_activate()
1153 rx->increment_owner_counter--; in tsnep_rx_activate()
1154 if (rx->increment_owner_counter < 0) in tsnep_rx_activate()
1155 rx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_rx_activate()
1158 (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & in tsnep_rx_activate()
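tsnep_rx_activate() stamps each armed descriptor with an owner counter that cycles 1..3, bumped at a marker index that walks backwards one slot per cycle; the counter lets the hardware tell freshly armed descriptors from stale ones across ring wraps. The bump logic as a standalone function, reusing RING_SIZE from the sketch above:

    static void advance_owner_counter(int index, int *owner_counter,
                                      int *increment_owner_counter)
    {
            if (index != *increment_owner_counter)
                    return;

            /* counter cycles 1, 2, 3 (lines 1150-1152) */
            (*owner_counter)++;
            if (*owner_counter == 4)
                    *owner_counter = 1;

            /* bump index moves back one slot per cycle (lines 1153-1155) */
            (*increment_owner_counter)--;
            if (*increment_owner_counter < 0)
                    *increment_owner_counter = RING_SIZE - 1;
    }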
1169 static int tsnep_rx_alloc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_alloc() argument
1175 index = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc()
1177 if (unlikely(tsnep_rx_alloc_buffer(rx, index))) { in tsnep_rx_alloc()
1178 rx->alloc_failed++; in tsnep_rx_alloc()
1183 tsnep_rx_reuse_buffer(rx, index); in tsnep_rx_alloc()
1188 tsnep_rx_activate(rx, index); in tsnep_rx_alloc()
1192 rx->write = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc()
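tsnep_rx_alloc() arms count descriptors starting at the write index. On an allocation failure it bumps alloc_failed and, when the caller allows it, falls back to tsnep_rx_reuse_buffer() so at least one descriptor still reaches the hardware. Skeleton of the loop (the i == 0 restriction on reuse is assumed, not visible in the matches above):

    int i, index;
    bool failed = false;

    for (i = 0; i < count && !failed; i++) {
            index = (rx->write + i) & TSNEP_RING_MASK;

            if (unlikely(tsnep_rx_alloc_buffer(rx, index))) {
                    rx->alloc_failed++;
                    failed = true;

                    /* assumed: reuse only if nothing was allocated yet;
                     * the aliased read buffer's contents are lost
                     */
                    if (i == 0 && reuse)
                            tsnep_rx_reuse_buffer(rx, index);
                    else
                            break;
            }

            tsnep_rx_activate(rx, index);
    }

    rx->write = (rx->write + i) & TSNEP_RING_MASK;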
1197 static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_refill() argument
1201 desc_refilled = tsnep_rx_alloc(rx, count, reuse); in tsnep_rx_refill()
1203 tsnep_rx_enable(rx); in tsnep_rx_refill()
1208 static void tsnep_rx_set_xdp(struct tsnep_rx *rx, struct tsnep_rx_entry *entry, in tsnep_rx_set_xdp() argument
1214 entry->desc->rx = __cpu_to_le64(entry->dma); in tsnep_rx_set_xdp()
1217 static void tsnep_rx_reuse_buffer_zc(struct tsnep_rx *rx, int index) in tsnep_rx_reuse_buffer_zc() argument
1219 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_reuse_buffer_zc()
1220 struct tsnep_rx_entry *read = &rx->entry[rx->read]; in tsnep_rx_reuse_buffer_zc()
1222 tsnep_rx_set_xdp(rx, entry, read->xdp); in tsnep_rx_reuse_buffer_zc()
1226 static int tsnep_rx_alloc_zc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_alloc_zc() argument
1231 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count); in tsnep_rx_alloc_zc()
1233 int index = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc_zc()
1234 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_alloc_zc()
1236 tsnep_rx_set_xdp(rx, entry, rx->xdp_batch[i]); in tsnep_rx_alloc_zc()
1237 tsnep_rx_activate(rx, index); in tsnep_rx_alloc_zc()
1240 rx->alloc_failed++; in tsnep_rx_alloc_zc()
1243 tsnep_rx_reuse_buffer_zc(rx, rx->write); in tsnep_rx_alloc_zc()
1244 tsnep_rx_activate(rx, rx->write); in tsnep_rx_alloc_zc()
1249 rx->write = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc_zc()
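The zero-copy variant fills the ring with one batched call into the XSK pool instead of per-buffer page-pool allocations; an empty batch triggers the same reuse fallback, this time aliasing the read entry's xdp_buff. Usage sketch of the standard batch API:

    int i, allocated;

    allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count);
    for (i = 0; i < allocated; i++) {
            int index = (rx->write + i) & TSNEP_RING_MASK;

            tsnep_rx_set_xdp(rx, &rx->entry[index], rx->xdp_batch[i]);
            tsnep_rx_activate(rx, index);
    }
    if (!allocated) {
            rx->alloc_failed++;
            /* assumed fallback: re-arm the write slot with the read
             * entry's buffer so one descriptor stays live (lines 1243-1244)
             */
            if (reuse) {
                    tsnep_rx_reuse_buffer_zc(rx, rx->write);
                    tsnep_rx_activate(rx, rx->write);
            }
    }
    /* rx->write then advances by the number of slots armed (line 1249) */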
1254 static void tsnep_rx_free_zc(struct tsnep_rx *rx) in tsnep_rx_free_zc() argument
1259 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_free_zc()
1267 static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_refill_zc() argument
1271 desc_refilled = tsnep_rx_alloc_zc(rx, count, reuse); in tsnep_rx_refill_zc()
1273 tsnep_rx_enable(rx); in tsnep_rx_refill_zc()
1278 static void tsnep_xsk_rx_need_wakeup(struct tsnep_rx *rx, int desc_available) in tsnep_xsk_rx_need_wakeup() argument
1281 xsk_set_rx_need_wakeup(rx->xsk_pool); in tsnep_xsk_rx_need_wakeup()
1283 xsk_clear_rx_need_wakeup(rx->xsk_pool); in tsnep_xsk_rx_need_wakeup()
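tsnep_xsk_rx_need_wakeup() flips the XSK need_wakeup hint depending on whether the fill queue could satisfy the ring. The condition is not visible in the matches, so this reconstruction is assumed:

    /* assumed: descriptors still missing means user space must refill
     * the XSK fill queue and kick the driver
     */
    if (desc_available)
            xsk_set_rx_need_wakeup(rx->xsk_pool);
    else
            xsk_clear_rx_need_wakeup(rx->xsk_pool);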
1286 static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog, in tsnep_xdp_run_prog() argument
1301 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false)) in tsnep_xdp_run_prog()
1306 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0) in tsnep_xdp_run_prog()
1311 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog()
1315 trace_xdp_exception(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog()
1324 page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data), in tsnep_xdp_run_prog()
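tsnep_xdp_run_prog() is the standard XDP verdict switch; true means the buffer was consumed, false hands it to the regular skb path. A condensed sketch, with the driver's XDP_TX helper reduced to a comment; everything else is core kernel API. (The zero-copy variant below orders the cases differently, checking XDP_REDIRECT first since that is its hot path.)

    u32 act = bpf_prog_run_xdp(prog, xdp);

    switch (act) {
    case XDP_PASS:
            return false;                   /* stack receives the frame */
    case XDP_TX:
            /* tsnep_xdp_xmit_back() queues it on the paired TX ring;
             * failure falls through to the exception path (line 1301)
             */
            return true;
    case XDP_REDIRECT:
            if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
                    goto out_failure;
            return true;
    default:
            bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
            fallthrough;
    case XDP_ABORTED:
    out_failure:
            trace_xdp_exception(rx->adapter->netdev, prog, act);
            fallthrough;
    case XDP_DROP:
            page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data),
                               -1, true);   /* recycle; -1 = sync full buffer */
            return true;
    }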
1330 static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog, in tsnep_xdp_run_prog_zc() argument
1341 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0) in tsnep_xdp_run_prog_zc()
1351 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true)) in tsnep_xdp_run_prog_zc()
1356 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog_zc()
1360 trace_xdp_exception(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog_zc()
1381 static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page, in tsnep_build_skb() argument
1394 if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) { in tsnep_build_skb()
1406 skb_record_rx_queue(skb, rx->queue_index); in tsnep_build_skb()
1407 skb->protocol = eth_type_trans(skb, rx->adapter->netdev); in tsnep_build_skb()
1412 static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_page() argument
1417 skb = tsnep_build_skb(rx, page, length); in tsnep_rx_page()
1421 rx->packets++; in tsnep_rx_page()
1422 rx->bytes += length; in tsnep_rx_page()
1424 rx->multicast++; in tsnep_rx_page()
1428 page_pool_recycle_direct(rx->page_pool, page); in tsnep_rx_page()
1430 rx->dropped++; in tsnep_rx_page()
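tsnep_rx_page() is the copy-mode delivery tail: build the skb, hand it to GRO, and recycle the page straight back into the pool if skb construction fails. A hedged sketch; the multicast test and the skb_mark_for_recycle() call are assumed, the counters match lines 1421-1430:

    skb = tsnep_build_skb(rx, page, length);
    if (skb) {
            skb_mark_for_recycle(skb);      /* assumed: page-pool recycling */

            rx->packets++;
            rx->bytes += length;
            if (skb->pkt_type == PACKET_MULTICAST)  /* assumed condition */
                    rx->multicast++;

            napi_gro_receive(napi, skb);
    } else {
            page_pool_recycle_direct(rx->page_pool, page);
            rx->dropped++;
    }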
1434 static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_poll() argument
1437 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_poll()
1449 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_poll()
1450 dma_dir = page_pool_get_dma_dir(rx->page_pool); in tsnep_rx_poll()
1451 prog = READ_ONCE(rx->adapter->xdp_prog); in tsnep_rx_poll()
1453 tx_nq = netdev_get_tx_queue(rx->adapter->netdev, in tsnep_rx_poll()
1454 rx->tx_queue_index); in tsnep_rx_poll()
1455 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll()
1457 xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq); in tsnep_rx_poll()
1460 while (likely(done < budget) && (rx->read != rx->write)) { in tsnep_rx_poll()
1461 entry = &rx->entry[rx->read]; in tsnep_rx_poll()
1471 desc_available -= tsnep_rx_refill(rx, desc_available, in tsnep_rx_poll()
1475 * empty RX ring, thus buffer cannot be used for in tsnep_rx_poll()
1476 * RX processing in tsnep_rx_poll()
1478 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll()
1481 rx->dropped++; in tsnep_rx_poll()
1498 /* RX metadata with timestamps is in front of actual data, in tsnep_rx_poll()
1500 * consider metadata size as offset of actual data during RX in tsnep_rx_poll()
1505 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll()
1515 consume = tsnep_xdp_run_prog(rx, prog, &xdp, in tsnep_rx_poll()
1518 rx->packets++; in tsnep_rx_poll()
1519 rx->bytes += length; in tsnep_rx_poll()
1527 tsnep_rx_page(rx, napi, entry->page, length); in tsnep_rx_poll()
1532 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll()
1535 tsnep_rx_refill(rx, desc_available, false); in tsnep_rx_poll()
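tsnep_rx_poll() has the usual NAPI shape: consume descriptors between read and write up to budget, refill early enough that the reuse fallback can still claim the entry being read, then run XDP or deliver via tsnep_rx_page(). Skeleton of the loop (the refill threshold constant is assumed):

    while (likely(done < budget) && (rx->read != rx->write)) {
            struct tsnep_rx_entry *entry = &rx->entry[rx->read];

            /* refill before consuming; if allocation fails and this
             * entry's buffer gets reused, the packet is dropped, since
             * its page is now shared with a freshly armed descriptor
             */
            if (desc_available >= TSNEP_RING_RX_REFILL)     /* assumed name */
                    desc_available -= tsnep_rx_refill(rx, desc_available,
                                                      true);

            rx->read = (rx->read + 1) & TSNEP_RING_MASK;
            desc_available++;
            done++;

            /* ... DMA sync, XDP verdict, tsnep_rx_page() delivery ... */
    }
    tsnep_rx_refill(rx, desc_available, false);  /* final top-up, line 1535 */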
1540 static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_poll_zc() argument
1553 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_poll_zc()
1554 prog = READ_ONCE(rx->adapter->xdp_prog); in tsnep_rx_poll_zc()
1556 tx_nq = netdev_get_tx_queue(rx->adapter->netdev, in tsnep_rx_poll_zc()
1557 rx->tx_queue_index); in tsnep_rx_poll_zc()
1558 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll_zc()
1561 while (likely(done < budget) && (rx->read != rx->write)) { in tsnep_rx_poll_zc()
1562 entry = &rx->entry[rx->read]; in tsnep_rx_poll_zc()
1572 desc_available -= tsnep_rx_refill_zc(rx, desc_available, in tsnep_rx_poll_zc()
1576 * empty RX ring, thus buffer cannot be used for in tsnep_rx_poll_zc()
1577 * RX processing in tsnep_rx_poll_zc()
1579 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll_zc()
1582 rx->dropped++; in tsnep_rx_poll_zc()
1599 /* RX metadata with timestamps is in front of actual data, in tsnep_rx_poll_zc()
1601 * consider metadata size as offset of actual data during RX in tsnep_rx_poll_zc()
1606 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll_zc()
1615 consume = tsnep_xdp_run_prog_zc(rx, prog, entry->xdp, in tsnep_rx_poll_zc()
1618 rx->packets++; in tsnep_rx_poll_zc()
1619 rx->bytes += length; in tsnep_rx_poll_zc()
1627 page = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_poll_zc()
1632 tsnep_rx_page(rx, napi, page, length); in tsnep_rx_poll_zc()
1634 rx->dropped++; in tsnep_rx_poll_zc()
1641 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll_zc()
1644 desc_available -= tsnep_rx_refill_zc(rx, desc_available, false); in tsnep_rx_poll_zc()
1646 if (xsk_uses_need_wakeup(rx->xsk_pool)) { in tsnep_rx_poll_zc()
1647 tsnep_xsk_rx_need_wakeup(rx, desc_available); in tsnep_rx_poll_zc()
1655 static bool tsnep_rx_pending(struct tsnep_rx *rx) in tsnep_rx_pending() argument
1659 if (rx->read != rx->write) { in tsnep_rx_pending()
1660 entry = &rx->entry[rx->read]; in tsnep_rx_pending()
1670 static int tsnep_rx_open(struct tsnep_rx *rx) in tsnep_rx_open() argument
1675 retval = tsnep_rx_ring_create(rx); in tsnep_rx_open()
1679 tsnep_rx_init(rx); in tsnep_rx_open()
1681 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_open()
1682 if (rx->xsk_pool) in tsnep_rx_open()
1683 retval = tsnep_rx_alloc_zc(rx, desc_available, false); in tsnep_rx_open()
1685 retval = tsnep_rx_alloc(rx, desc_available, false); in tsnep_rx_open()
1695 if (rx->xsk_pool) { in tsnep_rx_open()
1696 retval = tsnep_rx_alloc_page_buffer(rx); in tsnep_rx_open()
1704 tsnep_rx_ring_cleanup(rx); in tsnep_rx_open()
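tsnep_rx_open() strings the earlier pieces together: create the ring, program the base address, prefill every available descriptor via the zero-copy or page-pool path, and for XSK additionally pre-allocate a spare page set so the queue can fall back to page-pool buffers when the pool is detached. Sketch of the sequence with condensed error unwinding (the partial-fill check is assumed):

    retval = tsnep_rx_ring_create(rx);
    if (retval)
            return retval;

    tsnep_rx_init(rx);

    desc_available = tsnep_rx_desc_available(rx);
    retval = rx->xsk_pool ? tsnep_rx_alloc_zc(rx, desc_available, false)
                          : tsnep_rx_alloc(rx, desc_available, false);
    if (retval != desc_available) {  /* assumed: partial fill is fatal here */
            retval = -ENOMEM;
            goto failed;
    }

    if (rx->xsk_pool) {
            retval = tsnep_rx_alloc_page_buffer(rx);
            if (retval)
                    goto failed;
    }

    return 0;

    failed:
            tsnep_rx_ring_cleanup(rx);
            return retval;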
1708 static void tsnep_rx_close(struct tsnep_rx *rx) in tsnep_rx_close() argument
1710 if (rx->xsk_pool) in tsnep_rx_close()
1711 tsnep_rx_free_page_buffer(rx); in tsnep_rx_close()
1713 tsnep_rx_ring_cleanup(rx); in tsnep_rx_close()
1716 static void tsnep_rx_reopen(struct tsnep_rx *rx) in tsnep_rx_reopen() argument
1718 struct page **page = rx->page_buffer; in tsnep_rx_reopen()
1721 tsnep_rx_init(rx); in tsnep_rx_reopen()
1724 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_reopen()
1734 tsnep_rx_set_page(rx, entry, *page); in tsnep_rx_reopen()
1735 tsnep_rx_activate(rx, rx->write); in tsnep_rx_reopen()
1736 rx->write++; in tsnep_rx_reopen()
1744 static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx) in tsnep_rx_reopen_xsk() argument
1746 struct page **page = rx->page_buffer; in tsnep_rx_reopen_xsk()
1750 tsnep_rx_init(rx); in tsnep_rx_reopen_xsk()
1756 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, in tsnep_rx_reopen_xsk()
1760 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_reopen_xsk()
1779 tsnep_rx_set_xdp(rx, entry, in tsnep_rx_reopen_xsk()
1780 rx->xdp_batch[allocated - 1]); in tsnep_rx_reopen_xsk()
1781 tsnep_rx_activate(rx, rx->write); in tsnep_rx_reopen_xsk()
1782 rx->write++; in tsnep_rx_reopen_xsk()
1792 if (xsk_uses_need_wakeup(rx->xsk_pool)) in tsnep_rx_reopen_xsk()
1793 tsnep_xsk_rx_need_wakeup(rx, tsnep_rx_desc_available(rx)); in tsnep_rx_reopen_xsk()
1801 if (queue->rx && tsnep_rx_pending(queue->rx)) in tsnep_pending()
1821 if (queue->rx) { in tsnep_poll()
1822 done = queue->rx->xsk_pool ? in tsnep_poll()
1823 tsnep_rx_poll_zc(queue->rx, napi, budget) : in tsnep_poll()
1824 tsnep_rx_poll(queue->rx, napi, budget); in tsnep_poll()
1861 if (queue->tx && queue->rx) in tsnep_request_irq()
1863 name, queue->rx->queue_index); in tsnep_request_irq()
1868 snprintf(queue->name, sizeof(queue->name), "%s-rx-%d", in tsnep_request_irq()
1869 name, queue->rx->queue_index); in tsnep_request_irq()
1901 struct tsnep_rx *rx = queue->rx; in tsnep_queue_close() local
1905 if (rx) { in tsnep_queue_close()
1906 if (xdp_rxq_info_is_reg(&rx->xdp_rxq)) in tsnep_queue_close()
1907 xdp_rxq_info_unreg(&rx->xdp_rxq); in tsnep_queue_close()
1908 if (xdp_rxq_info_is_reg(&rx->xdp_rxq_zc)) in tsnep_queue_close()
1909 xdp_rxq_info_unreg(&rx->xdp_rxq_zc); in tsnep_queue_close()
1918 struct tsnep_rx *rx = queue->rx; in tsnep_queue_open() local
1924 if (rx) { in tsnep_queue_open()
1927 rx->tx_queue_index = tx->queue_index; in tsnep_queue_open()
1928 else if (rx->queue_index < adapter->num_tx_queues) in tsnep_queue_open()
1929 rx->tx_queue_index = rx->queue_index; in tsnep_queue_open()
1931 rx->tx_queue_index = 0; in tsnep_queue_open()
1937 retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev, in tsnep_queue_open()
1938 rx->queue_index, queue->napi.napi_id); in tsnep_queue_open()
1941 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, in tsnep_queue_open()
1943 rx->page_pool); in tsnep_queue_open()
1946 retval = xdp_rxq_info_reg(&rx->xdp_rxq_zc, adapter->netdev, in tsnep_queue_open()
1947 rx->queue_index, queue->napi.napi_id); in tsnep_queue_open()
1950 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc, in tsnep_queue_open()
1955 if (rx->xsk_pool) in tsnep_queue_open()
1956 xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc); in tsnep_queue_open()
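tsnep_queue_open() registers two xdp_rxq_info structs per RX queue: one backed by the page pool for copy mode, one with the XSK memory model for zero-copy, so attaching or detaching an XSK pool never requires re-registration. A sketch of the dual registration using the standard mem-model constants (error handling omitted):

    /* copy-mode rxq, backed by the queue's page pool */
    xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev,
                     rx->queue_index, queue->napi.napi_id);
    xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, MEM_TYPE_PAGE_POOL,
                               rx->page_pool);

    /* zero-copy rxq, used while an XSK pool is attached */
    xdp_rxq_info_reg(&rx->xdp_rxq_zc, adapter->netdev,
                     rx->queue_index, queue->napi.napi_id);
    xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc, MEM_TYPE_XSK_BUFF_POOL,
                               NULL);

    if (rx->xsk_pool)
            xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc);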
1988 if (queue->rx) { in tsnep_queue_enable()
1989 netif_queue_set_napi(adapter->netdev, queue->rx->queue_index, in tsnep_queue_enable()
1991 tsnep_rx_enable(queue->rx); in tsnep_queue_enable()
1999 if (queue->rx) in tsnep_queue_disable()
2000 netif_queue_set_napi(adapter->netdev, queue->rx->queue_index, in tsnep_queue_disable()
2012 /* disable RX after NAPI polling has been disabled, because RX can be in tsnep_queue_disable()
2015 if (queue->rx) in tsnep_queue_disable()
2016 tsnep_rx_disable(queue->rx); in tsnep_queue_disable()
2030 if (adapter->queue[i].rx) { in tsnep_netdev_open()
2031 retval = tsnep_rx_open(adapter->queue[i].rx); in tsnep_netdev_open()
2066 if (adapter->queue[i].rx) in tsnep_netdev_open()
2067 tsnep_rx_close(adapter->queue[i].rx); in tsnep_netdev_open()
2087 if (adapter->queue[i].rx) in tsnep_netdev_close()
2088 tsnep_rx_close(adapter->queue[i].rx); in tsnep_netdev_close()
2105 queue->rx->page_buffer = kcalloc(TSNEP_RING_SIZE, in tsnep_enable_xsk()
2106 sizeof(*queue->rx->page_buffer), in tsnep_enable_xsk()
2108 if (!queue->rx->page_buffer) in tsnep_enable_xsk()
2110 queue->rx->xdp_batch = kcalloc(TSNEP_RING_SIZE, in tsnep_enable_xsk()
2111 sizeof(*queue->rx->xdp_batch), in tsnep_enable_xsk()
2113 if (!queue->rx->xdp_batch) { in tsnep_enable_xsk()
2114 kfree(queue->rx->page_buffer); in tsnep_enable_xsk()
2115 queue->rx->page_buffer = NULL; in tsnep_enable_xsk()
2120 xsk_pool_set_rxq_info(pool, &queue->rx->xdp_rxq_zc); in tsnep_enable_xsk()
2126 queue->rx->xsk_pool = pool; in tsnep_enable_xsk()
2129 tsnep_rx_reopen_xsk(queue->rx); in tsnep_enable_xsk()
2143 tsnep_rx_free_zc(queue->rx); in tsnep_disable_xsk()
2145 queue->rx->xsk_pool = NULL; in tsnep_disable_xsk()
2149 tsnep_rx_reopen(queue->rx); in tsnep_disable_xsk()
2153 kfree(queue->rx->xdp_batch); in tsnep_disable_xsk()
2154 queue->rx->xdp_batch = NULL; in tsnep_disable_xsk()
2155 kfree(queue->rx->page_buffer); in tsnep_disable_xsk()
2156 queue->rx->page_buffer = NULL; in tsnep_disable_xsk()
2211 stats->rx_packets += adapter->rx[i].packets; in tsnep_netdev_get_stats64()
2212 stats->rx_bytes += adapter->rx[i].bytes; in tsnep_netdev_get_stats64()
2213 stats->rx_dropped += adapter->rx[i].dropped; in tsnep_netdev_get_stats64()
2214 stats->multicast += adapter->rx[i].multicast; in tsnep_netdev_get_stats64()
2403 /* initialize RX filtering, at least configured MAC address and in tsnep_mac_init()
2502 /* one TX/RX queue pair for netdev is mandatory */ in tsnep_queue_init()
2518 adapter->queue[0].rx = &adapter->rx[0]; in tsnep_queue_init()
2519 adapter->queue[0].rx->adapter = adapter; in tsnep_queue_init()
2520 adapter->queue[0].rx->addr = adapter->addr + TSNEP_QUEUE(0); in tsnep_queue_init()
2521 adapter->queue[0].rx->queue_index = 0; in tsnep_queue_init()
2531 /* add additional TX/RX queue pairs only if dedicated interrupt is in tsnep_queue_init()
2549 adapter->queue[i].rx = &adapter->rx[i]; in tsnep_queue_init()
2550 adapter->queue[i].rx->adapter = adapter; in tsnep_queue_init()
2551 adapter->queue[i].rx->addr = adapter->addr + TSNEP_QUEUE(i); in tsnep_queue_init()
2552 adapter->queue[i].rx->queue_index = i; in tsnep_queue_init()