Lines matching full:rx (tsnep Ethernet driver, drivers/net/ethernet/engleder/tsnep_main.c)
10 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
98 /* handle TX/RX queue 0 interrupt */ in tsnep_irq()
114 /* handle TX/RX queue interrupt */ in tsnep_irq_txrx()
942 static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx) in tsnep_rx_ring_cleanup() argument
944 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_ring_cleanup()
949 entry = &rx->entry[i]; in tsnep_rx_ring_cleanup()
950 if (!rx->xsk_pool && entry->page) in tsnep_rx_ring_cleanup()
951 page_pool_put_full_page(rx->page_pool, entry->page, in tsnep_rx_ring_cleanup()
953 if (rx->xsk_pool && entry->xdp) in tsnep_rx_ring_cleanup()
959 if (rx->page_pool) in tsnep_rx_ring_cleanup()
960 page_pool_destroy(rx->page_pool); in tsnep_rx_ring_cleanup()
962 memset(rx->entry, 0, sizeof(rx->entry)); in tsnep_rx_ring_cleanup()
965 if (rx->page[i]) { in tsnep_rx_ring_cleanup()
966 dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i], in tsnep_rx_ring_cleanup()
967 rx->page_dma[i]); in tsnep_rx_ring_cleanup()
968 rx->page[i] = NULL; in tsnep_rx_ring_cleanup()
969 rx->page_dma[i] = 0; in tsnep_rx_ring_cleanup()
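
The cleanup fragments encode an ordering contract: buffers go back to their allocator first (page-pool pages or XSK buffs, depending on mode), the page pool is destroyed only after every page has been returned, and the DMA-coherent descriptor pages are freed last because the ring entries embedded in them are still touched on the way out. A condensed sketch of that order (names taken from the fragments, loop bounds assumed, not the driver's exact code):

	/* sketch: RX ring teardown order */
	for (i = 0; i < TSNEP_RING_SIZE; i++)
		if (!rx->xsk_pool && rx->entry[i].page)
			page_pool_put_full_page(rx->page_pool,
						rx->entry[i].page, false);
	if (rx->page_pool)
		page_pool_destroy(rx->page_pool);	/* only after all pages returned */
	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++)	/* TSNEP_RING_PAGE_COUNT assumed */
		if (rx->page[i])
			dma_free_coherent(dmadev, PAGE_SIZE,
					  rx->page[i], rx->page_dma[i]);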
974 static int tsnep_rx_ring_create(struct tsnep_rx *rx) in tsnep_rx_ring_create() argument
976 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_ring_create()
984 rx->page[i] = in tsnep_rx_ring_create()
985 dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i], in tsnep_rx_ring_create()
987 if (!rx->page[i]) { in tsnep_rx_ring_create()
992 entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; in tsnep_rx_ring_create()
994 (((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j); in tsnep_rx_ring_create()
997 entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j; in tsnep_rx_ring_create()
1009 rx->page_pool = page_pool_create(&pp_params); in tsnep_rx_ring_create()
1010 if (IS_ERR(rx->page_pool)) { in tsnep_rx_ring_create()
1011 retval = PTR_ERR(rx->page_pool); in tsnep_rx_ring_create()
1012 rx->page_pool = NULL; in tsnep_rx_ring_create()
1017 entry = &rx->entry[i]; in tsnep_rx_ring_create()
1018 next_entry = &rx->entry[(i + 1) & TSNEP_RING_MASK]; in tsnep_rx_ring_create()
1025 tsnep_rx_ring_cleanup(rx); in tsnep_rx_ring_create()
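
The create fragments pack TSNEP_RING_ENTRIES_PER_PAGE fixed-size descriptors into each DMA-coherent page: entry n lives in page n / entries_per_page at byte offset TSNEP_DESC_SIZE * (n % entries_per_page), and desc_dma is derived from the page's DMA handle with the same offset, so the CPU and device views of a descriptor stay in sync. A runnable standalone demo of the arithmetic (the constants are illustrative, the real values come from the driver headers):

	#include <stdio.h>

	#define PAGE_SIZE		4096
	#define DESC_SIZE		256			/* assumed */
	#define ENTRIES_PER_PAGE	(PAGE_SIZE / DESC_SIZE)

	int main(void)
	{
		for (int n = 0; n < 40; n += 13) {
			int page = n / ENTRIES_PER_PAGE;
			int offset = DESC_SIZE * (n % ENTRIES_PER_PAGE);
			printf("entry %2d -> page %d, byte offset 0x%03x\n",
			       n, page, offset);
		}
		return 0;
	}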
1029 static void tsnep_rx_init(struct tsnep_rx *rx) in tsnep_rx_init() argument
1033 dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; in tsnep_rx_init()
1034 iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW); in tsnep_rx_init()
1035 iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH); in tsnep_rx_init()
1036 rx->write = 0; in tsnep_rx_init()
1037 rx->read = 0; in tsnep_rx_init()
1038 rx->owner_counter = 1; in tsnep_rx_init()
1039 rx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_rx_init()
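
tsnep_rx_init() ORs TSNEP_RESET_OWNER_COUNTER into the descriptor base address before splitting it across two 32-bit registers; that works because the ring base is aligned, leaving the low address bits free for flags. A minimal sketch of the split (the macro bodies are assumptions, the driver defines its own DMA_ADDR_LOW/HIGH):

	#define DMA_ADDR_LOW(addr)	((u32)((addr) & 0xffffffff))	/* assumed */
	#define DMA_ADDR_HIGH(addr)	((u32)((u64)(addr) >> 32))	/* assumed */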
1042 static void tsnep_rx_enable(struct tsnep_rx *rx) in tsnep_rx_enable() argument
1047 iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL); in tsnep_rx_enable()
1050 static void tsnep_rx_disable(struct tsnep_rx *rx) in tsnep_rx_disable() argument
1054 iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL); in tsnep_rx_disable()
1055 readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val, in tsnep_rx_disable()
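
Disabling is asynchronous on the hardware side, so the fragment polls the control register until the engine reports it has stopped. readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) re-invokes op(addr), storing the result in val, until cond becomes true or the timeout expires (returning -ETIMEDOUT). The call is truncated after its third argument above; a hedged completion, with the condition and timeouts as assumptions:

	u32 val;
	int err;

	err = readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
				 !(val & TSNEP_CONTROL_RX_ENABLE),	/* assumed: wait for enable to clear */
				 10000, 1000000);			/* assumed: 10 ms poll, 1 s timeout */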
1060 static int tsnep_rx_desc_available(struct tsnep_rx *rx) in tsnep_rx_desc_available() argument
1062 if (rx->read <= rx->write) in tsnep_rx_desc_available()
1063 return TSNEP_RING_SIZE - rx->write + rx->read - 1; in tsnep_rx_desc_available()
1065 return rx->read - rx->write - 1; in tsnep_rx_desc_available()
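
tsnep_rx_desc_available() is the classic one-slot-reserved ring formula: one entry is always left unused so that read == write can only mean "empty", never "full". A runnable standalone demo (ring size illustrative):

	#include <stdio.h>

	#define RING_SIZE 256	/* illustrative */

	static int desc_available(int read, int write)
	{
		if (read <= write)
			return RING_SIZE - write + read - 1;
		return read - write - 1;
	}

	int main(void)
	{
		printf("empty:   %d\n", desc_available(0, 0));		/* 255: one slot reserved */
		printf("full:    %d\n", desc_available(10, 9));		/* 0: write caught up to read - 1 */
		printf("wrapped: %d\n", desc_available(100, 10));	/* 89 */
		return 0;
	}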
1068 static void tsnep_rx_free_page_buffer(struct tsnep_rx *rx) in tsnep_rx_free_page_buffer() argument
1075 page = rx->page_buffer; in tsnep_rx_free_page_buffer()
1077 page_pool_put_full_page(rx->page_pool, *page, false); in tsnep_rx_free_page_buffer()
1083 static int tsnep_rx_alloc_page_buffer(struct tsnep_rx *rx) in tsnep_rx_alloc_page_buffer() argument
1091 rx->page_buffer[i] = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_alloc_page_buffer()
1092 if (!rx->page_buffer[i]) { in tsnep_rx_alloc_page_buffer()
1093 tsnep_rx_free_page_buffer(rx); in tsnep_rx_alloc_page_buffer()
1102 static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry, in tsnep_rx_set_page() argument
1108 entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET); in tsnep_rx_set_page()
1111 static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index) in tsnep_rx_alloc_buffer() argument
1113 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_alloc_buffer()
1116 page = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_alloc_buffer()
1119 tsnep_rx_set_page(rx, entry, page); in tsnep_rx_alloc_buffer()
1124 static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index) in tsnep_rx_reuse_buffer() argument
1126 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_reuse_buffer()
1127 struct tsnep_rx_entry *read = &rx->entry[rx->read]; in tsnep_rx_reuse_buffer()
1129 tsnep_rx_set_page(rx, entry, read->page); in tsnep_rx_reuse_buffer()
1133 static void tsnep_rx_activate(struct tsnep_rx *rx, int index) in tsnep_rx_activate() argument
1135 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_activate()
1140 if (index == rx->increment_owner_counter) { in tsnep_rx_activate()
1141 rx->owner_counter++; in tsnep_rx_activate()
1142 if (rx->owner_counter == 4) in tsnep_rx_activate()
1143 rx->owner_counter = 1; in tsnep_rx_activate()
1144 rx->increment_owner_counter--; in tsnep_rx_activate()
1145 if (rx->increment_owner_counter < 0) in tsnep_rx_activate()
1146 rx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_rx_activate()
1149 (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & in tsnep_rx_activate()
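
The activate fragments implement the descriptor ownership scheme: each descriptor handed to hardware carries an owner counter in the range 1..3 (0 is skipped, presumably so zero-initialized descriptors are never mistaken for valid ones), and the counter is bumped once per ring pass at increment_owner_counter, which itself steps backwards through the ring, so consecutive passes tag descriptors with distinct values. A runnable demo of the cycling:

	#include <stdio.h>

	#define RING_SIZE 256	/* illustrative */

	int main(void)
	{
		int owner_counter = 1;
		int increment_owner_counter = RING_SIZE - 1;

		for (int pass = 0; pass < 4; pass++) {
			for (int index = 0; index < RING_SIZE; index++) {
				if (index != increment_owner_counter)
					continue;
				owner_counter++;
				if (owner_counter == 4)
					owner_counter = 1;	/* cycle 1 -> 2 -> 3 -> 1, never 0 */
				increment_owner_counter--;
				if (increment_owner_counter < 0)
					increment_owner_counter = RING_SIZE - 1;
				printf("pass %d: counter -> %d (bumped at index %d)\n",
				       pass, owner_counter, index);
			}
		}
		return 0;
	}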
1160 static int tsnep_rx_alloc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_alloc() argument
1166 index = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc()
1168 if (unlikely(tsnep_rx_alloc_buffer(rx, index))) { in tsnep_rx_alloc()
1169 rx->alloc_failed++; in tsnep_rx_alloc()
1174 tsnep_rx_reuse_buffer(rx, index); in tsnep_rx_alloc()
1179 tsnep_rx_activate(rx, index); in tsnep_rx_alloc()
1183 rx->write = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc()
1188 static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_refill() argument
1192 desc_refilled = tsnep_rx_alloc(rx, count, reuse); in tsnep_rx_refill()
1194 tsnep_rx_enable(rx); in tsnep_rx_refill()
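
The refill wrapper makes the kick ordering explicit: descriptors are attached and activated first, and the hardware is only notified via tsnep_rx_enable() when at least one descriptor was actually refilled. A condensed sketch of that contract (the hypothetical helper name marks it as a sketch, not the driver's code):

	static int rx_refill_sketch(struct tsnep_rx *rx, int count, bool reuse)
	{
		int refilled = tsnep_rx_alloc(rx, count, reuse);

		if (refilled)
			tsnep_rx_enable(rx);	/* kick hardware only when new descriptors exist */

		return refilled;
	}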
1199 static void tsnep_rx_set_xdp(struct tsnep_rx *rx, struct tsnep_rx_entry *entry, in tsnep_rx_set_xdp() argument
1205 entry->desc->rx = __cpu_to_le64(entry->dma); in tsnep_rx_set_xdp()
1208 static void tsnep_rx_reuse_buffer_zc(struct tsnep_rx *rx, int index) in tsnep_rx_reuse_buffer_zc() argument
1210 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_reuse_buffer_zc()
1211 struct tsnep_rx_entry *read = &rx->entry[rx->read]; in tsnep_rx_reuse_buffer_zc()
1213 tsnep_rx_set_xdp(rx, entry, read->xdp); in tsnep_rx_reuse_buffer_zc()
1217 static int tsnep_rx_alloc_zc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_alloc_zc() argument
1222 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count); in tsnep_rx_alloc_zc()
1224 int index = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc_zc()
1225 struct tsnep_rx_entry *entry = &rx->entry[index]; in tsnep_rx_alloc_zc()
1227 tsnep_rx_set_xdp(rx, entry, rx->xdp_batch[i]); in tsnep_rx_alloc_zc()
1228 tsnep_rx_activate(rx, index); in tsnep_rx_alloc_zc()
1231 rx->alloc_failed++; in tsnep_rx_alloc_zc()
1234 tsnep_rx_reuse_buffer_zc(rx, rx->write); in tsnep_rx_alloc_zc()
1235 tsnep_rx_activate(rx, rx->write); in tsnep_rx_alloc_zc()
1240 rx->write = (rx->write + i) & TSNEP_RING_MASK; in tsnep_rx_alloc_zc()
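
In the zero-copy path, xsk_buff_alloc_batch() pulls up to count buffers from the XSK fill ring in one call and may return fewer than requested; the fragments account the shortfall in alloc_failed and, when reuse is requested, recycle the buffer at the read position so at least one descriptor stays live and the ring cannot stall empty. The core loop, reassembled from the fragments (shortfall handling elided):

	u32 allocated;
	int i;

	allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count);
	for (i = 0; i < allocated; i++) {
		int index = (rx->write + i) & TSNEP_RING_MASK;

		tsnep_rx_set_xdp(rx, &rx->entry[index], rx->xdp_batch[i]);
		tsnep_rx_activate(rx, index);
	}
	rx->write = (rx->write + i) & TSNEP_RING_MASK;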
1245 static void tsnep_rx_free_zc(struct tsnep_rx *rx) in tsnep_rx_free_zc() argument
1250 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_free_zc()
1258 static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse) in tsnep_rx_refill_zc() argument
1262 desc_refilled = tsnep_rx_alloc_zc(rx, count, reuse); in tsnep_rx_refill_zc()
1264 tsnep_rx_enable(rx); in tsnep_rx_refill_zc()
1269 static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog, in tsnep_xdp_run_prog() argument
1284 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, false)) in tsnep_xdp_run_prog()
1289 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0) in tsnep_xdp_run_prog()
1294 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog()
1298 trace_xdp_exception(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog()
1307 page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data), in tsnep_xdp_run_prog()
1313 static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog, in tsnep_xdp_run_prog_zc() argument
1324 if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0) in tsnep_xdp_run_prog_zc()
1334 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx, true)) in tsnep_xdp_run_prog_zc()
1339 bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog_zc()
1343 trace_xdp_exception(rx->adapter->netdev, prog, act); in tsnep_xdp_run_prog_zc()
1364 static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page, in tsnep_build_skb() argument
1377 if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) { in tsnep_build_skb()
1389 skb_record_rx_queue(skb, rx->queue_index); in tsnep_build_skb()
1390 skb->protocol = eth_type_trans(skb, rx->adapter->netdev); in tsnep_build_skb()
1395 static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_page() argument
1400 skb = tsnep_build_skb(rx, page, length); in tsnep_rx_page()
1404 rx->packets++; in tsnep_rx_page()
1405 rx->bytes += length; in tsnep_rx_page()
1407 rx->multicast++; in tsnep_rx_page()
1411 page_pool_recycle_direct(rx->page_pool, page); in tsnep_rx_page()
1413 rx->dropped++; in tsnep_rx_page()
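
tsnep_build_skb() and tsnep_rx_page() together form the receive-to-stack path: wrap the page-pool page in an skb, tag it with the RX queue and protocol, bump the counters, and hand it to NAPI GRO; on skb allocation failure the page goes straight back to the pool and the packet is counted as dropped. A condensed sketch (skb_mark_for_recycle() and the multicast test are assumptions not visible in the fragments):

	skb = tsnep_build_skb(rx, page, length);
	if (skb) {
		skb_mark_for_recycle(skb);	/* assumed: lets page_pool reclaim the page */

		rx->packets++;
		rx->bytes += length;
		if (skb->pkt_type == PACKET_MULTICAST)	/* assumed condition */
			rx->multicast++;

		napi_gro_receive(napi, skb);
	} else {
		page_pool_recycle_direct(rx->page_pool, page);
		rx->dropped++;
	}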
1417 static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_poll() argument
1420 struct device *dmadev = rx->adapter->dmadev; in tsnep_rx_poll()
1432 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_poll()
1433 dma_dir = page_pool_get_dma_dir(rx->page_pool); in tsnep_rx_poll()
1434 prog = READ_ONCE(rx->adapter->xdp_prog); in tsnep_rx_poll()
1436 tx_nq = netdev_get_tx_queue(rx->adapter->netdev, in tsnep_rx_poll()
1437 rx->tx_queue_index); in tsnep_rx_poll()
1438 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll()
1440 xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq); in tsnep_rx_poll()
1443 while (likely(done < budget) && (rx->read != rx->write)) { in tsnep_rx_poll()
1444 entry = &rx->entry[rx->read]; in tsnep_rx_poll()
1454 desc_available -= tsnep_rx_refill(rx, desc_available, in tsnep_rx_poll()
1458 * empty RX ring, thus buffer cannot be used for in tsnep_rx_poll()
1459 * RX processing in tsnep_rx_poll()
1461 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll()
1464 rx->dropped++; in tsnep_rx_poll()
1481 /* RX metadata with timestamps is in front of actual data, in tsnep_rx_poll()
1483 * consider metadata size as offset of actual data during RX in tsnep_rx_poll()
1488 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll()
1498 consume = tsnep_xdp_run_prog(rx, prog, &xdp, in tsnep_rx_poll()
1501 rx->packets++; in tsnep_rx_poll()
1502 rx->bytes += length; in tsnep_rx_poll()
1510 tsnep_rx_page(rx, napi, entry->page, length); in tsnep_rx_poll()
1515 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll()
1518 tsnep_rx_refill(rx, desc_available, false); in tsnep_rx_poll()
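
The poll-loop shape recoverable from the fragments: consume descriptors while NAPI budget remains and the ring is non-empty, refill opportunistically inside the loop once enough descriptors have been freed (with reuse as a last resort so the ring never runs completely dry), then flush any pending XDP work and do a final refill. A skeleton with the ownership checks, XDP dispatch and skb handoff elided:

	while (likely(done < budget) && (rx->read != rx->write)) {
		entry = &rx->entry[rx->read];
		/* ... verify owner counter, sync DMA, run XDP or build skb ... */
		rx->read = (rx->read + 1) & TSNEP_RING_MASK;
		done++;
	}

	if (xdp_status)
		tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);
	if (desc_available)
		tsnep_rx_refill(rx, desc_available, false);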
1523 static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi, in tsnep_rx_poll_zc() argument
1536 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_poll_zc()
1537 prog = READ_ONCE(rx->adapter->xdp_prog); in tsnep_rx_poll_zc()
1539 tx_nq = netdev_get_tx_queue(rx->adapter->netdev, in tsnep_rx_poll_zc()
1540 rx->tx_queue_index); in tsnep_rx_poll_zc()
1541 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll_zc()
1544 while (likely(done < budget) && (rx->read != rx->write)) { in tsnep_rx_poll_zc()
1545 entry = &rx->entry[rx->read]; in tsnep_rx_poll_zc()
1555 desc_available -= tsnep_rx_refill_zc(rx, desc_available, in tsnep_rx_poll_zc()
1559 * empty RX ring, thus buffer cannot be used for in tsnep_rx_poll_zc()
1560 * RX processing in tsnep_rx_poll_zc()
1562 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll_zc()
1565 rx->dropped++; in tsnep_rx_poll_zc()
1580 xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool); in tsnep_rx_poll_zc()
1582 /* RX metadata with timestamps is in front of actual data, in tsnep_rx_poll_zc()
1584 * consider metadata size as offset of actual data during RX in tsnep_rx_poll_zc()
1589 rx->read = (rx->read + 1) & TSNEP_RING_MASK; in tsnep_rx_poll_zc()
1598 consume = tsnep_xdp_run_prog_zc(rx, prog, entry->xdp, in tsnep_rx_poll_zc()
1601 rx->packets++; in tsnep_rx_poll_zc()
1602 rx->bytes += length; in tsnep_rx_poll_zc()
1610 page = page_pool_dev_alloc_pages(rx->page_pool); in tsnep_rx_poll_zc()
1615 tsnep_rx_page(rx, napi, page, length); in tsnep_rx_poll_zc()
1617 rx->dropped++; in tsnep_rx_poll_zc()
1624 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll_zc()
1627 desc_available -= tsnep_rx_refill_zc(rx, desc_available, false); in tsnep_rx_poll_zc()
1629 if (xsk_uses_need_wakeup(rx->xsk_pool)) { in tsnep_rx_poll_zc()
1631 xsk_set_rx_need_wakeup(rx->xsk_pool); in tsnep_rx_poll_zc()
1633 xsk_clear_rx_need_wakeup(rx->xsk_pool); in tsnep_rx_poll_zc()
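
The tail of tsnep_rx_poll_zc() shows the AF_XDP need_wakeup handshake: when the driver could not refill every free descriptor (user space has not posted enough fill-ring entries), it sets the RX wakeup flag so user space knows it must intervene; otherwise it clears the flag and polling continues autonomously. The condition below is an assumption consistent with the surrounding desc_available bookkeeping:

	if (xsk_uses_need_wakeup(rx->xsk_pool)) {
		if (desc_available)	/* assumed: refill fell short */
			xsk_set_rx_need_wakeup(rx->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx->xsk_pool);
	}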
1641 static bool tsnep_rx_pending(struct tsnep_rx *rx) in tsnep_rx_pending() argument
1645 if (rx->read != rx->write) { in tsnep_rx_pending()
1646 entry = &rx->entry[rx->read]; in tsnep_rx_pending()
1656 static int tsnep_rx_open(struct tsnep_rx *rx) in tsnep_rx_open() argument
1661 retval = tsnep_rx_ring_create(rx); in tsnep_rx_open()
1665 tsnep_rx_init(rx); in tsnep_rx_open()
1667 desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_open()
1668 if (rx->xsk_pool) in tsnep_rx_open()
1669 retval = tsnep_rx_alloc_zc(rx, desc_available, false); in tsnep_rx_open()
1671 retval = tsnep_rx_alloc(rx, desc_available, false); in tsnep_rx_open()
1681 if (rx->xsk_pool) { in tsnep_rx_open()
1682 retval = tsnep_rx_alloc_page_buffer(rx); in tsnep_rx_open()
1690 tsnep_rx_ring_cleanup(rx); in tsnep_rx_open()
1694 static void tsnep_rx_close(struct tsnep_rx *rx) in tsnep_rx_close() argument
1696 if (rx->xsk_pool) in tsnep_rx_close()
1697 tsnep_rx_free_page_buffer(rx); in tsnep_rx_close()
1699 tsnep_rx_ring_cleanup(rx); in tsnep_rx_close()
1702 static void tsnep_rx_reopen(struct tsnep_rx *rx) in tsnep_rx_reopen() argument
1704 struct page **page = rx->page_buffer; in tsnep_rx_reopen()
1707 tsnep_rx_init(rx); in tsnep_rx_reopen()
1710 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_reopen()
1720 tsnep_rx_set_page(rx, entry, *page); in tsnep_rx_reopen()
1721 tsnep_rx_activate(rx, rx->write); in tsnep_rx_reopen()
1722 rx->write++; in tsnep_rx_reopen()
1730 static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx) in tsnep_rx_reopen_xsk() argument
1732 struct page **page = rx->page_buffer; in tsnep_rx_reopen_xsk()
1736 tsnep_rx_init(rx); in tsnep_rx_reopen_xsk()
1742 allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, in tsnep_rx_reopen_xsk()
1746 struct tsnep_rx_entry *entry = &rx->entry[i]; in tsnep_rx_reopen_xsk()
1765 tsnep_rx_set_xdp(rx, entry, in tsnep_rx_reopen_xsk()
1766 rx->xdp_batch[allocated - 1]); in tsnep_rx_reopen_xsk()
1767 tsnep_rx_activate(rx, rx->write); in tsnep_rx_reopen_xsk()
1768 rx->write++; in tsnep_rx_reopen_xsk()
1778 if (xsk_uses_need_wakeup(rx->xsk_pool)) { in tsnep_rx_reopen_xsk()
1779 int desc_available = tsnep_rx_desc_available(rx); in tsnep_rx_reopen_xsk()
1782 xsk_set_rx_need_wakeup(rx->xsk_pool); in tsnep_rx_reopen_xsk()
1784 xsk_clear_rx_need_wakeup(rx->xsk_pool); in tsnep_rx_reopen_xsk()
1793 if (queue->rx && tsnep_rx_pending(queue->rx)) in tsnep_pending()
1813 if (queue->rx) { in tsnep_poll()
1814 done = queue->rx->xsk_pool ? in tsnep_poll()
1815 tsnep_rx_poll_zc(queue->rx, napi, budget) : in tsnep_poll()
1816 tsnep_rx_poll(queue->rx, napi, budget); in tsnep_poll()
1853 if (queue->tx && queue->rx) in tsnep_request_irq()
1855 name, queue->rx->queue_index); in tsnep_request_irq()
1860 snprintf(queue->name, sizeof(queue->name), "%s-rx-%d", in tsnep_request_irq()
1861 name, queue->rx->queue_index); in tsnep_request_irq()
1893 struct tsnep_rx *rx = queue->rx; in tsnep_queue_close() local
1897 if (rx) { in tsnep_queue_close()
1898 if (xdp_rxq_info_is_reg(&rx->xdp_rxq)) in tsnep_queue_close()
1899 xdp_rxq_info_unreg(&rx->xdp_rxq); in tsnep_queue_close()
1900 if (xdp_rxq_info_is_reg(&rx->xdp_rxq_zc)) in tsnep_queue_close()
1901 xdp_rxq_info_unreg(&rx->xdp_rxq_zc); in tsnep_queue_close()
1910 struct tsnep_rx *rx = queue->rx; in tsnep_queue_open() local
1916 if (rx) { in tsnep_queue_open()
1919 rx->tx_queue_index = tx->queue_index; in tsnep_queue_open()
1920 else if (rx->queue_index < adapter->num_tx_queues) in tsnep_queue_open()
1921 rx->tx_queue_index = rx->queue_index; in tsnep_queue_open()
1923 rx->tx_queue_index = 0; in tsnep_queue_open()
1929 retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev, in tsnep_queue_open()
1930 rx->queue_index, queue->napi.napi_id); in tsnep_queue_open()
1933 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, in tsnep_queue_open()
1935 rx->page_pool); in tsnep_queue_open()
1938 retval = xdp_rxq_info_reg(&rx->xdp_rxq_zc, adapter->netdev, in tsnep_queue_open()
1939 rx->queue_index, queue->napi.napi_id); in tsnep_queue_open()
1942 retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc, in tsnep_queue_open()
1947 if (rx->xsk_pool) in tsnep_queue_open()
1948 xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc); in tsnep_queue_open()
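
tsnep_queue_open() registers two xdp_rxq_info structures per RX queue, one per memory model: the copy path is backed by the page pool (MEM_TYPE_PAGE_POOL), the zero-copy path by the XSK buff pool (MEM_TYPE_XSK_BUFF_POOL, which takes no allocator argument because the pool is attached via xsk_pool_set_rxq_info()). The second mem-model call is truncated in the fragments; the arguments shown below are assumptions consistent with the XDP API (error handling elided):

	retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq, MEM_TYPE_PAGE_POOL,
					    rx->page_pool);
	/* assumed completion of the truncated call: */
	retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc,
					    MEM_TYPE_XSK_BUFF_POOL, NULL);
	if (rx->xsk_pool)
		xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc);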
1974 if (queue->rx) in tsnep_queue_enable()
1975 tsnep_rx_enable(queue->rx); in tsnep_queue_enable()
1986 /* disable RX after NAPI polling has been disabled, because RX can be in tsnep_queue_disable()
1989 if (queue->rx) in tsnep_queue_disable()
1990 tsnep_rx_disable(queue->rx); in tsnep_queue_disable()
2004 if (adapter->queue[i].rx) { in tsnep_netdev_open()
2005 retval = tsnep_rx_open(adapter->queue[i].rx); in tsnep_netdev_open()
2040 if (adapter->queue[i].rx) in tsnep_netdev_open()
2041 tsnep_rx_close(adapter->queue[i].rx); in tsnep_netdev_open()
2061 if (adapter->queue[i].rx) in tsnep_netdev_close()
2062 tsnep_rx_close(adapter->queue[i].rx); in tsnep_netdev_close()
2079 queue->rx->page_buffer = kcalloc(TSNEP_RING_SIZE, in tsnep_enable_xsk()
2080 sizeof(*queue->rx->page_buffer), in tsnep_enable_xsk()
2082 if (!queue->rx->page_buffer) in tsnep_enable_xsk()
2084 queue->rx->xdp_batch = kcalloc(TSNEP_RING_SIZE, in tsnep_enable_xsk()
2085 sizeof(*queue->rx->xdp_batch), in tsnep_enable_xsk()
2087 if (!queue->rx->xdp_batch) { in tsnep_enable_xsk()
2088 kfree(queue->rx->page_buffer); in tsnep_enable_xsk()
2089 queue->rx->page_buffer = NULL; in tsnep_enable_xsk()
2094 xsk_pool_set_rxq_info(pool, &queue->rx->xdp_rxq_zc); in tsnep_enable_xsk()
2100 queue->rx->xsk_pool = pool; in tsnep_enable_xsk()
2103 tsnep_rx_reopen_xsk(queue->rx); in tsnep_enable_xsk()
2117 tsnep_rx_free_zc(queue->rx); in tsnep_disable_xsk()
2119 queue->rx->xsk_pool = NULL; in tsnep_disable_xsk()
2123 tsnep_rx_reopen(queue->rx); in tsnep_disable_xsk()
2127 kfree(queue->rx->xdp_batch); in tsnep_disable_xsk()
2128 queue->rx->xdp_batch = NULL; in tsnep_disable_xsk()
2129 kfree(queue->rx->page_buffer); in tsnep_disable_xsk()
2130 queue->rx->page_buffer = NULL; in tsnep_disable_xsk()
2185 stats->rx_packets += adapter->rx[i].packets; in tsnep_netdev_get_stats64()
2186 stats->rx_bytes += adapter->rx[i].bytes; in tsnep_netdev_get_stats64()
2187 stats->rx_dropped += adapter->rx[i].dropped; in tsnep_netdev_get_stats64()
2188 stats->multicast += adapter->rx[i].multicast; in tsnep_netdev_get_stats64()
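
The per-queue counters bumped throughout the RX paths above (packets, bytes, dropped, multicast) appear to be plain per-queue fields, so tsnep_netdev_get_stats64() just sums them into the netdev-wide rtnl_link_stats64, as in this sketch (num_rx_queues is assumed from context):

	for (i = 0; i < adapter->num_rx_queues; i++) {
		stats->rx_packets += adapter->rx[i].packets;
		stats->rx_bytes += adapter->rx[i].bytes;
		stats->rx_dropped += adapter->rx[i].dropped;
		stats->multicast += adapter->rx[i].multicast;
	}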
2377 /* initialize RX filtering, at least configured MAC address and in tsnep_mac_init()
2476 /* one TX/RX queue pair for netdev is mandatory */ in tsnep_queue_init()
2492 adapter->queue[0].rx = &adapter->rx[0]; in tsnep_queue_init()
2493 adapter->queue[0].rx->adapter = adapter; in tsnep_queue_init()
2494 adapter->queue[0].rx->addr = adapter->addr + TSNEP_QUEUE(0); in tsnep_queue_init()
2495 adapter->queue[0].rx->queue_index = 0; in tsnep_queue_init()
2505 /* add additional TX/RX queue pairs only if dedicated interrupt is in tsnep_queue_init()
2523 adapter->queue[i].rx = &adapter->rx[i]; in tsnep_queue_init()
2524 adapter->queue[i].rx->adapter = adapter; in tsnep_queue_init()
2525 adapter->queue[i].rx->addr = adapter->addr + TSNEP_QUEUE(i); in tsnep_queue_init()
2526 adapter->queue[i].rx->queue_index = i; in tsnep_queue_init()