/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
   163  struct xdp_buff **buffs;                                                in mlx5e_xsk_alloc_rx_wqes_batched() local
   170  buffs = rq->wqe.alloc_units->xsk_buffs;                                 in mlx5e_xsk_alloc_rx_wqes_batched()
   173  alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, wqe_bulk);       in mlx5e_xsk_alloc_rx_wqes_batched()
   175  alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, contig);         in mlx5e_xsk_alloc_rx_wqes_batched()
   177  alloc += xsk_buff_alloc_batch(rq->xsk_pool, buffs, wqe_bulk - contig);  in mlx5e_xsk_alloc_rx_wqes_batched()

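The mlx5 hits show a bulk fill that may cross the end of the RX ring, split into at most two xsk_buff_alloc_batch() calls. A minimal sketch of that split follows; only xsk_buff_alloc_batch() (include/net/xdp_sock_drv.h) is the real API, while the function name and the ix/ring_size parameters are illustrative stand-ins for the driver's ring state.

    /* Sketch only: ring geometry parameters are hypothetical;
     * xsk_buff_alloc_batch() is the real AF_XDP pool API.
     */
    static u32 fill_batched(struct xsk_buff_pool *pool, struct xdp_buff **buffs,
                            u32 ix, u32 ring_size, u32 wqe_bulk)
    {
            u32 contig = ring_size - ix;    /* slots left before the ring wraps */
            u32 alloc;

            if (wqe_bulk <= contig)
                    /* whole bulk fits without wrapping: one call */
                    return xsk_buff_alloc_batch(pool, buffs + ix, wqe_bulk);

            /* fill to the end of the ring, then continue from slot 0 */
            alloc = xsk_buff_alloc_batch(pool, buffs + ix, contig);
            alloc += xsk_buff_alloc_batch(pool, buffs, wqe_bulk - contig);
            return alloc;   /* may be < wqe_bulk if the pool ran dry */
    }
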
/linux/drivers/net/wan/ixp4xx_hss.c
  1157  int i, buffs = RX_DESCS; /* allocated RX buffers */  in hss_hdlc_close() local
  1168  buffs--;                                             in hss_hdlc_close()
  1170  buffs--;                                             in hss_hdlc_close()
  1172  if (buffs)                                           in hss_hdlc_close()
  1174  buffs);                                              in hss_hdlc_close()
  1176  buffs = TX_DESCS;                                    in hss_hdlc_close()
  1178  buffs--; /* cancel TX */                             in hss_hdlc_close()
  1183  buffs--;                                             in hss_hdlc_close()
  1184  if (!buffs)                                          in hss_hdlc_close()
  1188  if (buffs)                                           in hss_hdlc_close()
  [all...]

/linux/drivers/net/wan/hd64570.c
   161  u16 buffs = transmit ? card->tx_ring_buffers         in sca_init_port() local
   164  for (i = 0; i < buffs; i++) {                        in sca_init_port()
   186  sca_outw(desc_offset(port, buffs - 1, transmit),     in sca_init_port()

/linux/drivers/net/wan/hd64572.c
   131  u16 buffs = transmit ? card->tx_ring_buffers         in sca_init_port() local
   134  for (i = 0; i < buffs; i++) {                        in sca_init_port()

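hd64570.c and hd64572.c share the same sca_init_port() idiom: one ring-walk routine serves both directions by selecting the TX or RX ring length up front. A sketch of that shape, where init_desc() is a hypothetical placeholder for the per-descriptor setup (the real code programs SCA descriptors and, in hd64570.c, the last-descriptor offset via sca_outw()):

    /* Sketch: init_desc() is hypothetical; port_t/card_t follow the
     * typedef style of the two drivers. */
    static void sca_init_ring(port_t *port, int transmit)
    {
            card_t *card = port->card;
            u16 buffs = transmit ? card->tx_ring_buffers
                                 : card->rx_ring_buffers;
            u16 i;

            for (i = 0; i < buffs; i++)
                    init_desc(port, i, transmit);   /* per-descriptor setup */
    }
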
/linux/drivers/net/ethernet/xscale/ixp4xx_eth.c
  1345  int buffs = RX_DESCS; /* allocated RX buffers */     in eth_close() local
  1354  buffs--;                                             in eth_close()
  1366  buffs--;                                             in eth_close()
  1367  if (!buffs)                                          in eth_close()
  1384  if (buffs)                                           in eth_close()
  1386  " left in NPE\n", buffs);                            in eth_close()
  1388  if (!buffs)                                          in eth_close()
  1392  buffs = TX_DESCS;                                    in eth_close()
  1394  buffs--; /* cancel TX */                             in eth_close()
  1399  buffs                                                in eth_close()
  [all...]

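Both ixp4xx drivers (the WAN HSS driver above and this Ethernet driver) use the same close-path accounting: start from the number of buffers handed to the NPE (RX_DESCS, then TX_DESCS), decrement once per buffer reclaimed, and complain if any remain. A sketch of the RX half, with reclaim_one_rx_buffer() as a hypothetical stand-in for the driver's queue-manager drain loop:

    /* Sketch: reclaim_one_rx_buffer() is hypothetical; RX_DESCS and the
     * "buffers left" warning mirror the quoted eth_close() and
     * hss_hdlc_close() lines. */
    static void drain_rx(struct net_device *dev)
    {
            int buffs = RX_DESCS;   /* allocated RX buffers */

            while (reclaim_one_rx_buffer(dev))
                    buffs--;        /* one less buffer still held by the NPE */

            if (buffs)
                    netdev_crit(dev, "unable to drain RX queue, %i buffers left\n",
                                buffs);
    }
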
/linux/drivers/net/ethernet/intel/igb/igb_xsk.c
   180  u16 buffs;                                           in igb_fill_rx_descs() local
   187  buffs = xsk_buff_alloc_batch(pool, xdp, count);      in igb_fill_rx_descs()
   188  for (i = 0; i < buffs; i++) {                        in igb_fill_rx_descs()
   197  return buffs;                                        in igb_fill_rx_descs()

/linux/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
   172  struct idpf_dma_mem **buffs);

/linux/drivers/net/ethernet/intel/idpf/idpf_controlq.c
   413  * @buffs: array of pointers to dma mem structs to be given to hardware
   420  * buffs = NULL to support direct commands
   423  u16 *buff_count, struct idpf_dma_mem **buffs)        in idpf_ctlq_post_rx_buffs() argument
   486  cq->bi.rx_buff[ntp] = buffs[i];                      in idpf_ctlq_post_rx_buffs()

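Per its kernel-doc, idpf_ctlq_post_rx_buffs() hands an array of DMA buffers to hardware and accepts buffs == NULL for direct commands; line 486 shows each posted buffer being remembered at the ring's next-to-post slot. A fragment sketching that loop; only the cq->bi.rx_buff[ntp] = buffs[i] assignment comes from the quoted source, and the ring_size field plus the wrap logic are assumed ring idioms:

    /* Sketch of the posting loop; ntp ("next to post") wrap-around and
     * cq->ring_size are assumptions, descriptor setup is elided. */
    for (i = 0; i < *buff_count; i++) {
            cq->bi.rx_buff[ntp] = buffs[i]; /* keep for completion handling */
            /* ...write buffs[i]'s DMA address/size into the descriptor... */
            if (++ntp == cq->ring_size)
                    ntp = 0;        /* wrap to the start of the ring */
    }
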
/linux/drivers/net/ethernet/intel/ice/ice_xsk.c
   442  u16 buffs;                                           in ice_fill_rx_descs() local
   445  buffs = xsk_buff_alloc_batch(pool, xdp, count);      in ice_fill_rx_descs()
   446  for (i = 0; i < buffs; i++) {                        in ice_fill_rx_descs()
   460  return buffs;                                        in ice_fill_rx_descs()

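igb_fill_rx_descs() and ice_fill_rx_descs() above are the same two-step pattern: grab up to count frames from the XSK pool in one batch, then write each frame's DMA address into its RX descriptor and report how many were actually filled. A sketch of that shape; xsk_buff_alloc_batch() and xsk_buff_xdp_get_dma() are the real AF_XDP pool APIs, while the descriptor union and its read.pkt_addr field are an assumed layout in the style of the Intel drivers:

    /* Sketch: 'union rx_desc' with read.pkt_addr is an assumed layout;
     * the two xsk_* calls are real (include/net/xdp_sock_drv.h). */
    static u16 fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
                             union rx_desc *desc, u16 count)
    {
            dma_addr_t dma;
            u16 buffs, i;

            /* one batched allocation instead of 'count' single allocs */
            buffs = xsk_buff_alloc_batch(pool, xdp, count);
            for (i = 0; i < buffs; i++) {
                    dma = xsk_buff_xdp_get_dma(xdp[i]);
                    desc[i].read.pkt_addr = cpu_to_le64(dma);
            }
            return buffs;   /* may be fewer than requested if the pool ran dry */
    }
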
/linux/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
  1425  struct mvpp2_txq_pcpu_buf *buffs;                    member

/linux/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
   313  txq_pcpu->buffs + txq_pcpu->txq_put_index;           in mvpp2_txq_inc_put()
  2822  txq_pcpu->buffs + txq_pcpu->txq_get_index;           in mvpp2_txq_bufs_free()
  3154  txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,      in mvpp2_txq_init()
  3155  sizeof(*txq_pcpu->buffs),                            in mvpp2_txq_init()
  3157  if (!txq_pcpu->buffs)                                in mvpp2_txq_init()
  3190  kfree(txq_pcpu->buffs);                              in mvpp2_txq_deinit()

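The mvpp2 hits outline a common TX bookkeeping scheme: mvpp2_txq_init() allocates one mvpp2_txq_pcpu_buf tracking entry per descriptor as a flat array, the transmit path records into it at txq_put_index, the completion path reads back from txq_get_index, and mvpp2_txq_deinit() frees the array. A fragment illustrating the put side; the field names come from the quoted lines, while the GFP flag and index wrap-around are the assumed idiom:

    /* Fragment: allocation as in mvpp2_txq_init(), then the put-side
     * lookup from mvpp2_txq_inc_put(); wrap-around is an assumption. */
    txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
                                    sizeof(*txq_pcpu->buffs), GFP_KERNEL);
    if (!txq_pcpu->buffs)
            return -ENOMEM;

    tx_buf = txq_pcpu->buffs + txq_pcpu->txq_put_index; /* entry for this TX */
    if (++txq_pcpu->txq_put_index == txq_pcpu->size)
            txq_pcpu->txq_put_index = 0;
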
/linux/drivers/md/raid1.c
  2742  int buffs;                                                       in init_resync() local
  2744  buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;                       in init_resync()
  2747  return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,  in init_resync()

/linux/drivers/md/raid10.c
  3053  int ret, buffs, i;                                   in init_resync() local
  3055  buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;           in init_resync()
  3061  ret = mempool_init(&conf->r10buf_pool, buffs,        in init_resync()

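raid1 and raid10 size their resync buffer pools identically: the resync window is divided into fixed-size blocks, and mempool_init() guarantees that many preallocated buffers. A sketch of the raid1 variant; mempool_init() is the real <linux/mempool.h> API and r1buf_pool_alloc is quoted above, while r1buf_pool_free is assumed to be its paired free callback:

    /* Sketch of raid1's init_resync(); r1buf_pool_free is the assumed
     * counterpart of the quoted r1buf_pool_alloc callback. */
    static int init_resync(struct r1conf *conf)
    {
            int buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;  /* buffers in flight */

            return mempool_init(&conf->r1buf_pool, buffs,
                                r1buf_pool_alloc, r1buf_pool_free, conf);
    }

raid10's version follows the same sizing but keeps the mempool_init() return in ret (line 3061 above), since its init_resync() has further setup to do before returning.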