// SPDX-License-Identifier: GPL-2.0-only

/**
 * idpf_ctlq_alloc_desc_ring - Allocate Control Queue (CQ) rings
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 */
static int idpf_ctlq_alloc_desc_ring(struct idpf_hw *hw,
				     struct idpf_ctlq_info *cq)
{
	size_t size = cq->ring_size * sizeof(struct idpf_ctlq_desc);

	cq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size);
	if (!cq->desc_ring.va)
		return -ENOMEM;

	return 0;
}
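
/* idpf_alloc_dma_mem() is defined elsewhere in the driver, outside this
 * file. As a hedged sketch only: a coherent-DMA ring allocator commonly
 * looks like the helper below. The name example_alloc_dma_mem and the
 * explicit struct device parameter are illustrative assumptions, not the
 * driver's actual interface.
 */
static void *example_alloc_dma_mem(struct device *dev,
				   struct idpf_dma_mem *mem, u64 size)
{
	/* round the request up to a page multiple for the ring backing */
	size_t sz = ALIGN(size, 4096);

	/* coherent memory: device and CPU share it without explicit syncs */
	mem->va = dma_alloc_coherent(dev, sz, &mem->pa, GFP_KERNEL);
	mem->size = mem->va ? sz : 0;

	return mem->va;
}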

/**
 * idpf_ctlq_alloc_bufs - Allocate Control Queue (CQ) buffers
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Allocate the buffer head for all control queues, and if it's a receive
 * queue, allocate the DMA buffers as well.
 */
static int idpf_ctlq_alloc_bufs(struct idpf_hw *hw,
				struct idpf_ctlq_info *cq)
{
	int i;

	/* Do not allocate DMA buffers for transmit queues */
	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_TX)
		return 0;

	/* allocate the array of buffer-info pointers first */
	cq->bi.rx_buff = kcalloc(cq->ring_size, sizeof(struct idpf_dma_mem *),
				 GFP_KERNEL);
	if (!cq->bi.rx_buff)
		return -ENOMEM;

	/* allocate the mapped buffers (except for the last descriptor) */
	for (i = 0; i < cq->ring_size - 1; i++) {
		struct idpf_dma_mem *bi;
		int num = 1; /* number of idpf_dma_mem to be allocated */

		cq->bi.rx_buff[i] = kcalloc(num, sizeof(struct idpf_dma_mem),
					    GFP_KERNEL);
		if (!cq->bi.rx_buff[i])
			goto unwind_alloc_cq_bufs;

		bi = cq->bi.rx_buff[i];

		bi->va = idpf_alloc_dma_mem(hw, bi, cq->buf_size);
		if (!bi->va) {
			/* unwind will not free this failed entry */
			kfree(cq->bi.rx_buff[i]);
			goto unwind_alloc_cq_bufs;
		}
	}

	return 0;

unwind_alloc_cq_bufs:
	/* don't try to free the entry that failed */
	i--;
	for (; i >= 0; i--) {
		idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
		kfree(cq->bi.rx_buff[i]);
	}
	kfree(cq->bi.rx_buff);

	return -ENOMEM;
}
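
/* Matching teardown for the unwind path above: idpf_free_dma_mem() is
 * likewise defined outside this file. A minimal sketch, assuming the same
 * illustrative struct device plumbing as example_alloc_dma_mem() above.
 */
static void example_free_dma_mem(struct device *dev, struct idpf_dma_mem *mem)
{
	dma_free_coherent(dev, mem->size, mem->va, mem->pa);
	mem->size = 0;
	mem->va = NULL;
	mem->pa = 0;
}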

/**
 * idpf_ctlq_free_desc_ring - Free Control Queue (CQ) rings
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * This assumes the posted send buffers have already been cleaned
 * and de-allocated.
 */
static void idpf_ctlq_free_desc_ring(struct idpf_hw *hw,
				     struct idpf_ctlq_info *cq)
{
	idpf_free_dma_mem(hw, &cq->desc_ring);
}

/**
 * idpf_ctlq_free_bufs - Free CQ buffer info elements
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Free the DMA buffers for RX queues, and the DMA buffer header for both RX
 * and TX queues. The upper layers are expected to manage freeing of TX DMA
 * buffers.
 */
static void idpf_ctlq_free_bufs(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	void *bi;

	if (cq->cq_type == IDPF_CTLQ_TYPE_MAILBOX_RX) {
		int i;

		/* free DMA buffers for rx queues */
		for (i = 0; i < cq->ring_size; i++) {
			if (cq->bi.rx_buff[i]) {
				idpf_free_dma_mem(hw, cq->bi.rx_buff[i]);
				kfree(cq->bi.rx_buff[i]);
			}
		}

		bi = (void *)cq->bi.rx_buff;
	} else {
		bi = (void *)cq->bi.tx_msg;
	}

	/* free the buffer header */
	kfree(bi);
}
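
/* The rx_buff/tx_msg cast above works because both buffer heads share a
 * union inside struct idpf_ctlq_info. A minimal sketch, with field names
 * inferred from the accesses in this file (assumption: the real struct
 * carries many more members):
 */
struct example_ctlq_buf_heads {
	union {
		struct idpf_dma_mem **rx_buff;	/* RX: posted DMA buffers */
		struct idpf_ctlq_msg **tx_msg;	/* TX: caller-owned messages */
	} bi;
};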

/**
 * idpf_ctlq_dealloc_ring_res - Free memory allocated for control queue
 * @hw: pointer to hw struct
 * @cq: pointer to the specific Control queue
 *
 * Free the memory used by the ring, buffers and other related structures.
 */
void idpf_ctlq_dealloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	/* free ring buffers and the ring itself */
	idpf_ctlq_free_bufs(hw, cq);
	idpf_ctlq_free_desc_ring(hw, cq);
}

/**
 * idpf_ctlq_alloc_ring_res - allocate memory for descriptor ring and bufs
 * @hw: pointer to hw struct
 * @cq: pointer to control queue struct
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not atomic-context safe.
 */
int idpf_ctlq_alloc_ring_res(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
{
	int err;

	/* allocate the ring memory */
	err = idpf_ctlq_alloc_desc_ring(hw, cq);
	if (err)
		return err;

	/* allocate buffers in the rings */
	err = idpf_ctlq_alloc_bufs(hw, cq);
	if (err)
		goto idpf_init_cq_free_ring;

	return 0;

idpf_init_cq_free_ring:
	idpf_free_dma_mem(hw, &cq->desc_ring);

	return err;
}
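
/* Usage sketch: callers are expected to pair idpf_ctlq_alloc_ring_res()
 * with idpf_ctlq_dealloc_ring_res(). example_bringup_mailbox() below is an
 * illustrative assumption, not driver code; the real init path also
 * programs ring registers and posts the RX buffers before use.
 */
static int example_bringup_mailbox(struct idpf_hw *hw,
				   struct idpf_ctlq_info *cq)
{
	int err;

	err = idpf_ctlq_alloc_ring_res(hw, cq);
	if (err)
		return err;

	/* ...register setup elided; any later bring-up failure must unwind
	 * with idpf_ctlq_dealloc_ring_res(hw, cq)...
	 */

	return 0;
}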