Lines matching the full identifier q (each match is shown with its source line number and the enclosing function)
91 mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, in mtk_wed_wo_queue_kick() argument
95 mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val); in mtk_wed_wo_queue_kick()
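
These two matches are the doorbell helper: it writes a ring index into the queue's cpu_idx register so the firmware picks up newly posted descriptors. A minimal sketch of that pattern, assuming the usual write-barrier-before-doorbell ordering (the wmb() is not visible in the matches):

static void
mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
		      u32 val)
{
	/* assumed: make descriptor updates visible before ringing the doorbell */
	wmb();
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
}
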
99 mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len, in mtk_wed_wo_dequeue() argument
102 int buf_len = SKB_WITH_OVERHEAD(q->buf_size); in mtk_wed_wo_dequeue()
103 int index = (q->tail + 1) % q->n_desc; in mtk_wed_wo_dequeue()
108 if (!q->queued) in mtk_wed_wo_dequeue()
112 q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE); in mtk_wed_wo_dequeue()
113 else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE))) in mtk_wed_wo_dequeue()
116 q->tail = index; in mtk_wed_wo_dequeue()
117 q->queued--; in mtk_wed_wo_dequeue()
119 desc = &q->desc[index]; in mtk_wed_wo_dequeue()
120 entry = &q->entry[index]; in mtk_wed_wo_dequeue()
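
The dequeue matches show the consumer side of the ring: advance tail modulo n_desc, require the DMA_DONE bit unless flushing, then hand the buffer back. A hedged reconstruction around the matched lines; the entry->addr field, the length extraction and the dma_unmap_single() call are assumptions filling the gaps in the listing:

static void *
mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
		   bool flush)
{
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
	int index = (q->tail + 1) % q->n_desc;	/* next slot to consume */
	struct mtk_wed_wo_queue_entry *entry;
	struct mtk_wed_wo_queue_desc *desc;
	void *buf;

	if (!q->queued)
		return NULL;

	if (flush)	/* force-complete the slot when tearing the queue down */
		q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
	else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
		return NULL;	/* firmware has not completed this slot yet */

	q->tail = index;
	q->queued--;

	desc = &q->desc[index];
	entry = &q->entry[index];
	buf = entry->buf;
	if (len)
		/* assumed: the payload length lives in the ctrl word */
		*len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0, le32_to_cpu(desc->ctrl));
	if (buf)
		/* assumed: undo the mapping created at refill time */
		dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
				 DMA_FROM_DEVICE);
	entry->buf = NULL;

	return buf;
}
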
134 mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, in mtk_wed_wo_queue_refill() argument
140 while (q->queued < q->n_desc) { in mtk_wed_wo_queue_refill()
145 buf = page_frag_alloc(&q->cache, q->buf_size, in mtk_wed_wo_queue_refill()
150 addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir); in mtk_wed_wo_queue_refill()
156 q->head = (q->head + 1) % q->n_desc; in mtk_wed_wo_queue_refill()
157 entry = &q->entry[q->head]; in mtk_wed_wo_queue_refill()
159 entry->len = q->buf_size; in mtk_wed_wo_queue_refill()
160 q->entry[q->head].buf = buf; in mtk_wed_wo_queue_refill()
163 struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head]; in mtk_wed_wo_queue_refill()
171 q->queued++; in mtk_wed_wo_queue_refill()
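
Refill is the producer side: allocate page-fragment buffers from q->cache, DMA-map them, advance head modulo n_desc and, for RX, publish the buffer to the descriptor. A sketch under the same assumptions (the GFP flag is truncated in the match, and the entry->addr and desc->buf0 writes are inferred, not part of the matched lines):

static int
mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
			bool rx)
{
	enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
	int n_buf = 0;

	while (q->queued < q->n_desc) {
		struct mtk_wed_wo_queue_entry *entry;
		dma_addr_t addr;
		void *buf;

		buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
		if (dma_mapping_error(wo->hw->dev, addr)) {
			skb_free_frag(buf);
			break;
		}

		q->head = (q->head + 1) % q->n_desc;
		entry = &q->entry[q->head];
		entry->addr = addr;		/* assumed bookkeeping field */
		entry->len = q->buf_size;
		q->entry[q->head].buf = buf;

		if (rx) {
			struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];

			/* assumed: hand the buffer address and length to the
			 * firmware-visible RX descriptor
			 */
			desc->buf0 = cpu_to_le32(addr);
			desc->ctrl = cpu_to_le32(entry->len);
		}
		q->queued++;
		n_buf++;
	}

	return n_buf;
}
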
186 mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) in mtk_wed_wo_rx_run_queue() argument
194 data = mtk_wed_wo_dequeue(wo, q, &len, false); in mtk_wed_wo_rx_run_queue()
198 skb = build_skb(data, q->buf_size); in mtk_wed_wo_rx_run_queue()
217 if (mtk_wed_wo_queue_refill(wo, q, true)) { in mtk_wed_wo_rx_run_queue()
218 u32 index = (q->head - 1) % q->n_desc; in mtk_wed_wo_rx_run_queue()
220 mtk_wed_wo_queue_kick(wo, q, index); in mtk_wed_wo_rx_run_queue()
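
The RX run loop keeps dequeuing completed buffers, wraps each one with build_skb(), and, once the ring is drained, refills it and kicks (head - 1) back to the firmware. A compact sketch; the message dispatch in the middle is a hypothetical stand-in (mtk_wed_wo_rx_handle_skb) since the real handler calls are not among the matches:

static void
mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	for (;;) {
		struct sk_buff *skb;
		void *data;
		u32 len;

		data = mtk_wed_wo_dequeue(wo, q, &len, false);
		if (!data)
			break;

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}

		__skb_put(skb, len);
		/* hypothetical stand-in for the WO MCU message dispatch */
		mtk_wed_wo_rx_handle_skb(wo, skb);
	}

	/* re-arm the ring: refill the consumed slots and ring the doorbell */
	if (mtk_wed_wo_queue_refill(wo, q, true)) {
		u32 index = (q->head - 1) % q->n_desc;

		mtk_wed_wo_queue_kick(wo, q, index);
	}
}
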
257 mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, in mtk_wed_wo_queue_alloc() argument
261 q->regs = *regs; in mtk_wed_wo_queue_alloc()
262 q->n_desc = n_desc; in mtk_wed_wo_queue_alloc()
263 q->buf_size = buf_size; in mtk_wed_wo_queue_alloc()
265 q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc), in mtk_wed_wo_queue_alloc()
266 &q->desc_dma, GFP_KERNEL); in mtk_wed_wo_queue_alloc()
267 if (!q->desc) in mtk_wed_wo_queue_alloc()
270 q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry), in mtk_wed_wo_queue_alloc()
272 if (!q->entry) in mtk_wed_wo_queue_alloc()
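
Allocation copies the register layout, records the ring geometry and uses managed allocators: dmam_alloc_coherent() for the hardware-visible descriptor ring and devm_kzalloc() for the per-slot bookkeeping array. A sketch of the whole function; the parameter list and the GFP flag of the entry allocation are partly assumed where the matches are truncated:

static int
mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
		       int n_desc, int buf_size,
		       struct mtk_wed_wo_queue_regs *regs)
{
	q->regs = *regs;
	q->n_desc = n_desc;
	q->buf_size = buf_size;

	/* hardware-visible descriptor ring, coherent and device-managed */
	q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
				      &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	/* CPU-side bookkeeping, one entry per descriptor */
	q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	return 0;
}
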
279 mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) in mtk_wed_wo_queue_free() argument
281 mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0); in mtk_wed_wo_queue_free()
282 dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc, in mtk_wed_wo_queue_free()
283 q->desc_dma); in mtk_wed_wo_queue_free()
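
Teardown zeroes cpu_idx first, so the ring is stopped before its coherent descriptor memory is released; the devm-allocated entry array is left to managed cleanup. Put together from the two matches above:

static void
mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
	/* stop the ring before releasing its backing memory */
	mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
	dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
			  q->desc_dma);
}
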
287 mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) in mtk_wed_wo_queue_tx_clean() argument
292 for (i = 0; i < q->n_desc; i++) { in mtk_wed_wo_queue_tx_clean()
293 struct mtk_wed_wo_queue_entry *entry = &q->entry[i]; in mtk_wed_wo_queue_tx_clean()
304 if (!q->cache.va) in mtk_wed_wo_queue_tx_clean()
307 page = virt_to_page(q->cache.va); in mtk_wed_wo_queue_tx_clean()
308 __page_frag_cache_drain(page, q->cache.pagecnt_bias); in mtk_wed_wo_queue_tx_clean()
309 memset(&q->cache, 0, sizeof(q->cache)); in mtk_wed_wo_queue_tx_clean()
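
TX cleanup walks every slot, unmaps and frees any buffer still attached, then drains the page_frag cache as the matches just above show. A sketch of the loop; the entry->addr field and the DMA direction are assumptions consistent with the refill path:

	for (i = 0; i < q->n_desc; i++) {
		struct mtk_wed_wo_queue_entry *entry = &q->entry[i];

		if (!entry->buf)
			continue;

		/* assumed: undo the mapping created by the refill path */
		dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
				 DMA_TO_DEVICE);
		skb_free_frag(entry->buf);
		entry->buf = NULL;
	}
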
313 mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) in mtk_wed_wo_queue_rx_clean() argument
318 void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true); in mtk_wed_wo_queue_rx_clean()
326 if (!q->cache.va) in mtk_wed_wo_queue_rx_clean()
329 page = virt_to_page(q->cache.va); in mtk_wed_wo_queue_rx_clean()
330 __page_frag_cache_drain(page, q->cache.pagecnt_bias); in mtk_wed_wo_queue_rx_clean()
331 memset(&q->cache, 0, sizeof(q->cache)); in mtk_wed_wo_queue_rx_clean()
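
RX cleanup takes the other route: it repeatedly calls mtk_wed_wo_dequeue() with flush set, which force-marks each slot done and returns its buffer for freeing, before draining the same cache (the memset just above). A short sketch of that drain loop (skb_free_frag() is assumed, matching how the buffers were allocated):

	/* drain every posted RX buffer by force-completing each slot */
	for (;;) {
		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);

		if (!buf)
			break;

		skb_free_frag(buf);
	}
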
335 mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q) in mtk_wed_wo_queue_reset() argument
337 mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0); in mtk_wed_wo_queue_reset()
338 mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma); in mtk_wed_wo_queue_reset()
339 mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc); in mtk_wed_wo_queue_reset()
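
Reset reprograms the ring from scratch: clear the CPU index, point the hardware at the descriptor ring's DMA address, and set the ring size. A hypothetical bring-up order that ties the matched helpers together (the enclosing context, ring size and buffer size are placeholders, not taken from the listing):

	/* assumed bring-up order for one ring; sizes are placeholders */
	ret = mtk_wed_wo_queue_alloc(wo, q, 256, 2048, &regs);
	if (ret)
		return ret;

	mtk_wed_wo_queue_refill(wo, q, true);	/* post initial RX buffers */
	mtk_wed_wo_queue_reset(wo, q);		/* program base, size, index */
	mtk_wed_wo_queue_kick(wo, q, q->head);	/* hand the slots to firmware */
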
342 int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, in mtk_wed_wo_queue_tx_skb() argument
350 q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx); in mtk_wed_wo_queue_tx_skb()
351 index = (q->head + 1) % q->n_desc; in mtk_wed_wo_queue_tx_skb()
352 if (q->tail == index) { in mtk_wed_wo_queue_tx_skb()
357 entry = &q->entry[index]; in mtk_wed_wo_queue_tx_skb()
363 desc = &q->desc[index]; in mtk_wed_wo_queue_tx_skb()
364 q->head = index; in mtk_wed_wo_queue_tx_skb()
377 mtk_wed_wo_queue_kick(wo, q, q->head); in mtk_wed_wo_queue_tx_skb()
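
TX reads the hardware's dma_idx to find the consumer position, treats head + 1 == tail as ring-full, rejects messages larger than the slot's preallocated buffer, and kicks the new head. A hedged sketch of the core; the dma_sync calls, the descriptor writes and the skb free on the way out are assumptions around the matched lines:

int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
			    struct sk_buff *skb)
{
	struct mtk_wed_wo_queue_entry *entry;
	struct mtk_wed_wo_queue_desc *desc;
	int ret = 0, index;

	/* learn how far the firmware has consumed */
	q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
	index = (q->head + 1) % q->n_desc;
	if (q->tail == index) {			/* ring full */
		ret = -ENOMEM;
		goto out;
	}

	entry = &q->entry[index];
	if (skb->len > entry->len) {		/* larger than the slot buffer */
		ret = -ENOMEM;
		goto out;
	}

	desc = &q->desc[index];
	q->head = index;

	/* assumed: copy into the pre-mapped bounce buffer and publish it */
	dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
				DMA_TO_DEVICE);
	memcpy(entry->buf, skb->data, skb->len);
	dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
				   DMA_TO_DEVICE);
	desc->buf0 = cpu_to_le32(entry->addr);
	desc->ctrl = cpu_to_le32(skb->len | MTK_WED_WO_CTL_DMA_DONE);

	mtk_wed_wo_queue_kick(wo, q, q->head);
out:
	/* assumed: the caller's skb is consumed in all cases */
	dev_kfree_skb(skb);
	return ret;
}
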