Lines matching `q` in the mt76 DMA queue code (drivers/net/wireless/mediatek/mt76/dma.c)

189 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)  in mt76_dma_sync_idx()  argument
191 Q_WRITE(q, desc_base, q->desc_dma); in mt76_dma_sync_idx()
192 if (q->flags & MT_QFLAG_WED_RRO_EN) in mt76_dma_sync_idx()
193 Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc); in mt76_dma_sync_idx()
195 Q_WRITE(q, ring_size, q->ndesc); in mt76_dma_sync_idx()
196 q->head = Q_READ(q, dma_idx); in mt76_dma_sync_idx()
197 q->tail = q->head; in mt76_dma_sync_idx()
201 __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q, in __mt76_dma_queue_reset() argument
204 if (!q || !q->ndesc) in __mt76_dma_queue_reset()
207 if (!mt76_queue_is_wed_rro_ind(q)) { in __mt76_dma_queue_reset()
211 for (i = 0; i < q->ndesc; i++) in __mt76_dma_queue_reset()
212 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); in __mt76_dma_queue_reset()
216 Q_WRITE(q, cpu_idx, 0); in __mt76_dma_queue_reset()
217 Q_WRITE(q, dma_idx, 0); in __mt76_dma_queue_reset()
219 mt76_dma_sync_idx(dev, q); in __mt76_dma_queue_reset()
223 mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_queue_reset() argument
225 __mt76_dma_queue_reset(dev, q, true); in mt76_dma_queue_reset()
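
The mt76_dma_sync_idx() and queue-reset lines above reprogram the ring registers, mark every descriptor as already completed, zero both index registers, and then re-read the hardware's dma_idx so the software head and tail restart from wherever the hardware is (WED RRO indication rings skip the descriptor marking). Below is a minimal userspace model of that bookkeeping, with plain fields standing in for the Q_READ/Q_WRITE MMIO accessors and made-up constants; it is an illustration, not the driver's code:

#include <stdio.h>

#define NDESC 8
#define DMA_DONE 0x80000000u            /* stand-in for MT_DMA_CTL_DMA_DONE */

struct ring {
	unsigned int ctrl[NDESC];       /* descriptor control words */
	unsigned int head, tail;        /* software view of the ring */
	unsigned int cpu_idx, dma_idx;  /* stand-ins for the hardware registers */
};

static void ring_sync_idx(struct ring *q)
{
	q->head = q->dma_idx;           /* Q_READ(q, dma_idx) in the driver */
	q->tail = q->head;
}

static void ring_reset(struct ring *q)
{
	for (unsigned int i = 0; i < NDESC; i++)
		q->ctrl[i] = DMA_DONE;  /* nothing pending for the hardware */
	q->cpu_idx = 0;                 /* Q_WRITE(q, cpu_idx, 0) */
	q->dma_idx = 0;                 /* Q_WRITE(q, dma_idx, 0) */
	ring_sync_idx(q);               /* head and tail restart at 0 */
}

int main(void)
{
	struct ring q = { .head = 5, .tail = 3, .dma_idx = 5 };

	ring_reset(&q);
	printf("head=%u tail=%u\n", q.head, q.tail);
	return 0;
}
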
229 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_add_rx_buf() argument
232 struct mt76_queue_entry *entry = &q->entry[q->head]; in mt76_dma_add_rx_buf()
235 int idx = q->head; in mt76_dma_add_rx_buf()
239 if (mt76_queue_is_wed_rro_ind(q)) { in mt76_dma_add_rx_buf()
242 rro_desc = (struct mt76_wed_rro_desc *)q->desc; in mt76_dma_add_rx_buf()
243 data = &rro_desc[q->head]; in mt76_dma_add_rx_buf()
247 desc = &q->desc[q->head]; in mt76_dma_add_rx_buf()
253 if (mt76_queue_is_wed_rx(q)) { in mt76_dma_add_rx_buf()
280 q->head = (q->head + 1) % q->ndesc; in mt76_dma_add_rx_buf()
281 q->queued++; in mt76_dma_add_rx_buf()
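
mt76_dma_add_rx_buf() is the producer side of the RX ring: it fills the descriptor at q->head (or the WED RRO descriptor for indication rings), then advances head modulo the ring size and bumps the queued count. A stripped-down sketch follows; in the driver the full-ring check is left to the caller, but it is folded in here so the example stands alone:

#include <stdio.h>

#define NDESC 4

struct rx_ring {
	unsigned long long addr[NDESC];  /* stands in for the descriptor buf0 address */
	unsigned int head, queued;
};

/* Returns the descriptor index that was filled, or -1 if the ring is full. */
static int ring_add_rx_buf(struct rx_ring *q, unsigned long long dma_addr)
{
	int idx = q->head;

	if (q->queued == NDESC)
		return -1;

	q->addr[idx] = dma_addr;
	q->head = (q->head + 1) % NDESC;
	q->queued++;
	return idx;
}

int main(void)
{
	struct rx_ring q = { 0 };

	for (int i = 0; i < 5; i++)
		printf("add #%d -> idx %d\n", i,
		       ring_add_rx_buf(&q, 0x1000ULL + i * 0x800));
	return 0;
}
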
287 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_add_buf() argument
297 q->entry[q->head].txwi = DMA_DUMMY_DATA; in mt76_dma_add_buf()
298 q->entry[q->head].skip_buf0 = true; in mt76_dma_add_buf()
304 idx = q->head; in mt76_dma_add_buf()
305 next = (q->head + 1) % q->ndesc; in mt76_dma_add_buf()
307 desc = &q->desc[idx]; in mt76_dma_add_buf()
308 entry = &q->entry[idx]; in mt76_dma_add_buf()
344 q->head = next; in mt76_dma_add_buf()
345 q->queued++; in mt76_dma_add_buf()
348 q->entry[idx].txwi = txwi; in mt76_dma_add_buf()
349 q->entry[idx].skb = skb; in mt76_dma_add_buf()
350 q->entry[idx].wcid = 0xffff; in mt76_dma_add_buf()
356 mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx, in mt76_dma_tx_cleanup_idx() argument
359 struct mt76_queue_entry *e = &q->entry[idx]; in mt76_dma_tx_cleanup_idx()
377 mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_kick_queue() argument
380 Q_WRITE(q, cpu_idx, q->head); in mt76_dma_kick_queue()
384 mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush) in mt76_dma_tx_cleanup() argument
389 if (!q || !q->ndesc) in mt76_dma_tx_cleanup()
392 spin_lock_bh(&q->cleanup_lock); in mt76_dma_tx_cleanup()
396 last = Q_READ(q, dma_idx); in mt76_dma_tx_cleanup()
398 while (q->queued > 0 && q->tail != last) { in mt76_dma_tx_cleanup()
399 mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry); in mt76_dma_tx_cleanup()
400 mt76_queue_tx_complete(dev, q, &entry); in mt76_dma_tx_cleanup()
407 if (!flush && q->tail == last) in mt76_dma_tx_cleanup()
408 last = Q_READ(q, dma_idx); in mt76_dma_tx_cleanup()
410 spin_unlock_bh(&q->cleanup_lock); in mt76_dma_tx_cleanup()
413 spin_lock_bh(&q->lock); in mt76_dma_tx_cleanup()
414 mt76_dma_sync_idx(dev, q); in mt76_dma_tx_cleanup()
415 mt76_dma_kick_queue(dev, q); in mt76_dma_tx_cleanup()
416 spin_unlock_bh(&q->lock); in mt76_dma_tx_cleanup()
419 if (!q->queued) in mt76_dma_tx_cleanup()
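
On TX, the hardware advances dma_idx as it finishes descriptors, and mt76_dma_tx_cleanup() completes entries from q->tail up to a snapshot of that index, re-reading the snapshot once it catches up so completions that land mid-loop are not missed; a flush completes everything that is queued. A userspace model with the hardware index as a plain variable instead of the Q_READ MMIO access:

#include <stdio.h>
#include <stdbool.h>
#include <limits.h>

#define NDESC 8

struct tx_ring {
	unsigned int tail, queued;
	unsigned int dma_idx;             /* stand-in for Q_READ(q, dma_idx) */
};

static void tx_cleanup(struct tx_ring *q, bool flush)
{
	unsigned int last = flush ? UINT_MAX : q->dma_idx;

	while (q->queued > 0 && q->tail != last) {
		printf("complete descriptor %u\n", q->tail);
		q->tail = (q->tail + 1) % NDESC;
		q->queued--;

		/* caught up with the snapshot: take a fresh one */
		if (!flush && q->tail == last)
			last = q->dma_idx;
	}
}

int main(void)
{
	struct tx_ring q = { .tail = 2, .queued = 4, .dma_idx = 5 };

	tx_cleanup(&q, false);            /* completes descriptors 2, 3, 4 */
	printf("tail=%u queued=%u\n", q.tail, q.queued);
	return 0;
}

Re-reading dma_idx only when the loop catches up keeps register reads infrequent while still draining completions that arrive during the walk.
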
424 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx, in mt76_dma_get_buf() argument
427 struct mt76_queue_entry *e = &q->entry[idx]; in mt76_dma_get_buf()
428 struct mt76_desc *desc = &q->desc[idx]; in mt76_dma_get_buf()
432 if (mt76_queue_is_wed_rro_ind(q)) in mt76_dma_get_buf()
448 if (mt76_queue_is_wed_rx(q)) { in mt76_dma_get_buf()
456 SKB_WITH_OVERHEAD(q->buf_size), in mt76_dma_get_buf()
457 page_pool_get_dma_dir(q->page_pool)); in mt76_dma_get_buf()
468 SKB_WITH_OVERHEAD(q->buf_size), in mt76_dma_get_buf()
469 page_pool_get_dma_dir(q->page_pool)); in mt76_dma_get_buf()
478 mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush, in mt76_dma_dequeue() argument
481 int idx = q->tail; in mt76_dma_dequeue()
484 if (!q->queued) in mt76_dma_dequeue()
487 if (mt76_queue_is_wed_rro_data(q)) in mt76_dma_dequeue()
490 if (!mt76_queue_is_wed_rro_ind(q)) { in mt76_dma_dequeue()
492 q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_dequeue()
493 else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE))) in mt76_dma_dequeue()
497 q->tail = (q->tail + 1) % q->ndesc; in mt76_dma_dequeue()
498 q->queued--; in mt76_dma_dequeue()
500 return mt76_dma_get_buf(dev, q, idx, len, info, more, drop); in mt76_dma_dequeue()
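
mt76_dma_dequeue() only reclaims the descriptor at q->tail once the hardware has set the DMA-done bit in its control word (unconditionally when flushing, in which case the driver also marks the descriptor done itself), then advances tail and drops the queued count. A simplified model, with the endianness conversion omitted:

#include <stdio.h>
#include <stdbool.h>

#define NDESC 4
#define DMA_DONE 0x80000000u       /* stand-in for MT_DMA_CTL_DMA_DONE */

struct rx_ring {
	unsigned int ctrl[NDESC];  /* descriptor control words */
	unsigned int tail, queued;
};

/* Returns the dequeued index, or -1 if nothing is ready. */
static int dequeue(struct rx_ring *q, bool flush)
{
	int idx = q->tail;

	if (!q->queued)
		return -1;
	if (!flush && !(q->ctrl[idx] & DMA_DONE))
		return -1;              /* hardware has not finished this one yet */

	/* the driver also sets the done bit itself when flushing */
	q->tail = (q->tail + 1) % NDESC;
	q->queued--;
	return idx;
}

int main(void)
{
	struct rx_ring q = { .ctrl = { DMA_DONE, 0, 0, 0 }, .queued = 2 };

	printf("%d\n", dequeue(&q, false));  /* 0: descriptor 0 is done */
	printf("%d\n", dequeue(&q, false));  /* -1: descriptor 1 is not done yet */
	printf("%d\n", dequeue(&q, true));   /* 1: a flush ignores the done bit */
	return 0;
}
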
504 mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_tx_queue_skb_raw() argument
513 if (q->queued + 1 >= q->ndesc - 1) in mt76_dma_tx_queue_skb_raw()
524 spin_lock_bh(&q->lock); in mt76_dma_tx_queue_skb_raw()
525 mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL); in mt76_dma_tx_queue_skb_raw()
526 mt76_dma_kick_queue(dev, q); in mt76_dma_tx_queue_skb_raw()
527 spin_unlock_bh(&q->lock); in mt76_dma_tx_queue_skb_raw()
537 mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_tx_queue_skb() argument
591 if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) { in mt76_dma_tx_queue_skb()
604 return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf, in mt76_dma_tx_queue_skb()
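
The room check in mt76_dma_tx_queue_skb() reflects that each mt76_desc carries two buffer pointers (buf0/buf1), so nbuf scatter/gather segments occupy (nbuf + 1) / 2 descriptors, and one slot is always kept free so the producer never wraps onto the consumer. A tiny worked example of that arithmetic:

#include <stdio.h>

/* nbuf DMA segments fit into (nbuf + 1) / 2 descriptors, two buffers each;
 * ndesc - 1 keeps one descriptor slot permanently free. */
static int has_room(unsigned int queued, unsigned int nbuf, unsigned int ndesc)
{
	return queued + (nbuf + 1) / 2 < ndesc - 1;
}

int main(void)
{
	/* e.g. 3 scatter/gather segments need (3 + 1) / 2 = 2 descriptors */
	printf("%d\n", has_room(124, 3, 128));  /* 1: 124 + 2 < 127 */
	printf("%d\n", has_room(125, 3, 128));  /* 0: 125 + 2 is not < 127 */
	return 0;
}
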
636 mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_rx_fill() argument
639 int len = SKB_WITH_OVERHEAD(q->buf_size); in mt76_dma_rx_fill()
642 if (!q->ndesc) in mt76_dma_rx_fill()
645 spin_lock_bh(&q->lock); in mt76_dma_rx_fill()
647 while (q->queued < q->ndesc - 1) { in mt76_dma_rx_fill()
654 if (mt76_queue_is_wed_rro_ind(q)) in mt76_dma_rx_fill()
657 buf = mt76_get_page_pool_buf(q, &offset, q->buf_size); in mt76_dma_rx_fill()
662 dir = page_pool_get_dma_dir(q->page_pool); in mt76_dma_rx_fill()
665 qbuf.addr = addr + q->buf_offset; in mt76_dma_rx_fill()
667 qbuf.len = len - q->buf_offset; in mt76_dma_rx_fill()
669 if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) { in mt76_dma_rx_fill()
676 if (frames || mt76_queue_is_wed_rx(q)) in mt76_dma_rx_fill()
677 mt76_dma_kick_queue(dev, q); in mt76_dma_rx_fill()
679 spin_unlock_bh(&q->lock); in mt76_dma_rx_fill()
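
mt76_dma_rx_fill() keeps allocating buffers from the queue's page pool and queuing them until only one slot remains free, then kicks the hardware once at the end rather than per buffer. A sketch using the same simplified ring bookkeeping as the earlier examples, with malloc standing in for the page-pool helpers:

#include <stdio.h>
#include <stdlib.h>

#define NDESC 8

struct rx_ring {
	void *buf[NDESC];
	unsigned int head, queued;
	unsigned int cpu_idx;              /* stand-in for the hardware register */
};

static int rx_fill(struct rx_ring *q, size_t buf_size)
{
	int frames = 0;

	while (q->queued < NDESC - 1) {    /* keep one slot free */
		void *buf = malloc(buf_size);  /* page pool in the driver */

		if (!buf)
			break;
		q->buf[q->head] = buf;
		q->head = (q->head + 1) % NDESC;
		q->queued++;
		frames++;
	}

	if (frames)
		q->cpu_idx = q->head;      /* mt76_dma_kick_queue() */
	return frames;
}

int main(void)
{
	struct rx_ring q = { .head = 5, .queued = 2 };
	int n = rx_fill(&q, 2048);         /* buffers stay owned by the ring */

	printf("refilled %d buffers, head=%u\n", n, q.head);
	return 0;
}

Deferring the cpu_idx update until after the loop publishes the new head to the hardware once for the whole refill instead of once per buffer.
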
684 int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset) in mt76_dma_wed_setup() argument
690 if (!q || !q->ndesc) in mt76_dma_wed_setup()
693 flags = q->flags; in mt76_dma_wed_setup()
694 if (!q->wed || !mtk_wed_device_active(q->wed)) in mt76_dma_wed_setup()
695 q->flags &= ~MT_QFLAG_WED; in mt76_dma_wed_setup()
697 if (!(q->flags & MT_QFLAG_WED)) in mt76_dma_wed_setup()
700 type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags); in mt76_dma_wed_setup()
701 ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags); in mt76_dma_wed_setup()
705 ret = mtk_wed_device_tx_ring_setup(q->wed, ring, q->regs, in mt76_dma_wed_setup()
708 q->wed_regs = q->wed->tx_ring[ring].reg_base; in mt76_dma_wed_setup()
712 q->flags = 0; in mt76_dma_wed_setup()
713 mt76_dma_queue_reset(dev, q); in mt76_dma_wed_setup()
714 mt76_dma_rx_fill(dev, q, false); in mt76_dma_wed_setup()
716 ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs); in mt76_dma_wed_setup()
718 q->wed_regs = q->wed->txfree_ring.reg_base; in mt76_dma_wed_setup()
721 ret = mtk_wed_device_rx_ring_setup(q->wed, ring, q->regs, in mt76_dma_wed_setup()
724 q->wed_regs = q->wed->rx_ring[ring].reg_base; in mt76_dma_wed_setup()
727 q->flags &= ~MT_QFLAG_WED; in mt76_dma_wed_setup()
728 __mt76_dma_queue_reset(dev, q, false); in mt76_dma_wed_setup()
729 mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs); in mt76_dma_wed_setup()
730 q->head = q->ndesc - 1; in mt76_dma_wed_setup()
731 q->queued = q->head; in mt76_dma_wed_setup()
734 q->flags &= ~MT_QFLAG_WED; in mt76_dma_wed_setup()
735 __mt76_dma_queue_reset(dev, q, false); in mt76_dma_wed_setup()
736 mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs); in mt76_dma_wed_setup()
737 q->head = q->ndesc - 1; in mt76_dma_wed_setup()
738 q->queued = q->head; in mt76_dma_wed_setup()
741 q->flags &= ~MT_QFLAG_WED; in mt76_dma_wed_setup()
742 mt76_dma_queue_reset(dev, q); in mt76_dma_wed_setup()
743 mt76_dma_rx_fill(dev, q, false); in mt76_dma_wed_setup()
744 mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs); in mt76_dma_wed_setup()
750 q->flags = flags; in mt76_dma_wed_setup()
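
mt76_dma_wed_setup() decides how a queue attaches to WED (MediaTek's Wireless Ethernet Dispatch offload) by pulling a ring type and ring number out of bitfields packed into q->flags with the kernel's FIELD_GET() mask extractor, and clears MT_QFLAG_WED when no active WED device is attached. The extraction mechanism is shown below with hypothetical mask values; the driver's real MT_QFLAG_* definitions live elsewhere and differ:

#include <stdio.h>

#define QFLAG_RING_MASK 0x000fu      /* hypothetical: bits 3..0 */
#define QFLAG_TYPE_MASK 0x00f0u      /* hypothetical: bits 7..4 */
#define QFLAG_WED_EN    0x0100u      /* hypothetical enable bit */

/* Same idea as FIELD_GET(mask, val): mask the field out and shift it down
 * by the mask's lowest set bit. */
static unsigned int field_get(unsigned int mask, unsigned int val)
{
	return (val & mask) / (mask & -mask);
}

int main(void)
{
	unsigned int flags = QFLAG_WED_EN | 0x0020u /* type 2 */ | 0x0003u /* ring 3 */;

	if (flags & QFLAG_WED_EN)
		printf("wed type %u, ring %u\n",
		       field_get(QFLAG_TYPE_MASK, flags),
		       field_get(QFLAG_RING_MASK, flags));
	return 0;
}
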
760 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_alloc_queue() argument
766 spin_lock_init(&q->lock); in mt76_dma_alloc_queue()
767 spin_lock_init(&q->cleanup_lock); in mt76_dma_alloc_queue()
769 q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE; in mt76_dma_alloc_queue()
770 q->ndesc = n_desc; in mt76_dma_alloc_queue()
771 q->buf_size = bufsize; in mt76_dma_alloc_queue()
772 q->hw_idx = idx; in mt76_dma_alloc_queue()
774 size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc) in mt76_dma_alloc_queue()
776 q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size, in mt76_dma_alloc_queue()
777 &q->desc_dma, GFP_KERNEL); in mt76_dma_alloc_queue()
778 if (!q->desc) in mt76_dma_alloc_queue()
781 if (mt76_queue_is_wed_rro_ind(q)) { in mt76_dma_alloc_queue()
785 rro_desc = (struct mt76_wed_rro_desc *)q->desc; in mt76_dma_alloc_queue()
786 for (i = 0; i < q->ndesc; i++) { in mt76_dma_alloc_queue()
794 size = q->ndesc * sizeof(*q->entry); in mt76_dma_alloc_queue()
795 q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL); in mt76_dma_alloc_queue()
796 if (!q->entry) in mt76_dma_alloc_queue()
799 ret = mt76_create_page_pool(dev, q); in mt76_dma_alloc_queue()
803 ret = mt76_dma_wed_setup(dev, q, false); in mt76_dma_alloc_queue()
808 if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) || in mt76_dma_alloc_queue()
809 mt76_queue_is_wed_tx_free(q)) in mt76_dma_alloc_queue()
813 mt76_dma_queue_reset(dev, q); in mt76_dma_alloc_queue()
819 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_rx_cleanup() argument
824 if (!q->ndesc) in mt76_dma_rx_cleanup()
828 spin_lock_bh(&q->lock); in mt76_dma_rx_cleanup()
829 buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL); in mt76_dma_rx_cleanup()
830 spin_unlock_bh(&q->lock); in mt76_dma_rx_cleanup()
835 if (!mt76_queue_is_wed_rro(q)) in mt76_dma_rx_cleanup()
839 spin_lock_bh(&q->lock); in mt76_dma_rx_cleanup()
840 if (q->rx_head) { in mt76_dma_rx_cleanup()
841 dev_kfree_skb(q->rx_head); in mt76_dma_rx_cleanup()
842 q->rx_head = NULL; in mt76_dma_rx_cleanup()
845 spin_unlock_bh(&q->lock); in mt76_dma_rx_cleanup()
851 struct mt76_queue *q = &dev->q_rx[qid]; in mt76_dma_rx_reset() local
853 if (!q->ndesc) in mt76_dma_rx_reset()
856 if (!mt76_queue_is_wed_rro_ind(q)) { in mt76_dma_rx_reset()
859 for (i = 0; i < q->ndesc; i++) in mt76_dma_rx_reset()
860 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_rx_reset()
863 mt76_dma_rx_cleanup(dev, q); in mt76_dma_rx_reset()
866 mt76_dma_wed_setup(dev, q, true); in mt76_dma_rx_reset()
868 if (mt76_queue_is_wed_tx_free(q)) in mt76_dma_rx_reset()
872 mt76_queue_is_wed_rro(q)) in mt76_dma_rx_reset()
875 mt76_dma_sync_idx(dev, q); in mt76_dma_rx_reset()
876 mt76_dma_rx_fill(dev, q, false); in mt76_dma_rx_reset()
880 mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, in mt76_add_fragment() argument
883 struct sk_buff *skb = q->rx_head; in mt76_add_fragment()
889 int offset = data - page_address(page) + q->buf_offset; in mt76_add_fragment()
891 skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size); in mt76_add_fragment()
899 q->rx_head = NULL; in mt76_add_fragment()
901 dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info); in mt76_add_fragment()
907 mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) in mt76_dma_rx_process() argument
913 bool allow_direct = !mt76_queue_is_wed_rx(q); in mt76_dma_rx_process()
917 mt76_queue_is_wed_tx_free(q)) { in mt76_dma_rx_process()
918 dma_idx = Q_READ(q, dma_idx); in mt76_dma_rx_process()
927 if (q->tail == dma_idx) in mt76_dma_rx_process()
928 dma_idx = Q_READ(q, dma_idx); in mt76_dma_rx_process()
930 if (q->tail == dma_idx) in mt76_dma_rx_process()
934 data = mt76_dma_dequeue(dev, q, false, &len, &info, &more, in mt76_dma_rx_process()
942 if (q->rx_head) in mt76_dma_rx_process()
943 data_len = q->buf_size; in mt76_dma_rx_process()
945 data_len = SKB_WITH_OVERHEAD(q->buf_size); in mt76_dma_rx_process()
947 if (data_len < len + q->buf_offset) { in mt76_dma_rx_process()
948 dev_kfree_skb(q->rx_head); in mt76_dma_rx_process()
949 q->rx_head = NULL; in mt76_dma_rx_process()
953 if (q->rx_head) { in mt76_dma_rx_process()
954 mt76_add_fragment(dev, q, data, len, more, info, in mt76_dma_rx_process()
963 skb = napi_build_skb(data, q->buf_size); in mt76_dma_rx_process()
967 skb_reserve(skb, q->buf_offset); in mt76_dma_rx_process()
976 q->rx_head = skb; in mt76_dma_rx_process()
980 dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info); in mt76_dma_rx_process()
987 mt76_dma_rx_fill(dev, q, true); in mt76_dma_rx_process()
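
In mt76_dma_rx_process(), the first buffer of a frame has to leave room for the skb_shared_info tail (SKB_WITH_OVERHEAD(q->buf_size)) because it becomes the skb head via napi_build_skb(), while follow-up buffers of a frame spanning several RX buffers may use the full buffer size; if the reported length plus buf_offset does not fit, the partially assembled frame is dropped. A standalone version of that check, with the overhead as a plain parameter and made-up sizes:

#include <stdio.h>
#include <stdbool.h>

/* Returns true if a received segment of 'len' bytes fits in the buffer. */
static bool rx_seg_fits(bool first_segment, size_t buf_size,
			size_t overhead, size_t buf_offset, size_t len)
{
	size_t data_len = first_segment ? buf_size - overhead : buf_size;

	return data_len >= len + buf_offset;
}

int main(void)
{
	/* hypothetical numbers: 2048-byte buffers, 320 bytes of skb overhead */
	printf("%d\n", rx_seg_fits(true, 2048, 320, 0, 1700));   /* 1: fits */
	printf("%d\n", rx_seg_fits(true, 2048, 320, 0, 1800));   /* 0: head buffer too small */
	printf("%d\n", rx_seg_fits(false, 2048, 320, 0, 1800));  /* 1: continuation may use it all */
	return 0;
}
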
1093 struct mt76_queue *q = &dev->q_rx[i]; in mt76_dma_cleanup() local
1096 mt76_queue_is_wed_rro(q)) in mt76_dma_cleanup()
1100 mt76_dma_rx_cleanup(dev, q); in mt76_dma_cleanup()
1102 page_pool_destroy(q->page_pool); in mt76_dma_cleanup()