Lines matching refs: q

189 mt76_dma_queue_magic_cnt_init(struct mt76_dev *dev, struct mt76_queue *q)  in mt76_dma_queue_magic_cnt_init()  argument
191 if (!mt76_queue_is_wed_rro(q)) in mt76_dma_queue_magic_cnt_init()
194 q->magic_cnt = 0; in mt76_dma_queue_magic_cnt_init()
195 if (mt76_queue_is_wed_rro_ind(q)) { in mt76_dma_queue_magic_cnt_init()
201 rro_desc = (struct mt76_wed_rro_desc *)q->desc; in mt76_dma_queue_magic_cnt_init()
202 for (i = 0; i < q->ndesc; i++) { in mt76_dma_queue_magic_cnt_init()
208 } else if (mt76_queue_is_wed_rro_rxdmad_c(q)) { in mt76_dma_queue_magic_cnt_init()
209 struct mt76_rro_rxdmad_c *dmad = (void *)q->desc; in mt76_dma_queue_magic_cnt_init()
214 for (i = 0; i < q->ndesc; i++) in mt76_dma_queue_magic_cnt_init()
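
The mt76_dma_queue_magic_cnt_init() references above show the driver zeroing q->magic_cnt for WED RRO queues and then walking the descriptor array (mt76_wed_rro_desc for indication rings, mt76_rro_rxdmad_c for rxdmad_c rings), presumably stamping each slot so stale descriptors cannot be mistaken for fresh hardware write-backs. A minimal userspace sketch of that seeding idea, with simplified stand-in types and constants rather than the driver's own definitions:

#include <stdint.h>

#define MAGIC_CNT 8          /* wraps like MT_DMA_MAGIC_CNT (stand-in value) */

struct rro_desc { uint32_t magic; uint32_t data; };  /* simplified descriptor */

struct ring {
        struct rro_desc *desc;
        int ndesc;
        uint8_t magic_cnt;   /* software copy of the expected magic value */
};

/* Seed the ring: software expects magic 0 first, while every descriptor
 * carries the "last" value, so no slot looks freshly written by hardware. */
static void ring_magic_init(struct ring *q)
{
        q->magic_cnt = 0;
        for (int i = 0; i < q->ndesc; i++)
                q->desc[i].magic = MAGIC_CNT - 1;
}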
220 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_sync_idx() argument
222 Q_WRITE(q, desc_base, q->desc_dma); in mt76_dma_sync_idx()
223 if (q->flags & MT_QFLAG_WED_RRO_EN) in mt76_dma_sync_idx()
224 Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc); in mt76_dma_sync_idx()
226 Q_WRITE(q, ring_size, q->ndesc); in mt76_dma_sync_idx()
227 q->head = Q_READ(q, dma_idx); in mt76_dma_sync_idx()
228 q->tail = q->head; in mt76_dma_sync_idx()
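
mt76_dma_sync_idx() reprograms the descriptor base and ring size (OR-ing MT_DMA_RRO_EN into the size for RRO-enabled rings) and then adopts the hardware's current dma_idx as both software head and tail. A rough model of that resynchronization against a mocked register block; the names here are illustrative, not the driver's Q_READ/Q_WRITE macros:

#include <stdint.h>

struct ring_regs {               /* mock of the per-queue register block */
        uint32_t desc_base;
        uint32_t ring_size;
        uint32_t cpu_idx;
        uint32_t dma_idx;
};

struct ring {
        struct ring_regs *regs;
        uint32_t desc_dma;
        int ndesc;
        int head, tail;
        int rro_en;              /* stands in for MT_QFLAG_WED_RRO_EN */
};

#define DMA_RRO_EN 0x80000000u   /* illustrative flag bit */

static void ring_sync_idx(struct ring *q)
{
        q->regs->desc_base = q->desc_dma;
        q->regs->ring_size = q->rro_en ? (DMA_RRO_EN | q->ndesc) : q->ndesc;
        /* adopt the hardware's notion of where the ring currently is */
        q->head = q->regs->dma_idx;
        q->tail = q->head;
}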
231 void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_queue_reset() argument
234 if (!q || !q->ndesc) in mt76_dma_queue_reset()
237 if (!mt76_queue_is_wed_rro_ind(q) && in mt76_dma_queue_reset()
238 !mt76_queue_is_wed_rro_rxdmad_c(q)) { in mt76_dma_queue_reset()
242 for (i = 0; i < q->ndesc; i++) in mt76_dma_queue_reset()
243 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_queue_reset()
246 mt76_dma_queue_magic_cnt_init(dev, q); in mt76_dma_queue_reset()
248 if (mt76_queue_is_emi(q)) in mt76_dma_queue_reset()
249 *q->emi_cpu_idx = 0; in mt76_dma_queue_reset()
251 Q_WRITE(q, cpu_idx, 0); in mt76_dma_queue_reset()
252 Q_WRITE(q, dma_idx, 0); in mt76_dma_queue_reset()
254 mt76_dma_sync_idx(dev, q); in mt76_dma_queue_reset()
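
mt76_dma_queue_reset() pre-marks every descriptor of a non-RRO ring as DMA-done, re-seeds the RRO magic counters, clears either the EMI CPU index word or the cpu_idx/dma_idx registers, and finally resyncs the indices. A condensed model of that ordering, again with stand-in types:

#include <stdint.h>

#define DMA_CTL_DMA_DONE (1u << 31)      /* stand-in for MT_DMA_CTL_DMA_DONE */

struct desc { uint32_t ctrl; };
struct ring {
        struct desc *desc;
        int ndesc;
        uint32_t *emi_cpu_idx;           /* non-NULL only for EMI rings */
        uint32_t cpu_idx, dma_idx;       /* mock registers */
        int head, tail;
};

static void ring_reset(struct ring *q)
{
        if (!q || !q->ndesc)
                return;

        /* mark every slot as already completed (the driver skips this for RRO rings) */
        for (int i = 0; i < q->ndesc; i++)
                q->desc[i].ctrl = DMA_CTL_DMA_DONE;

        if (q->emi_cpu_idx)
                *q->emi_cpu_idx = 0;     /* EMI rings publish the index in memory */
        else
                q->cpu_idx = 0;
        q->dma_idx = 0;
        q->head = q->tail = 0;           /* stands in for mt76_dma_sync_idx() */
}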
258 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_add_rx_buf() argument
261 struct mt76_queue_entry *entry = &q->entry[q->head]; in mt76_dma_add_rx_buf()
265 int idx = q->head; in mt76_dma_add_rx_buf()
268 if (mt76_queue_is_wed_rro_ind(q)) { in mt76_dma_add_rx_buf()
271 rro_desc = (struct mt76_wed_rro_desc *)q->desc; in mt76_dma_add_rx_buf()
272 data = &rro_desc[q->head]; in mt76_dma_add_rx_buf()
274 } else if (mt76_queue_is_wed_rro_rxdmad_c(q)) { in mt76_dma_add_rx_buf()
275 data = &q->desc[q->head]; in mt76_dma_add_rx_buf()
279 desc = &q->desc[q->head]; in mt76_dma_add_rx_buf()
285 if (mt76_queue_is_wed_rx(q) || mt76_queue_is_wed_rro_data(q)) { in mt76_dma_add_rx_buf()
299 txwi->qid = q - dev->q_rx; in mt76_dma_add_rx_buf()
302 if (mt76_queue_is_wed_rro_msdu_pg(q) && in mt76_dma_add_rx_buf()
304 if (dev->drv->rx_rro_add_msdu_page(dev, q, buf->addr, data)) in mt76_dma_add_rx_buf()
308 if (q->flags & MT_QFLAG_WED_RRO_EN) { in mt76_dma_add_rx_buf()
309 info |= FIELD_PREP(MT_DMA_MAGIC_MASK, q->magic_cnt); in mt76_dma_add_rx_buf()
310 if ((q->head + 1) == q->ndesc) in mt76_dma_add_rx_buf()
311 q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_MAGIC_CNT; in mt76_dma_add_rx_buf()
326 q->head = (q->head + 1) % q->ndesc; in mt76_dma_add_rx_buf()
327 q->queued++; in mt76_dma_add_rx_buf()
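
mt76_dma_add_rx_buf() installs a fresh buffer at q->head; for RRO-enabled rings it folds the current magic counter into the descriptor's info word and bumps the counter once the write lands in the last slot, then advances head modulo ndesc and increments q->queued. The sketch below covers only that index and magic bookkeeping (DMA mapping, WED token handling and the msdu-page hook are left out), using hypothetical simplified structures:

#include <stdint.h>

#define MAGIC_CNT 8                      /* stand-in for MT_DMA_MAGIC_CNT */

struct desc { uint32_t buf, ctrl, info; };
struct ring {
        struct desc *desc;
        void **entry_buf;                /* simplified per-slot bookkeeping */
        int ndesc, head, queued;
        uint8_t magic_cnt;
        int rro_en;
};

static int ring_add_rx_buf(struct ring *q, uint32_t dma_addr, void *buf)
{
        int idx = q->head;
        uint32_t info = 0;

        if (q->queued >= q->ndesc - 1)
                return -1;               /* ring full */

        if (q->rro_en) {
                info = q->magic_cnt;     /* magic travels in the info word */
                if (idx + 1 == q->ndesc) /* last slot: next lap uses a new magic */
                        q->magic_cnt = (q->magic_cnt + 1) % MAGIC_CNT;
        }

        q->desc[idx].buf = dma_addr;
        q->desc[idx].info = info;
        q->entry_buf[idx] = buf;

        q->head = (q->head + 1) % q->ndesc;
        q->queued++;
        return idx;
}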
333 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_add_buf() argument
343 q->entry[q->head].txwi = DMA_DUMMY_DATA; in mt76_dma_add_buf()
344 q->entry[q->head].skip_buf0 = true; in mt76_dma_add_buf()
350 idx = q->head; in mt76_dma_add_buf()
351 next = (q->head + 1) % q->ndesc; in mt76_dma_add_buf()
353 desc = &q->desc[idx]; in mt76_dma_add_buf()
354 entry = &q->entry[idx]; in mt76_dma_add_buf()
390 q->head = next; in mt76_dma_add_buf()
391 q->queued++; in mt76_dma_add_buf()
394 q->entry[idx].txwi = txwi; in mt76_dma_add_buf()
395 q->entry[idx].skb = skb; in mt76_dma_add_buf()
396 q->entry[idx].wcid = 0xffff; in mt76_dma_add_buf()
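
mt76_dma_add_buf() is the TX-side counterpart: it walks the buffer list, claiming one descriptor per (up to) two buffers, stepping head to (head + 1) % ndesc each time, and records txwi, skb and a wcid of 0xffff on the last entry so completion can hand them back. A trimmed-down model with one buffer per descriptor and no DMA fields, purely to show the head/queued walk:

struct entry { void *skb; void *txwi; unsigned short wcid; };
struct ring {
        struct entry *entry;
        int ndesc, head, queued;
};

/* Queue nbufs consecutive slots, return the index of the last one so the
 * caller can attach skb/txwi there; -1 if the ring cannot hold them. */
static int ring_add_buf(struct ring *q, int nbufs, void *skb, void *txwi)
{
        int idx = -1;

        if (q->queued + nbufs >= q->ndesc - 1)
                return -1;

        while (nbufs--) {
                idx = q->head;
                q->head = (q->head + 1) % q->ndesc;
                q->queued++;
        }

        q->entry[idx].skb = skb;
        q->entry[idx].txwi = txwi;
        q->entry[idx].wcid = 0xffff;     /* "no station" marker, as in the listing */
        return idx;
}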
402 mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx, in mt76_dma_tx_cleanup_idx() argument
405 struct mt76_queue_entry *e = &q->entry[idx]; in mt76_dma_tx_cleanup_idx()
423 mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_kick_queue() argument
426 if (mt76_queue_is_emi(q)) in mt76_dma_kick_queue()
427 *q->emi_cpu_idx = cpu_to_le16(q->head); in mt76_dma_kick_queue()
429 Q_WRITE(q, cpu_idx, q->head); in mt76_dma_kick_queue()
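
mt76_dma_kick_queue() publishes how far software has filled the ring: EMI queues write q->head into a little-endian index word in shared memory, all other queues write the cpu_idx register. A tiny sketch of that split (endianness handling omitted):

#include <stdint.h>

struct ring {
        uint16_t *emi_cpu_idx;   /* shared-memory index word, or NULL */
        uint32_t cpu_idx;        /* mock register */
        int head;
};

static void ring_kick(struct ring *q)
{
        if (q->emi_cpu_idx)
                *q->emi_cpu_idx = (uint16_t)q->head;  /* driver uses cpu_to_le16() here */
        else
                q->cpu_idx = q->head;
}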
433 mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush) in mt76_dma_tx_cleanup() argument
438 if (!q || !q->ndesc) in mt76_dma_tx_cleanup()
441 spin_lock_bh(&q->cleanup_lock); in mt76_dma_tx_cleanup()
445 last = Q_READ(q, dma_idx); in mt76_dma_tx_cleanup()
447 while (q->queued > 0 && q->tail != last) { in mt76_dma_tx_cleanup()
448 mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry); in mt76_dma_tx_cleanup()
449 mt76_queue_tx_complete(dev, q, &entry); in mt76_dma_tx_cleanup()
456 if (!flush && q->tail == last) in mt76_dma_tx_cleanup()
457 last = Q_READ(q, dma_idx); in mt76_dma_tx_cleanup()
459 spin_unlock_bh(&q->cleanup_lock); in mt76_dma_tx_cleanup()
462 spin_lock_bh(&q->lock); in mt76_dma_tx_cleanup()
463 mt76_dma_sync_idx(dev, q); in mt76_dma_tx_cleanup()
464 mt76_dma_kick_queue(dev, q); in mt76_dma_tx_cleanup()
465 spin_unlock_bh(&q->lock); in mt76_dma_tx_cleanup()
468 if (!q->queued) in mt76_dma_tx_cleanup()
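
mt76_dma_tx_cleanup() reaps completed TX entries under cleanup_lock: it snapshots the hardware dma_idx (or -1 when flushing), walks q->tail toward it while completing entries, and re-reads dma_idx whenever the tail catches up with the snapshot; on flush it additionally resyncs and kicks the ring under q->lock. A compact model of just the reap loop:

#include <stdbool.h>

struct ring {
        int ndesc, tail, queued;
        int dma_idx;                      /* mock hardware write-back index */
};

static void complete_entry(struct ring *q, int idx) { (void)q; (void)idx; }

static void ring_tx_cleanup(struct ring *q, bool flush)
{
        int last = flush ? -1 : q->dma_idx;  /* flush drains regardless of hw index */

        while (q->queued > 0 && q->tail != last) {
                complete_entry(q, q->tail);
                q->tail = (q->tail + 1) % q->ndesc;
                q->queued--;

                /* caught up with the snapshot: see if hw moved on meanwhile */
                if (!flush && q->tail == last)
                        last = q->dma_idx;
        }
}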
473 mt76_dma_get_rxdmad_c_buf(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_get_rxdmad_c_buf() argument
476 struct mt76_queue_entry *e = &q->entry[idx]; in mt76_dma_get_rxdmad_c_buf()
490 q = &dev->q_rx[t->qid]; in mt76_dma_get_rxdmad_c_buf()
492 SKB_WITH_OVERHEAD(q->buf_size), in mt76_dma_get_rxdmad_c_buf()
493 page_pool_get_dma_dir(q->page_pool)); in mt76_dma_get_rxdmad_c_buf()
516 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx, in mt76_dma_get_buf() argument
519 struct mt76_queue_entry *e = &q->entry[idx]; in mt76_dma_get_buf()
520 struct mt76_desc *desc = &q->desc[idx]; in mt76_dma_get_buf()
524 if (mt76_queue_is_wed_rro_rxdmad_c(q) && !flush) in mt76_dma_get_buf()
525 buf = mt76_dma_get_rxdmad_c_buf(dev, q, idx, len, more); in mt76_dma_get_buf()
527 if (mt76_queue_is_wed_rro(q)) in mt76_dma_get_buf()
543 if (mt76_queue_is_wed_rx(q)) { in mt76_dma_get_buf()
551 SKB_WITH_OVERHEAD(q->buf_size), in mt76_dma_get_buf()
552 page_pool_get_dma_dir(q->page_pool)); in mt76_dma_get_buf()
563 SKB_WITH_OVERHEAD(q->buf_size), in mt76_dma_get_buf()
564 page_pool_get_dma_dir(q->page_pool)); in mt76_dma_get_buf()
573 mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush, in mt76_dma_dequeue() argument
576 int idx = q->tail; in mt76_dma_dequeue()
579 if (!q->queued) in mt76_dma_dequeue()
582 if (mt76_queue_is_wed_rro_data(q) || mt76_queue_is_wed_rro_msdu_pg(q)) in mt76_dma_dequeue()
585 if (mt76_queue_is_wed_rro_ind(q)) { in mt76_dma_dequeue()
592 cmd = q->entry[idx].buf; in mt76_dma_dequeue()
595 if (magic_cnt != q->magic_cnt) in mt76_dma_dequeue()
598 if (q->tail == q->ndesc - 1) in mt76_dma_dequeue()
599 q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_WED_IND_CMD_CNT; in mt76_dma_dequeue()
600 } else if (mt76_queue_is_wed_rro_rxdmad_c(q)) { in mt76_dma_dequeue()
607 dmad = q->entry[idx].buf; in mt76_dma_dequeue()
610 if (magic_cnt != q->magic_cnt) in mt76_dma_dequeue()
613 if (q->tail == q->ndesc - 1) in mt76_dma_dequeue()
614 q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_MAGIC_CNT; in mt76_dma_dequeue()
617 q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_dequeue()
618 else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE))) in mt76_dma_dequeue()
622 q->tail = (q->tail + 1) % q->ndesc; in mt76_dma_dequeue()
623 q->queued--; in mt76_dma_dequeue()
625 return mt76_dma_get_buf(dev, q, idx, len, info, more, drop, flush); in mt76_dma_dequeue()
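
mt76_dma_dequeue() decides whether the slot at q->tail is ready before consuming it: RRO indication and rxdmad_c rings compare the magic counter written back by hardware against q->magic_cnt (advancing the expected value after the last slot), while ordinary rings test MT_DMA_CTL_DMA_DONE, or force it when flushing. Only then are tail and queued updated and the buffer passed to mt76_dma_get_buf(). A sketch of that readiness check with simplified descriptors:

#include <stdbool.h>
#include <stdint.h>

#define MAGIC_CNT        8               /* stand-in for MT_DMA_MAGIC_CNT */
#define DMA_CTL_DMA_DONE (1u << 31)      /* stand-in for MT_DMA_CTL_DMA_DONE */

struct desc { uint32_t ctrl; uint32_t magic; };
struct ring {
        struct desc *desc;
        int ndesc, tail, queued;
        uint8_t magic_cnt;
        bool rro;                        /* magic-counter ring vs. DMA_DONE ring */
};

static void *ring_dequeue(struct ring *q, bool flush, void **bufs)
{
        int idx = q->tail;

        if (!q->queued)
                return NULL;

        if (q->rro) {
                if (q->desc[idx].magic != q->magic_cnt)
                        return NULL;     /* hardware has not written this slot yet */
                if (q->tail == q->ndesc - 1)  /* wrapping: expect the next magic */
                        q->magic_cnt = (q->magic_cnt + 1) % MAGIC_CNT;
        } else if (flush) {
                q->desc[idx].ctrl |= DMA_CTL_DMA_DONE;
        } else if (!(q->desc[idx].ctrl & DMA_CTL_DMA_DONE)) {
                return NULL;
        }

        q->tail = (q->tail + 1) % q->ndesc;
        q->queued--;
        return bufs[idx];
}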
629 mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_tx_queue_skb_raw() argument
638 if (q->queued + 1 >= q->ndesc - 1) in mt76_dma_tx_queue_skb_raw()
649 spin_lock_bh(&q->lock); in mt76_dma_tx_queue_skb_raw()
650 mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL); in mt76_dma_tx_queue_skb_raw()
651 mt76_dma_kick_queue(dev, q); in mt76_dma_tx_queue_skb_raw()
652 spin_unlock_bh(&q->lock); in mt76_dma_tx_queue_skb_raw()
662 mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q, in mt76_dma_tx_queue_skb() argument
717 if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) { in mt76_dma_tx_queue_skb()
730 return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf, in mt76_dma_tx_queue_skb()
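
Both TX enqueue paths refuse to queue a frame that would overflow the ring; mt76_dma_tx_queue_skb()'s check, q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1, reflects that one TX descriptor appears to carry two buffer pointers, so nbuf fragments occupy ceil(nbuf / 2) descriptors and one slot stays free to distinguish full from empty. A tiny illustration of that arithmetic:

#include <stdbool.h>

/* One TX descriptor holds two buffer pointers, so nbuf buffers need
 * (nbuf + 1) / 2 descriptors; keep one slot free to tell full from empty. */
static bool tx_ring_has_room(int queued, int ndesc, int nbuf)
{
        return queued + (nbuf + 1) / 2 < ndesc - 1;
}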
762 mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_rx_fill_buf() argument
765 int len = SKB_WITH_OVERHEAD(q->buf_size); in mt76_dma_rx_fill_buf()
768 if (!q->ndesc) in mt76_dma_rx_fill_buf()
771 while (q->queued < q->ndesc - 1) { in mt76_dma_rx_fill_buf()
776 if (mt76_queue_is_wed_rro_ind(q) || in mt76_dma_rx_fill_buf()
777 mt76_queue_is_wed_rro_rxdmad_c(q)) in mt76_dma_rx_fill_buf()
780 buf = mt76_get_page_pool_buf(q, &offset, q->buf_size); in mt76_dma_rx_fill_buf()
785 offset + q->buf_offset; in mt76_dma_rx_fill_buf()
787 qbuf.len = len - q->buf_offset; in mt76_dma_rx_fill_buf()
789 if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) { in mt76_dma_rx_fill_buf()
796 if (frames || mt76_queue_is_wed_rx(q)) in mt76_dma_rx_fill_buf()
797 mt76_dma_kick_queue(dev, q); in mt76_dma_rx_fill_buf()
802 int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_rx_fill() argument
807 spin_lock_bh(&q->lock); in mt76_dma_rx_fill()
808 frames = mt76_dma_rx_fill_buf(dev, q, allow_direct); in mt76_dma_rx_fill()
809 spin_unlock_bh(&q->lock); in mt76_dma_rx_fill()
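
mt76_dma_rx_fill_buf() tops the RX ring up to ndesc - 1 entries, pulling page-pool buffers, offsetting each by q->buf_offset, and kicking the queue when anything was added (always for WED RX rings); mt76_dma_rx_fill() is the same loop wrapped in q->lock. A simplified refill loop with the page-pool helpers mocked out:

#include <stdlib.h>

struct ring {
        int ndesc, queued;
        int buf_size, buf_offset;
};

/* Stand-ins for the page-pool helpers, descriptor write and queue kick. */
static void *alloc_rx_buf(struct ring *q) { return malloc(q->buf_size); }
static int add_rx_buf(struct ring *q, void *buf, int len)
{
        (void)buf; (void)len;
        q->queued++;
        return 0;
}
static void ring_kick(struct ring *q) { (void)q; }

static int ring_rx_fill(struct ring *q)
{
        int frames = 0;

        while (q->queued < q->ndesc - 1) {     /* keep one slot unused */
                void *buf = alloc_rx_buf(q);

                if (!buf)
                        break;
                /* usable payload starts buf_offset bytes into the buffer */
                if (add_rx_buf(q, buf, q->buf_size - q->buf_offset) < 0) {
                        free(buf);
                        break;
                }
                frames++;
        }

        if (frames)
                ring_kick(q);
        return frames;
}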
815 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q, in mt76_dma_alloc_queue() argument
821 spin_lock_init(&q->lock); in mt76_dma_alloc_queue()
822 spin_lock_init(&q->cleanup_lock); in mt76_dma_alloc_queue()
824 q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE; in mt76_dma_alloc_queue()
825 q->ndesc = n_desc; in mt76_dma_alloc_queue()
826 q->buf_size = bufsize; in mt76_dma_alloc_queue()
827 q->hw_idx = idx; in mt76_dma_alloc_queue()
829 size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc) in mt76_dma_alloc_queue()
831 q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size, in mt76_dma_alloc_queue()
832 &q->desc_dma, GFP_KERNEL); in mt76_dma_alloc_queue()
833 if (!q->desc) in mt76_dma_alloc_queue()
836 mt76_dma_queue_magic_cnt_init(dev, q); in mt76_dma_alloc_queue()
837 size = q->ndesc * sizeof(*q->entry); in mt76_dma_alloc_queue()
838 q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL); in mt76_dma_alloc_queue()
839 if (!q->entry) in mt76_dma_alloc_queue()
842 ret = mt76_create_page_pool(dev, q); in mt76_dma_alloc_queue()
846 ret = mt76_wed_dma_setup(dev, q, false); in mt76_dma_alloc_queue()
851 if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) || in mt76_dma_alloc_queue()
852 mt76_queue_is_wed_tx_free(q)) in mt76_dma_alloc_queue()
859 mt76_dma_queue_reset(dev, q, !mt76_queue_is_emi(q)); in mt76_dma_alloc_queue()
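
mt76_dma_alloc_queue() sets a queue up in a fixed order: map its register block at ring_base + idx * MT_RING_SIZE, allocate coherent descriptor memory (RRO indication rings use the larger mt76_wed_rro_desc), seed the magic counters, allocate the entry array, create the page pool, run the WED setup hook, and reset the ring unless it is hardware-managed. Because the descriptor and entry allocations are managed (dmam_/devm_), the early returns need no explicit unwinding. A skeletal, userspace-flavoured version of that ordering with every helper mocked:

#include <stdlib.h>

struct ring {
        void *desc;
        void *entry;
        int ndesc, buf_size, hw_idx;
};

/* Mocked helpers standing in for dmam_alloc_coherent(), devm_kzalloc(),
 * mt76_create_page_pool() and mt76_dma_queue_reset(). */
static void *alloc_desc(int n, size_t sz) { return calloc(n, sz); }
static int create_page_pool(struct ring *q) { (void)q; return 0; }
static void ring_reset(struct ring *q) { (void)q; }

static int ring_alloc(struct ring *q, int idx, int n_desc, int bufsize,
                      size_t desc_size)
{
        q->ndesc = n_desc;
        q->buf_size = bufsize;
        q->hw_idx = idx;

        q->desc = alloc_desc(n_desc, desc_size);    /* descriptor ring */
        if (!q->desc)
                return -1;

        q->entry = calloc(n_desc, sizeof(void *));  /* per-slot bookkeeping */
        if (!q->entry)
                return -1;

        if (create_page_pool(q))
                return -1;

        ring_reset(q);                              /* skipped for hw-managed rings */
        return 0;
}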
865 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q) in mt76_dma_rx_cleanup() argument
870 if (!q->ndesc) in mt76_dma_rx_cleanup()
874 spin_lock_bh(&q->lock); in mt76_dma_rx_cleanup()
875 buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL); in mt76_dma_rx_cleanup()
876 spin_unlock_bh(&q->lock); in mt76_dma_rx_cleanup()
881 if (!mt76_queue_is_wed_rro(q)) in mt76_dma_rx_cleanup()
885 spin_lock_bh(&q->lock); in mt76_dma_rx_cleanup()
886 if (q->rx_head) { in mt76_dma_rx_cleanup()
887 dev_kfree_skb(q->rx_head); in mt76_dma_rx_cleanup()
888 q->rx_head = NULL; in mt76_dma_rx_cleanup()
891 spin_unlock_bh(&q->lock); in mt76_dma_rx_cleanup()
897 struct mt76_queue *q = &dev->q_rx[qid]; in mt76_dma_rx_reset() local
899 if (!q->ndesc) in mt76_dma_rx_reset()
902 if (!mt76_queue_is_wed_rro_ind(q) && in mt76_dma_rx_reset()
903 !mt76_queue_is_wed_rro_rxdmad_c(q)) { in mt76_dma_rx_reset()
906 for (i = 0; i < q->ndesc; i++) in mt76_dma_rx_reset()
907 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE); in mt76_dma_rx_reset()
910 mt76_dma_rx_cleanup(dev, q); in mt76_dma_rx_reset()
913 mt76_wed_dma_setup(dev, q, true); in mt76_dma_rx_reset()
915 if (mt76_queue_is_wed_tx_free(q)) in mt76_dma_rx_reset()
919 mt76_queue_is_wed_rro(q)) in mt76_dma_rx_reset()
922 mt76_dma_sync_idx(dev, q); in mt76_dma_rx_reset()
923 mt76_dma_rx_fill_buf(dev, q, false); in mt76_dma_rx_reset()
927 mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, in mt76_add_fragment() argument
930 struct sk_buff *skb = q->rx_head; in mt76_add_fragment()
936 int offset = data - page_address(page) + q->buf_offset; in mt76_add_fragment()
938 skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size); in mt76_add_fragment()
946 q->rx_head = NULL; in mt76_add_fragment()
948 dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info); in mt76_add_fragment()
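
mt76_add_fragment() attaches a follow-up RX buffer as a page fragment of the pending q->rx_head skb, at offset data - page_address(page) + q->buf_offset; once the final piece arrives, rx_head is cleared and the assembled skb is handed to the driver's rx_skb() hook. A rough model of that accumulate-then-deliver flow, with the fragment storage reduced to a counter:

#include <stdbool.h>
#include <stddef.h>

struct ring {
        void *rx_head;              /* skb being reassembled, or NULL */
        int rx_frags;
};

static void deliver(struct ring *q, void *skb) { (void)q; (void)skb; }

static void ring_add_fragment(struct ring *q, void *data, int len, bool more)
{
        (void)data; (void)len;      /* real code calls skb_add_rx_frag() here */

        if (!q->rx_head)
                return;             /* nothing pending: fragment is dropped */

        q->rx_frags++;
        if (more)
                return;             /* wait for the rest of the frame */

        deliver(q, q->rx_head);     /* last piece: push the full frame up */
        q->rx_head = NULL;
        q->rx_frags = 0;
}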
954 mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget) in mt76_dma_rx_process() argument
960 bool allow_direct = !mt76_queue_is_wed_rx(q); in mt76_dma_rx_process()
963 if ((q->flags & MT_QFLAG_WED_RRO_EN) || in mt76_dma_rx_process()
965 mt76_queue_is_wed_tx_free(q))) { in mt76_dma_rx_process()
966 dma_idx = Q_READ(q, dma_idx); in mt76_dma_rx_process()
975 if (q->tail == dma_idx) in mt76_dma_rx_process()
976 dma_idx = Q_READ(q, dma_idx); in mt76_dma_rx_process()
978 if (q->tail == dma_idx) in mt76_dma_rx_process()
982 data = mt76_dma_dequeue(dev, q, false, &len, &info, &more, in mt76_dma_rx_process()
992 if (mt76_queue_is_wed_rro_ind(q) && dev->drv->rx_rro_ind_process) in mt76_dma_rx_process()
995 if (mt76_queue_is_wed_rro(q) && in mt76_dma_rx_process()
996 !mt76_queue_is_wed_rro_rxdmad_c(q)) { in mt76_dma_rx_process()
1004 if (q->rx_head) in mt76_dma_rx_process()
1005 data_len = q->buf_size; in mt76_dma_rx_process()
1007 data_len = SKB_WITH_OVERHEAD(q->buf_size); in mt76_dma_rx_process()
1009 if (data_len < len + q->buf_offset) { in mt76_dma_rx_process()
1010 dev_kfree_skb(q->rx_head); in mt76_dma_rx_process()
1011 q->rx_head = NULL; in mt76_dma_rx_process()
1015 if (q->rx_head) { in mt76_dma_rx_process()
1016 mt76_add_fragment(dev, q, data, len, more, info, in mt76_dma_rx_process()
1025 skb = napi_build_skb(data, q->buf_size); in mt76_dma_rx_process()
1029 skb_reserve(skb, q->buf_offset); in mt76_dma_rx_process()
1038 q->rx_head = skb; in mt76_dma_rx_process()
1042 dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info); in mt76_dma_rx_process()
1049 mt76_dma_rx_fill(dev, q, true); in mt76_dma_rx_process()
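
mt76_dma_rx_process() is the NAPI-style poll body: it optionally snapshots dma_idx for RRO or WED tx-free rings, dequeues buffers until the budget or the ring runs dry, either starts a new skb with napi_build_skb() plus skb_reserve(q->buf_offset) or appends to q->rx_head for multi-buffer frames, and refills the ring before returning. A compressed outline of that loop (budget accounting simplified, every helper a stub):

#include <stdbool.h>
#include <stddef.h>

struct ring { void *rx_head; };

/* Stand-ins for mt76_dma_dequeue(), napi_build_skb()/skb_reserve(),
 * mt76_add_fragment(), the rx_skb() callback and mt76_dma_rx_fill(). */
static void *ring_dequeue(struct ring *q, int *len, bool *more) { (void)q; (void)len; *more = false; return NULL; }
static void *build_skb_from(void *data, int len) { (void)len; return data; }
static void add_fragment(struct ring *q, void *data, int len, bool more) { (void)q; (void)data; (void)len; (void)more; }
static void deliver(struct ring *q, void *skb) { (void)q; (void)skb; }
static void ring_refill(struct ring *q) { (void)q; }

static int ring_rx_process(struct ring *q, int budget)
{
        int done = 0;

        while (done < budget) {
                bool more;
                int len = 0;
                void *data = ring_dequeue(q, &len, &more);

                if (!data)
                        break;
                done++;

                if (q->rx_head) {           /* continuation of a split frame */
                        add_fragment(q, data, len, more);
                        continue;
                }

                void *skb = build_skb_from(data, len);
                if (!skb)
                        continue;

                if (more) {                 /* first piece: park it on rx_head */
                        q->rx_head = skb;
                        continue;
                }

                deliver(q, skb);            /* complete single-buffer frame */
        }

        ring_refill(q);                     /* put fresh buffers back on the ring */
        return done;
}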
1170 struct mt76_queue *q = &dev->q_rx[i]; in mt76_dma_cleanup() local
1173 mt76_queue_is_wed_rro(q)) in mt76_dma_cleanup()
1177 mt76_dma_rx_cleanup(dev, q); in mt76_dma_cleanup()
1179 page_pool_destroy(q->page_pool); in mt76_dma_cleanup()