xref: /src/sys/contrib/dev/mediatek/mt76/dma.c (revision b1bebaaba9b9c0ddfe503c43ca8e9e3917ee2c57)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
4  */
5 
6 #include <linux/dma-mapping.h>
7 #if defined(__FreeBSD__)
8 #include <linux/cache.h>
9 #include <net/page_pool/helpers.h>
10 #endif
11 #include "mt76.h"
12 #include "dma.h"
13 
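/* Allocate a TX descriptor buffer (txwi) together with its bookkeeping
 * struct in a single cache-line aligned allocation: the mt76_txwi_cache
 * entry sits right behind the txwi bytes, and only the txwi area is
 * DMA-mapped towards the device.
 */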
14 static struct mt76_txwi_cache *
15 mt76_alloc_txwi(struct mt76_dev *dev)
16 {
17 	struct mt76_txwi_cache *t;
18 	dma_addr_t addr;
19 	u8 *txwi;
20 	int size;
21 
22 	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
23 	txwi = kzalloc(size, GFP_ATOMIC);
24 	if (!txwi)
25 		return NULL;
26 
27 	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
28 			      DMA_TO_DEVICE);
29 	if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
30 		kfree(txwi);
31 		return NULL;
32 	}
33 
34 	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
35 	t->dma_addr = addr;
36 
37 	return t;
38 }
39 
40 static struct mt76_txwi_cache *
41 mt76_alloc_rxwi(struct mt76_dev *dev)
42 {
43 	struct mt76_txwi_cache *t;
44 
45 	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
46 	if (!t)
47 		return NULL;
48 
49 	t->ptr = NULL;
50 	return t;
51 }
52 
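/* txwi/rxwi entries are recycled through per-device free lists
 * (dev->txwi_cache / dev->rxwi_cache) instead of being freed: the
 * __mt76_get_* helpers below pop an entry under the corresponding lock,
 * and mt76_put_* pushes it back.
 */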
53 static struct mt76_txwi_cache *
54 __mt76_get_txwi(struct mt76_dev *dev)
55 {
56 	struct mt76_txwi_cache *t = NULL;
57 
58 	spin_lock(&dev->lock);
59 	if (!list_empty(&dev->txwi_cache)) {
60 		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
61 				     list);
62 		list_del(&t->list);
63 	}
64 	spin_unlock(&dev->lock);
65 
66 	return t;
67 }
68 
69 static struct mt76_txwi_cache *
70 __mt76_get_rxwi(struct mt76_dev *dev)
71 {
72 	struct mt76_txwi_cache *t = NULL;
73 
74 	spin_lock_bh(&dev->wed_lock);
75 	if (!list_empty(&dev->rxwi_cache)) {
76 		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
77 				     list);
78 		list_del(&t->list);
79 	}
80 	spin_unlock_bh(&dev->wed_lock);
81 
82 	return t;
83 }
84 
85 static struct mt76_txwi_cache *
86 mt76_get_txwi(struct mt76_dev *dev)
87 {
88 	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
89 
90 	if (t)
91 		return t;
92 
93 	return mt76_alloc_txwi(dev);
94 }
95 
96 struct mt76_txwi_cache *
97 mt76_get_rxwi(struct mt76_dev *dev)
98 {
99 	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);
100 
101 	if (t)
102 		return t;
103 
104 	return mt76_alloc_rxwi(dev);
105 }
106 EXPORT_SYMBOL_GPL(mt76_get_rxwi);
107 
108 void
109 mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
110 {
111 	if (!t)
112 		return;
113 
114 	spin_lock(&dev->lock);
115 	list_add(&t->list, &dev->txwi_cache);
116 	spin_unlock(&dev->lock);
117 }
118 EXPORT_SYMBOL_GPL(mt76_put_txwi);
119 
120 void
121 mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
122 {
123 	if (!t)
124 		return;
125 
126 	spin_lock_bh(&dev->wed_lock);
127 	list_add(&t->list, &dev->rxwi_cache);
128 	spin_unlock_bh(&dev->wed_lock);
129 }
130 EXPORT_SYMBOL_GPL(mt76_put_rxwi);
131 
132 static void
133 mt76_free_pending_txwi(struct mt76_dev *dev)
134 {
135 	struct mt76_txwi_cache *t;
136 
137 	local_bh_disable();
138 	while ((t = __mt76_get_txwi(dev)) != NULL) {
139 		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
140 				 DMA_TO_DEVICE);
141 		kfree(mt76_get_txwi_ptr(dev, t));
142 	}
143 	local_bh_enable();
144 }
145 
146 void
147 mt76_free_pending_rxwi(struct mt76_dev *dev)
148 {
149 	struct mt76_txwi_cache *t;
150 
151 	local_bh_disable();
152 	while ((t = __mt76_get_rxwi(dev)) != NULL) {
153 		if (t->ptr)
154 			mt76_put_page_pool_buf(t->ptr, false);
155 		kfree(t);
156 	}
157 	local_bh_enable();
158 }
159 EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);
160 
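/* WED RRO rings do not use the DMA_DONE bit; completion is tracked with a
 * small "magic" counter embedded in each descriptor instead. Pre-fill the
 * ring with the last counter value so that the first pass written by the
 * HW (counter 0) is recognized as new in mt76_dma_dequeue().
 */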
161 static void
162 mt76_dma_queue_magic_cnt_init(struct mt76_dev *dev, struct mt76_queue *q)
163 {
164 	if (!mt76_queue_is_wed_rro(q))
165 		return;
166 
167 	q->magic_cnt = 0;
168 	if (mt76_queue_is_wed_rro_ind(q)) {
169 		struct mt76_wed_rro_desc *rro_desc;
170 		u32 data1 = FIELD_PREP(RRO_IND_DATA1_MAGIC_CNT_MASK,
171 				       MT_DMA_WED_IND_CMD_CNT - 1);
172 		int i;
173 
174 		rro_desc = (struct mt76_wed_rro_desc *)q->desc;
175 		for (i = 0; i < q->ndesc; i++) {
176 			struct mt76_wed_rro_ind *cmd;
177 
178 			cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
179 			cmd->data1 = cpu_to_le32(data1);
180 		}
181 	} else if (mt76_queue_is_wed_rro_rxdmad_c(q)) {
182 		struct mt76_rro_rxdmad_c *dmad = (void *)q->desc;
183 		u32 data3 = FIELD_PREP(RRO_RXDMAD_DATA3_MAGIC_CNT_MASK,
184 				       MT_DMA_MAGIC_CNT - 1);
185 		int i;
186 
187 		for (i = 0; i < q->ndesc; i++)
188 			dmad[i].data3 = cpu_to_le32(data3);
189 	}
190 }
191 
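/* Re-program the ring base/size registers and resynchronize the software
 * head/tail pointers with the current HW DMA index.
 */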
192 static void
193 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
194 {
195 	Q_WRITE(q, desc_base, q->desc_dma);
196 	if ((q->flags & MT_QFLAG_WED_RRO_EN) && !mt76_npu_device_active(dev))
197 		Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc);
198 	else
199 		Q_WRITE(q, ring_size, q->ndesc);
200 
201 	if (mt76_queue_is_npu_tx(q)) {
202 		writel(q->desc_dma, &q->regs->desc_base);
203 		writel(q->ndesc, &q->regs->ring_size);
204 	}
205 	q->head = Q_READ(q, dma_idx);
206 	q->tail = q->head;
207 }
208 
209 void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
210 			  bool reset_idx)
211 {
212 	if (!q || !q->ndesc)
213 		return;
214 
215 	if (!mt76_queue_is_wed_rro_ind(q) &&
216 	    !mt76_queue_is_wed_rro_rxdmad_c(q) && !mt76_queue_is_npu(q)) {
217 		int i;
218 
219 		/* clear descriptors */
220 		for (i = 0; i < q->ndesc; i++)
221 			q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
222 	}
223 
224 	mt76_dma_queue_magic_cnt_init(dev, q);
225 	if (reset_idx) {
226 		if (mt76_queue_is_emi(q))
227 			*q->emi_cpu_idx = 0;
228 		else
229 			Q_WRITE(q, cpu_idx, 0);
230 		Q_WRITE(q, dma_idx, 0);
231 	}
232 	mt76_dma_sync_idx(dev, q);
233 }
234 
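/* Fill one RX descriptor at q->head. For WED-managed RX queues the buffer
 * is tracked through an rx token (stored in buf1) so it can be looked up
 * again on completion; RRO indication/rxdmad_c rings use the descriptor
 * slot itself as the entry buffer and skip the normal descriptor setup.
 */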
235 static int
236 mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
237 		    struct mt76_queue_buf *buf, void *data)
238 {
239 	struct mt76_queue_entry *entry = &q->entry[q->head];
240 	struct mt76_txwi_cache *txwi = NULL;
241 	u32 buf1 = 0, ctrl, info = 0;
242 	struct mt76_desc *desc;
243 	int idx = q->head;
244 	int rx_token;
245 
246 	if (mt76_queue_is_wed_rro_ind(q)) {
247 		struct mt76_wed_rro_desc *rro_desc;
248 
249 		rro_desc = (struct mt76_wed_rro_desc *)q->desc;
250 		data = &rro_desc[q->head];
251 		goto done;
252 	} else if (mt76_queue_is_wed_rro_rxdmad_c(q)) {
253 		data = &q->desc[q->head];
254 		goto done;
255 	}
256 
257 	desc = &q->desc[q->head];
258 	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
259 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
260 	buf1 = FIELD_PREP(MT_DMA_CTL_SDP0_H, buf->addr >> 32);
261 #endif
262 
263 	if (mt76_queue_is_wed_rx(q) || mt76_queue_is_wed_rro_data(q)) {
264 		txwi = mt76_get_rxwi(dev);
265 		if (!txwi)
266 			return -ENOMEM;
267 
268 		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
269 		if (rx_token < 0) {
270 			mt76_put_rxwi(dev, txwi);
271 			return -ENOMEM;
272 		}
273 
274 		buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
275 		ctrl |= MT_DMA_CTL_TO_HOST;
276 
277 		txwi->qid = q - dev->q_rx;
278 	}
279 
280 	if (mt76_queue_is_wed_rro_msdu_pg(q) &&
281 	    dev->drv->rx_rro_add_msdu_page) {
282 		if (dev->drv->rx_rro_add_msdu_page(dev, q, buf->addr, data))
283 			return -ENOMEM;
284 	}
285 
286 	if (q->flags & MT_QFLAG_WED_RRO_EN) {
287 		info |= FIELD_PREP(MT_DMA_MAGIC_MASK, q->magic_cnt);
288 		if ((q->head + 1) == q->ndesc)
289 			q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_MAGIC_CNT;
290 	}
291 
292 	WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
293 	WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
294 	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
295 	WRITE_ONCE(desc->info, cpu_to_le32(info));
296 
297 done:
298 	entry->dma_addr[0] = buf->addr;
299 	entry->dma_len[0] = buf->len;
300 	entry->txwi = txwi;
301 	entry->buf = data;
302 	entry->wcid = 0xffff;
303 	entry->skip_buf1 = true;
304 	q->head = (q->head + 1) % q->ndesc;
305 	q->queued++;
306 
307 	return idx;
308 }
309 
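/* Queue nbufs TX buffers: each HW descriptor carries up to two scatter
 * buffers (buf0/buf1), so buffers are consumed in pairs and the last
 * segment is flagged with MT_DMA_CTL_LAST_SEC0/1. Returns the index of
 * the last descriptor used.
 */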
310 static int
311 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
312 		 struct mt76_queue_buf *buf, int nbufs, u32 info,
313 		 struct sk_buff *skb, void *txwi)
314 {
315 	struct mt76_queue_entry *entry;
316 	struct mt76_desc *desc;
317 	int i, idx = -1;
318 	u32 ctrl, next;
319 
320 	if (txwi) {
321 		q->entry[q->head].txwi = DMA_DUMMY_DATA;
322 		q->entry[q->head].skip_buf0 = true;
323 	}
324 
325 	for (i = 0; i < nbufs; i += 2, buf += 2) {
326 		u32 buf0 = buf[0].addr, buf1 = 0;
327 
328 		idx = q->head;
329 		next = (q->head + 1) % q->ndesc;
330 
331 		desc = &q->desc[idx];
332 		entry = &q->entry[idx];
333 
334 		if (buf[0].skip_unmap)
335 			entry->skip_buf0 = true;
336 		entry->skip_buf1 = i == nbufs - 1;
337 
338 		entry->dma_addr[0] = buf[0].addr;
339 		entry->dma_len[0] = buf[0].len;
340 
341 		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
342 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
343 		info |= FIELD_PREP(MT_DMA_CTL_SDP0_H, buf[0].addr >> 32);
344 #endif
345 		if (i < nbufs - 1) {
346 			entry->dma_addr[1] = buf[1].addr;
347 			entry->dma_len[1] = buf[1].len;
348 			buf1 = buf[1].addr;
349 			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
350 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
351 			info |= FIELD_PREP(MT_DMA_CTL_SDP1_H,
352 					   buf[1].addr >> 32);
353 #endif
354 			if (buf[1].skip_unmap)
355 				entry->skip_buf1 = true;
356 		}
357 
358 		if (i == nbufs - 1)
359 			ctrl |= MT_DMA_CTL_LAST_SEC0;
360 		else if (i == nbufs - 2)
361 			ctrl |= MT_DMA_CTL_LAST_SEC1;
362 
363 		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
364 		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
365 		WRITE_ONCE(desc->info, cpu_to_le32(info));
366 		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
367 
368 		q->head = next;
369 		q->queued++;
370 	}
371 
372 	q->entry[idx].txwi = txwi;
373 	q->entry[idx].skb = skb;
374 	q->entry[idx].wcid = 0xffff;
375 
376 	return idx;
377 }
378 
379 static void
380 mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
381 			struct mt76_queue_entry *prev_e)
382 {
383 	struct mt76_queue_entry *e = &q->entry[idx];
384 
385 	if (!e->skip_buf0)
386 		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
387 				 DMA_TO_DEVICE);
388 
389 	if (!e->skip_buf1)
390 		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
391 				 DMA_TO_DEVICE);
392 
393 	if (e->txwi == DMA_DUMMY_DATA)
394 		e->txwi = NULL;
395 
396 	*prev_e = *e;
397 	memset(e, 0, sizeof(*e));
398 }
399 
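/* Make newly written descriptors visible to the device: the wmb() orders
 * the descriptor stores before the CPU index update. Typical enqueue
 * pattern (see mt76_dma_tx_queue_skb_raw() below):
 *
 *	spin_lock_bh(&q->lock);
 *	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
 *	mt76_dma_kick_queue(dev, q);
 *	spin_unlock_bh(&q->lock);
 */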
400 static void
401 mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
402 {
403 	wmb();
404 	if (mt76_queue_is_emi(q))
405 		*q->emi_cpu_idx = cpu_to_le16(q->head);
406 	else
407 		Q_WRITE(q, cpu_idx, q->head);
408 }
409 
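/* Reclaim completed TX entries up to the HW DMA index (or the whole ring
 * when flushing), unmap their buffers, complete the skbs and recycle the
 * txwi entries.
 */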
410 static void
411 mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
412 {
413 	struct mt76_queue_entry entry;
414 	int last;
415 
416 	if (!q || !q->ndesc)
417 		return;
418 
419 	spin_lock_bh(&q->cleanup_lock);
420 	if (flush)
421 		last = -1;
422 	else
423 		last = Q_READ(q, dma_idx);
424 
425 	while (q->queued > 0 && q->tail != last) {
426 		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
427 		mt76_npu_txdesc_cleanup(q, q->tail);
428 		mt76_queue_tx_complete(dev, q, &entry);
429 
430 		if (entry.txwi) {
431 			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
432 				mt76_put_txwi(dev, entry.txwi);
433 		}
434 
435 		if (!flush && q->tail == last)
436 			last = Q_READ(q, dma_idx);
437 	}
438 	spin_unlock_bh(&q->cleanup_lock);
439 
440 	if (flush) {
441 		spin_lock_bh(&q->lock);
442 		mt76_dma_sync_idx(dev, q);
443 		mt76_dma_kick_queue(dev, q);
444 		spin_unlock_bh(&q->lock);
445 	}
446 
447 	if (!q->queued)
448 		wake_up(&dev->tx_wait);
449 }
450 
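/* Resolve the RX buffer referenced by an RRO rxdmad_c completion entry:
 * release the rx token, sync the buffer for the CPU and extract length and
 * "more fragments" information from the descriptor. Repeated/old packets
 * are dropped here and reported as -EAGAIN.
 */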
451 static void *
452 mt76_dma_get_rxdmad_c_buf(struct mt76_dev *dev, struct mt76_queue *q,
453 			  int idx, int *len, bool *more)
454 {
455 	struct mt76_queue_entry *e = &q->entry[idx];
456 	struct mt76_rro_rxdmad_c *dmad = e->buf;
457 	u32 data1 = le32_to_cpu(dmad->data1);
458 	u32 data2 = le32_to_cpu(dmad->data2);
459 	struct mt76_txwi_cache *t;
460 	u16 rx_token_id;
461 	u8 ind_reason;
462 	void *buf;
463 
464 	rx_token_id = FIELD_GET(RRO_RXDMAD_DATA2_RX_TOKEN_ID_MASK, data2);
465 	t = mt76_rx_token_release(dev, rx_token_id);
466 	if (!t)
467 		return ERR_PTR(-EAGAIN);
468 
469 	q = &dev->q_rx[t->qid];
470 	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
471 				SKB_WITH_OVERHEAD(q->buf_size),
472 				page_pool_get_dma_dir(q->page_pool));
473 
474 	if (len)
475 		*len = FIELD_GET(RRO_RXDMAD_DATA1_SDL0_MASK, data1);
476 	if (more)
477 		*more = !FIELD_GET(RRO_RXDMAD_DATA1_LS_MASK, data1);
478 
479 	buf = t->ptr;
480 	ind_reason = FIELD_GET(RRO_RXDMAD_DATA2_IND_REASON_MASK, data2);
481 	if (ind_reason == MT_DMA_WED_IND_REASON_REPEAT ||
482 	    ind_reason == MT_DMA_WED_IND_REASON_OLDPKT) {
483 		mt76_put_page_pool_buf(buf, false);
484 		buf = ERR_PTR(-EAGAIN);
485 	}
486 	t->ptr = NULL;
487 	t->dma_addr = 0;
488 
489 	mt76_put_rxwi(dev, t);
490 
491 	return buf;
492 }
493 
494 static void *
495 mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
496 		 int *len, u32 *info, bool *more, bool *drop, bool flush)
497 {
498 	struct mt76_queue_entry *e = &q->entry[idx];
499 	struct mt76_desc *desc = &q->desc[idx];
500 	u32 ctrl, desc_info, buf1;
501 	void *buf = e->buf;
502 
503 	if (mt76_queue_is_wed_rro_rxdmad_c(q) && !flush)
504 		buf = mt76_dma_get_rxdmad_c_buf(dev, q, idx, len, more);
505 
506 	if (mt76_queue_is_wed_rro(q))
507 		goto done;
508 
509 	ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
510 	if (len) {
511 		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
512 		*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
513 	}
514 
515 	desc_info = le32_to_cpu(desc->info);
516 	if (info)
517 		*info = desc_info;
518 
519 	buf1 = le32_to_cpu(desc->buf1);
520 	mt76_dma_should_drop_buf(drop, ctrl, buf1, desc_info);
521 
522 	if (mt76_queue_is_wed_rx(q)) {
523 		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
524 		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);
525 
526 		if (!t)
527 			return NULL;
528 
529 		dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
530 				SKB_WITH_OVERHEAD(q->buf_size),
531 				page_pool_get_dma_dir(q->page_pool));
532 
533 		buf = t->ptr;
534 		t->dma_addr = 0;
535 		t->ptr = NULL;
536 
537 		mt76_put_rxwi(dev, t);
538 		if (drop)
539 			*drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
540 	} else {
541 		dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
542 				SKB_WITH_OVERHEAD(q->buf_size),
543 				page_pool_get_dma_dir(q->page_pool));
544 	}
545 
546 done:
547 	e->buf = NULL;
548 	return buf;
549 }
550 
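/* Pop the entry at q->tail if the HW has finished with it. Completion is
 * detected via the DMA_DONE bit for normal rings and via the per-descriptor
 * magic counter for WED RRO indication/rxdmad_c rings; a flush dequeues
 * unconditionally.
 */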
551 static void *
552 mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
553 		 int *len, u32 *info, bool *more, bool *drop)
554 {
555 	int idx = q->tail;
556 
557 	*more = false;
558 	if (!q->queued)
559 		return NULL;
560 
561 	if (mt76_queue_is_wed_rro_data(q) || mt76_queue_is_wed_rro_msdu_pg(q))
562 		goto done;
563 
564 	if (mt76_queue_is_wed_rro_ind(q)) {
565 		struct mt76_wed_rro_ind *cmd;
566 		u8 magic_cnt;
567 
568 		if (flush)
569 			goto done;
570 
571 		cmd = q->entry[idx].buf;
572 		magic_cnt = FIELD_GET(RRO_IND_DATA1_MAGIC_CNT_MASK,
573 				      le32_to_cpu(cmd->data1));
574 		if (magic_cnt != q->magic_cnt)
575 			return NULL;
576 
577 		if (q->tail == q->ndesc - 1)
578 			q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_WED_IND_CMD_CNT;
579 	} else if (mt76_queue_is_wed_rro_rxdmad_c(q)) {
580 		struct mt76_rro_rxdmad_c *dmad;
581 		u16 magic_cnt;
582 
583 		if (flush)
584 			goto done;
585 
586 		dmad = q->entry[idx].buf;
587 		magic_cnt = FIELD_GET(RRO_RXDMAD_DATA3_MAGIC_CNT_MASK,
588 				      le32_to_cpu(dmad->data3));
589 		if (magic_cnt != q->magic_cnt)
590 			return NULL;
591 
592 		if (q->tail == q->ndesc - 1)
593 			q->magic_cnt = (q->magic_cnt + 1) % MT_DMA_MAGIC_CNT;
594 	} else {
595 		if (flush)
596 			q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
597 		else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
598 			return NULL;
599 	}
600 done:
601 	q->tail = (q->tail + 1) % q->ndesc;
602 	q->queued--;
603 
604 	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop, flush);
605 }
606 
607 static int
608 mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
609 			  struct sk_buff *skb, u32 tx_info)
610 {
611 	struct mt76_queue_buf buf = {};
612 	dma_addr_t addr;
613 
614 	if (test_bit(MT76_MCU_RESET, &dev->phy.state))
615 		goto error;
616 
617 	if (q->queued + 1 >= q->ndesc - 1)
618 		goto error;
619 
620 	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
621 			      DMA_TO_DEVICE);
622 	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
623 		goto error;
624 
625 	buf.addr = addr;
626 	buf.len = skb->len;
627 
628 	spin_lock_bh(&q->lock);
629 	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
630 	mt76_dma_kick_queue(dev, q);
631 	spin_unlock_bh(&q->lock);
632 
633 	return 0;
634 
635 error:
636 	dev_kfree_skb(skb);
637 	return -ENOMEM;
638 }
639 
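/* Map a frame for transmission: buf[0] carries the DMA-mapped txwi, the
 * remaining buffers cover the skb head and its fragments. The driver's
 * tx_prepare_skb() callback fills the txwi before the buffers are added to
 * the ring (or handed to the NPU on NPU-enabled devices).
 */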
640 static int
641 mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
642 		      enum mt76_txq_id qid, struct sk_buff *skb,
643 		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
644 {
645 	struct ieee80211_tx_status status = {
646 		.sta = sta,
647 	};
648 	struct mt76_tx_info tx_info = {
649 		.skb = skb,
650 	};
651 	struct mt76_dev *dev = phy->dev;
652 	struct ieee80211_hw *hw;
653 	int len, n = 0, ret = -ENOMEM;
654 	struct mt76_txwi_cache *t;
655 	struct sk_buff *iter;
656 	dma_addr_t addr;
657 	u8 *txwi;
658 
659 	if (test_bit(MT76_RESET, &phy->state))
660 		goto free_skb;
661 
662 	/* TODO: Take non-linear skbs into account */
663 	if (mt76_npu_device_active(dev) && skb_linearize(skb))
664 		goto free_skb;
665 
666 	t = mt76_get_txwi(dev);
667 	if (!t)
668 		goto free_skb;
669 
670 	txwi = mt76_get_txwi_ptr(dev, t);
671 
672 	skb->prev = skb->next = NULL;
673 	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
674 		mt76_insert_hdr_pad(skb);
675 
676 	len = skb_headlen(skb);
677 	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
678 	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
679 		goto free;
680 
681 	tx_info.buf[n].addr = t->dma_addr;
682 	tx_info.buf[n++].len = dev->drv->txwi_size;
683 	tx_info.buf[n].addr = addr;
684 	tx_info.buf[n++].len = len;
685 
686 	skb_walk_frags(skb, iter) {
687 		if (n == ARRAY_SIZE(tx_info.buf))
688 			goto unmap;
689 
690 		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
691 				      DMA_TO_DEVICE);
692 		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
693 			goto unmap;
694 
695 		tx_info.buf[n].addr = addr;
696 		tx_info.buf[n++].len = iter->len;
697 	}
698 	tx_info.nbuf = n;
699 
700 	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
701 		ret = -ENOMEM;
702 		goto unmap;
703 	}
704 
705 	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
706 				DMA_TO_DEVICE);
707 	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
708 	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
709 				   DMA_TO_DEVICE);
710 	if (ret < 0)
711 		goto unmap;
712 
713 	if (mt76_npu_device_active(dev))
714 		return mt76_npu_dma_add_buf(phy, q, skb, &tx_info.buf[1], txwi);
715 
716 	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
717 				tx_info.info, tx_info.skb, t);
718 
719 unmap:
720 	for (n--; n > 0; n--)
721 		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
722 				 tx_info.buf[n].len, DMA_TO_DEVICE);
723 
724 free:
725 #ifdef CONFIG_NL80211_TESTMODE
726 	/* fix tx_done accounting on queue overflow */
727 	if (mt76_is_testmode_skb(dev, skb, &hw)) {
728 		struct mt76_phy *phy = hw->priv;
729 
730 		if (tx_info.skb == phy->test.tx_skb)
731 			phy->test.tx_done--;
732 	}
733 #endif
734 
735 	mt76_put_txwi(dev, t);
736 
737 free_skb:
738 	status.skb = tx_info.skb;
739 	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
740 	spin_lock_bh(&dev->rx_lock);
741 	ieee80211_tx_status_ext(hw, &status);
742 	spin_unlock_bh(&dev->rx_lock);
743 
744 	return ret;
745 }
746 
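/* Refill the RX ring with page-pool buffers until at most ndesc - 1 entries
 * are queued; for RRO indication/rxdmad_c rings the descriptor slots
 * themselves are queued instead of page-pool buffers.
 */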
747 static int
748 mt76_dma_rx_fill_buf(struct mt76_dev *dev, struct mt76_queue *q,
749 		     bool allow_direct)
750 {
751 	int len = SKB_WITH_OVERHEAD(q->buf_size);
752 	int frames = 0;
753 
754 	if (!q->ndesc)
755 		return 0;
756 
757 	while (q->queued < q->ndesc - 1) {
758 		struct mt76_queue_buf qbuf = {};
759 		void *buf = NULL;
760 		int offset;
761 
762 		if (mt76_queue_is_wed_rro_ind(q) ||
763 		    mt76_queue_is_wed_rro_rxdmad_c(q))
764 			goto done;
765 
766 		buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
767 		if (!buf)
768 			break;
769 
770 		qbuf.addr = page_pool_get_dma_addr(virt_to_head_page(buf)) +
771 			    offset + q->buf_offset;
772 done:
773 		qbuf.len = len - q->buf_offset;
774 		qbuf.skip_unmap = false;
775 		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
776 			mt76_put_page_pool_buf(buf, allow_direct);
777 			break;
778 		}
779 		frames++;
780 	}
781 
782 	if (frames || mt76_queue_is_wed_rx(q))
783 		mt76_dma_kick_queue(dev, q);
784 
785 	return frames;
786 }
787 
788 int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
789 		     bool allow_direct)
790 {
791 	int frames;
792 
793 	spin_lock_bh(&q->lock);
794 	frames = mt76_dma_rx_fill_buf(dev, q, allow_direct);
795 	spin_unlock_bh(&q->lock);
796 
797 	return frames;
798 }
799 
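/* Allocate and initialize one DMA queue: coherent descriptor ring (sized by
 * queue type), per-entry bookkeeping array and page pool, followed by
 * NPU/WED setup and an initial ring reset. EMI queues are reset by the
 * HW-specific driver instead.
 */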
800 static int
801 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
802 		     int idx, int n_desc, int bufsize,
803 		     u32 ring_base)
804 {
805 	int ret, size;
806 
807 	spin_lock_init(&q->lock);
808 	spin_lock_init(&q->cleanup_lock);
809 
810 #if defined(__linux__)
811 	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
812 #elif defined(__FreeBSD__)
813 	q->regs = (void *)((u8 *)dev->mmio.regs + ring_base + idx * MT_RING_SIZE);
814 #endif
815 	q->ndesc = n_desc;
816 	q->buf_size = bufsize;
817 	q->hw_idx = idx;
818 	q->dev = dev;
819 
820 	if (mt76_queue_is_wed_rro_ind(q))
821 		size = sizeof(struct mt76_wed_rro_desc);
822 	else if (mt76_queue_is_npu_tx(q))
823 		size = sizeof(struct airoha_npu_tx_dma_desc);
824 	else if (mt76_queue_is_npu_rx(q))
825 		size = sizeof(struct airoha_npu_rx_dma_desc);
826 	else
827 		size = sizeof(struct mt76_desc);
828 
829 	q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size,
830 				      &q->desc_dma, GFP_KERNEL);
831 	if (!q->desc)
832 		return -ENOMEM;
833 
834 	mt76_dma_queue_magic_cnt_init(dev, q);
835 	size = q->ndesc * sizeof(*q->entry);
836 	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
837 	if (!q->entry)
838 		return -ENOMEM;
839 
840 	ret = mt76_create_page_pool(dev, q);
841 	if (ret)
842 		return ret;
843 
844 	mt76_npu_queue_setup(dev, q);
845 	ret = mt76_wed_dma_setup(dev, q, false);
846 	if (ret)
847 		return ret;
848 
849 	if (mtk_wed_device_active(&dev->mmio.wed)) {
850 		if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) ||
851 		    mt76_queue_is_wed_tx_free(q))
852 			return 0;
853 	}
854 
855 	/* The HW-specific driver is expected to reset brand-new EMI queues,
856 	 * since it needs to set the CPU index pointer.
857 	 */
858 	mt76_dma_queue_reset(dev, q, !mt76_queue_is_emi(q));
859 
860 	return 0;
861 }
862 
863 static void
864 mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
865 {
866 	void *buf;
867 	bool more;
868 
869 	if (!q->ndesc)
870 		return;
871 
872 	if (mt76_queue_is_npu(q)) {
873 		mt76_npu_queue_cleanup(dev, q);
874 		return;
875 	}
876 
877 	do {
878 		spin_lock_bh(&q->lock);
879 		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
880 		spin_unlock_bh(&q->lock);
881 
882 		if (!buf)
883 			break;
884 
885 		if (!mt76_queue_is_wed_rro(q))
886 			mt76_put_page_pool_buf(buf, false);
887 	} while (1);
888 
889 	spin_lock_bh(&q->lock);
890 	if (q->rx_head) {
891 		dev_kfree_skb(q->rx_head);
892 		q->rx_head = NULL;
893 	}
894 
895 	spin_unlock_bh(&q->lock);
896 }
897 
898 static void
899 mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
900 {
901 	struct mt76_queue *q = &dev->q_rx[qid];
902 
903 	if (!q->ndesc)
904 		return;
905 
906 	if (!mt76_queue_is_wed_rro_ind(q) &&
907 	    !mt76_queue_is_wed_rro_rxdmad_c(q) && !mt76_queue_is_npu(q)) {
908 		int i;
909 
910 		for (i = 0; i < q->ndesc; i++)
911 			q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
912 	}
913 
914 	mt76_dma_rx_cleanup(dev, q);
915 
916 	/* reset WED rx queues */
917 	mt76_wed_dma_setup(dev, q, true);
918 
919 	if (mt76_queue_is_wed_tx_free(q))
920 		return;
921 
922 	if (mtk_wed_device_active(&dev->mmio.wed) &&
923 	    mt76_queue_is_wed_rro(q))
924 		return;
925 
926 	mt76_dma_sync_idx(dev, q);
927 	if (mt76_queue_is_npu(q))
928 		mt76_npu_fill_rx_queue(dev, q);
929 	else
930 		mt76_dma_rx_fill(dev, q, false);
931 }
932 
933 static void
934 mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
935 		  int len, bool more, u32 info, bool allow_direct)
936 {
937 	struct sk_buff *skb = q->rx_head;
938 	struct skb_shared_info *shinfo = skb_shinfo(skb);
939 	int nr_frags = shinfo->nr_frags;
940 
941 	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
942 		struct page *page = virt_to_head_page(data);
943 #if defined(__linux__)
944 		int offset = data - page_address(page) + q->buf_offset;
945 #elif defined(__FreeBSD__)
946 		int offset = (u8 *)data - (u8 *)page_address(page) + q->buf_offset;
947 #endif
948 
949 		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
950 	} else {
951 		mt76_put_page_pool_buf(data, allow_direct);
952 	}
953 
954 	if (more)
955 		return;
956 
957 	q->rx_head = NULL;
958 	if (nr_frags < ARRAY_SIZE(shinfo->frags))
959 		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
960 	else
961 		dev_kfree_skb(skb);
962 }
963 
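/* NAPI RX loop: dequeue up to 'budget' completed buffers, build skbs from
 * the page-pool buffers and hand them to the driver's rx_skb() callback.
 * Multi-buffer frames are accumulated in q->rx_head until the last segment
 * arrives; the ring is refilled before returning.
 */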
964 static int
965 mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
966 {
967 	int len, data_len, done = 0, dma_idx;
968 	struct sk_buff *skb;
969 	unsigned char *data;
970 	bool check_ddone = false;
971 	bool allow_direct = !mt76_queue_is_wed_rx(q);
972 	bool more;
973 
974 	if ((q->flags & MT_QFLAG_WED_RRO_EN) ||
975 	    (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
976 	     mt76_queue_is_wed_tx_free(q))) {
977 		dma_idx = Q_READ(q, dma_idx);
978 		check_ddone = true;
979 	}
980 
981 	while (done < budget) {
982 		bool drop = false;
983 		u32 info;
984 
985 		if (check_ddone) {
986 			if (q->tail == dma_idx)
987 				dma_idx = Q_READ(q, dma_idx);
988 
989 			if (q->tail == dma_idx)
990 				break;
991 		}
992 
993 		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
994 					&drop);
995 		if (!data)
996 			break;
997 
998 		if (PTR_ERR(data) == -EAGAIN) {
999 			done++;
1000 			continue;
1001 		}
1002 
1003 		if (mt76_queue_is_wed_rro_ind(q) && dev->drv->rx_rro_ind_process)
1004 			dev->drv->rx_rro_ind_process(dev, data);
1005 
1006 		if (mt76_queue_is_wed_rro(q) &&
1007 		    !mt76_queue_is_wed_rro_rxdmad_c(q)) {
1008 			done++;
1009 			continue;
1010 		}
1011 
1012 		if (drop)
1013 			goto free_frag;
1014 
1015 		if (q->rx_head)
1016 			data_len = q->buf_size;
1017 		else
1018 			data_len = SKB_WITH_OVERHEAD(q->buf_size);
1019 
1020 		if (data_len < len + q->buf_offset) {
1021 			dev_kfree_skb(q->rx_head);
1022 			q->rx_head = NULL;
1023 			goto free_frag;
1024 		}
1025 
1026 		if (q->rx_head) {
1027 			mt76_add_fragment(dev, q, data, len, more, info,
1028 					  allow_direct);
1029 			continue;
1030 		}
1031 
1032 		if (!more && dev->drv->rx_check &&
1033 		    !(dev->drv->rx_check(dev, data, len)))
1034 			goto free_frag;
1035 
1036 		skb = napi_build_skb(data, q->buf_size);
1037 		if (!skb)
1038 			goto free_frag;
1039 
1040 		skb_reserve(skb, q->buf_offset);
1041 		skb_mark_for_recycle(skb);
1042 
1043 		*(u32 *)skb->cb = info;
1044 
1045 		__skb_put(skb, len);
1046 		done++;
1047 
1048 		if (more) {
1049 			q->rx_head = skb;
1050 			continue;
1051 		}
1052 
1053 		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
1054 		continue;
1055 
1056 free_frag:
1057 		mt76_put_page_pool_buf(data, allow_direct);
1058 	}
1059 
1060 	mt76_dma_rx_fill(dev, q, true);
1061 	return done;
1062 }
1063 
1064 int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
1065 {
1066 	struct mt76_dev *dev;
1067 	int qid, done = 0, cur;
1068 
1069 	dev = mt76_priv(napi->dev);
1070 	qid = napi - dev->napi;
1071 
1072 	rcu_read_lock();
1073 
1074 	do {
1075 		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
1076 		mt76_rx_poll_complete(dev, qid, napi);
1077 		done += cur;
1078 	} while (cur && done < budget);
1079 
1080 	rcu_read_unlock();
1081 
1082 	if (done < budget && napi_complete(napi))
1083 		dev->drv->rx_poll_complete(dev, qid);
1084 
1085 	return done;
1086 }
1087 EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);
1088 
1089 static void
1090 mt76_dma_rx_queue_init(struct mt76_dev *dev, enum mt76_rxq_id qid,
1091 		       int (*poll)(struct napi_struct *napi, int budget))
1092 {
1093 	netif_napi_add(dev->napi_dev, &dev->napi[qid], poll);
1094 	mt76_dma_rx_fill_buf(dev, &dev->q_rx[qid], false);
1095 	napi_enable(&dev->napi[qid]);
1096 }
1097 
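/* Allocate the dummy netdevs backing the RX/TX NAPI contexts and register
 * NAPI for all non-RRO RX queues.
 */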
1098 static int
1099 mt76_dma_init(struct mt76_dev *dev,
1100 	      int (*poll)(struct napi_struct *napi, int budget))
1101 {
1102 	struct mt76_dev **priv;
1103 	int i;
1104 
1105 	dev->napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *));
1106 	if (!dev->napi_dev)
1107 		return -ENOMEM;
1108 
1109 	/* The napi_dev private data points to the parent mt76_dev, so the
1110 	 * mt76_dev can be retrieved from a given napi_dev.
1111 	 */
1112 	priv = netdev_priv(dev->napi_dev);
1113 	*priv = dev;
1114 
1115 	dev->tx_napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *));
1116 	if (!dev->tx_napi_dev) {
1117 		free_netdev(dev->napi_dev);
1118 		return -ENOMEM;
1119 	}
1120 	priv = netdev_priv(dev->tx_napi_dev);
1121 	*priv = dev;
1122 
1123 	snprintf(dev->napi_dev->name, sizeof(dev->napi_dev->name), "%s",
1124 		 wiphy_name(dev->hw->wiphy));
1125 	dev->napi_dev->threaded = 1;
1126 	init_completion(&dev->mmio.wed_reset);
1127 	init_completion(&dev->mmio.wed_reset_complete);
1128 
1129 	mt76_for_each_q_rx(dev, i) {
1130 		if (mt76_queue_is_wed_rro(&dev->q_rx[i]))
1131 			continue;
1132 
1133 		mt76_dma_rx_queue_init(dev, i, poll);
1134 	}
1135 
1136 	return 0;
1137 }
1138 
1139 static const struct mt76_queue_ops mt76_dma_ops = {
1140 	.init = mt76_dma_init,
1141 	.alloc = mt76_dma_alloc_queue,
1142 	.reset_q = mt76_dma_queue_reset,
1143 	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
1144 	.tx_queue_skb = mt76_dma_tx_queue_skb,
1145 	.tx_cleanup = mt76_dma_tx_cleanup,
1146 	.rx_queue_init = mt76_dma_rx_queue_init,
1147 	.rx_cleanup = mt76_dma_rx_cleanup,
1148 	.rx_reset = mt76_dma_rx_reset,
1149 	.kick = mt76_dma_kick_queue,
1150 };
1151 
1152 void mt76_dma_attach(struct mt76_dev *dev)
1153 {
1154 	dev->queue_ops = &mt76_dma_ops;
1155 }
1156 EXPORT_SYMBOL_GPL(mt76_dma_attach);
1157 
1158 void mt76_dma_cleanup(struct mt76_dev *dev)
1159 {
1160 	int i;
1161 
1162 	mt76_worker_disable(&dev->tx_worker);
1163 	napi_disable(&dev->tx_napi);
1164 	netif_napi_del(&dev->tx_napi);
1165 
1166 	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
1167 		struct mt76_phy *phy = dev->phys[i];
1168 		int j;
1169 
1170 		if (!phy)
1171 			continue;
1172 
1173 		for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
1174 			mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
1175 	}
1176 
1177 	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
1178 		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);
1179 
1180 	mt76_for_each_q_rx(dev, i) {
1181 		struct mt76_queue *q = &dev->q_rx[i];
1182 
1183 		if (mtk_wed_device_active(&dev->mmio.wed) &&
1184 		    mt76_queue_is_wed_rro(q))
1185 			continue;
1186 
1187 		netif_napi_del(&dev->napi[i]);
1188 		mt76_dma_rx_cleanup(dev, q);
1189 
1190 		page_pool_destroy(q->page_pool);
1191 	}
1192 
1193 	if (mtk_wed_device_active(&dev->mmio.wed))
1194 		mtk_wed_device_detach(&dev->mmio.wed);
1195 
1196 	if (mtk_wed_device_active(&dev->mmio.wed_hif2))
1197 		mtk_wed_device_detach(&dev->mmio.wed_hif2);
1198 
1199 	mt76_free_pending_txwi(dev);
1200 	mt76_free_pending_rxwi(dev);
1201 	free_netdev(dev->napi_dev);
1202 	free_netdev(dev->tx_napi_dev);
1203 }
1204 EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
1205