// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}

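/* Record the starting sequence number for the next A-MPDU on this TID.
 * It is used as the SSN of a BlockAckReq when queues are flushed while
 * aggregation is active (see mt76_stop_tx_queues() and the send_bar
 * handling in mt76_txq_schedule_list()).
 */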
void
mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	u8 tid;

	if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
	txq = sta->txq[tid];
	mtxq = (struct mt76_txq *)txq->drv_priv;
	if (!mtxq->aggr)
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);

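/* TX status reporting helpers.
 *
 * Frames with a packet ID are stashed per-wcid until the hardware reports
 * their TX status (TXS). Completed frames are collected on a local list
 * under status_lock and handed to mac80211 only after the lock is dropped.
 *
 * A typical TXS handler in a driver looks roughly like this (illustrative
 * sketch only, not lifted from a specific driver):
 *
 *	mt76_tx_status_lock(dev, &list);
 *	skb = mt76_tx_status_skb_get(dev, wcid, pktid, &list);
 *	if (skb) {
 *		... fill IEEE80211_SKB_CB(skb)->status from the TXS data ...
 *		mt76_tx_status_skb_done(dev, skb, &list);
 *	}
 *	mt76_tx_status_unlock(dev, &list);
 */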
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		   __acquires(&dev->status_lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		      __releases(&dev->status_lock)
{
	struct ieee80211_hw *hw;
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_lock);

	rcu_read_lock();
	while ((skb = __skb_dequeue(list)) != NULL) {
		struct ieee80211_tx_status status = {
			.skb = skb,
			.info = IEEE80211_SKB_CB(skb),
		};
		struct ieee80211_rate_status rs = {};
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
		struct mt76_wcid *wcid;

		wcid = rcu_dereference(dev->wcid[cb->wcid]);
		if (wcid) {
			status.sta = wcid_to_sta(wcid);
			if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
				rs.rate_idx = wcid->rate;
				status.rates = &rs;
				status.n_rates = 1;
			} else {
				status.n_rates = 0;
			}
		}

		hw = mt76_tx_status_get_hw(dev, skb);
		spin_lock_bh(&dev->rx_lock);
		ieee80211_tx_status_ext(hw, &status);
		spin_unlock_bh(&dev->rx_lock);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

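/* Merge completion flags for a tracked frame. Once both DMA completion and
 * TX status have been seen, move the frame to the caller's list so it can
 * be reported to mac80211. If the TXS report failed and the driver ignores
 * TXS failures, fake an ACK to avoid spurious packet loss reports.
 */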
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	/* Tx status can be unreliable. If it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED &&
	    (dev->drv->drv_flags & MT_DRV_IGNORE_TXS_FAILED)) {
		info->status.rates[0].count = 0;
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

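/* Allocate a packet ID for TX status tracking and stash the skb in the
 * wcid's pktid IDR so a later TXS report can be matched back to it.
 * Frames that do not need skb based status reporting get one of the
 * special MT_PACKET_ID_* values instead.
 */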
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	memset(cb, 0, sizeof(*cb));

	if (!wcid || !rcu_access_pointer(dev->wcid[wcid->idx]))
		return MT_PACKET_ID_NO_ACK;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE))) {
		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    ((info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) ||
		     ieee80211_is_data(hdr->frame_control)))
			return MT_PACKET_ID_WED;

		return MT_PACKET_ID_NO_SKB;
	}

	spin_lock_bh(&dev->status_lock);

	pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST,
			MT_PACKET_ID_MASK, GFP_ATOMIC);
	if (pid < 0) {
		pid = MT_PACKET_ID_NO_SKB;
		goto out;
	}

	cb->wcid = wcid->idx;
	cb->pktid = pid;

	if (list_empty(&wcid->list))
		list_add_tail(&wcid->list, &dev->wcid_list);

out:
	spin_unlock_bh(&dev->status_lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

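/* Look up the skb tracked under @pktid for @wcid and remove it from the
 * IDR. While here, time out stale entries that completed DMA but never
 * received a TXS report within MT_TX_STATUS_SKB_TIMEOUT. A negative
 * @pktid flushes all tracked entries for the wcid.
 */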
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb;
	int id;

	lockdep_assert_held(&dev->status_lock);

	skb = idr_remove(&wcid->pktid, pktid);
	if (skb)
		goto out;

	/* look for stale entries in the wcid idr queue */
	idr_for_each_entry(&wcid->pktid, skb, id) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (pktid >= 0) {
			if (!(cb->flags & MT_TX_CB_DMA_DONE))
				continue;

			if (time_is_after_jiffies(cb->jiffies +
						   MT_TX_STATUS_SKB_TIMEOUT))
				continue;
		}

		/* It has been too long since DMA_DONE, time out this packet
		 * and stop waiting for TXS callback.
		 */
		idr_remove(&wcid->pktid, cb->pktid);
		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
						    MT_TX_CB_TXS_DONE, list);
	}

out:
	if (idr_is_empty(&wcid->pktid))
		list_del_init(&wcid->list);

	return skb;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

void
mt76_tx_status_check(struct mt76_dev *dev, bool flush)
{
	struct mt76_wcid *wcid, *tmp;
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list)
		mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

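/* Balance the per-wcid counter of packets that are not covered by AQL
 * (no airtime estimate). The counter is incremented when such frames are
 * queued in __mt76_tx_queue_skb() and used to cap the burst size.
 */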
static void
mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
		      struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	int pending;

	if (!wcid || info->tx_time_est)
		return;

	pending = atomic_dec_return(&wcid->non_aql_packets);
	if (pending < 0)
		atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
}

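/* Common TX completion path. Testmode frames are freed directly, frames
 * without status tracking are reported to mac80211 immediately using the
 * last known rate of the wcid, and tracked frames are only marked as
 * DMA done here, deferring the report until the TXS event arrives.
 */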
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
			    struct list_head *free_list)
{
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	struct ieee80211_tx_status status = {
		.skb = skb,
		.free_list = free_list,
	};
	struct mt76_wcid *wcid = NULL;
	struct ieee80211_hw *hw;
	struct sk_buff_head list;

	rcu_read_lock();

	if (wcid_idx < ARRAY_SIZE(dev->wcid))
		wcid = rcu_dereference(dev->wcid[wcid_idx]);

	mt76_tx_check_non_aql(dev, wcid, skb);

#ifdef CONFIG_NL80211_TESTMODE
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (skb == phy->test.tx_skb)
			phy->test.tx_done++;
		if (phy->test.tx_queued == phy->test.tx_done)
			wake_up(&dev->tx_wait);

		dev_kfree_skb_any(skb);
		goto out;
	}
#endif

	if (cb->pktid < MT_PACKET_ID_FIRST) {
		struct ieee80211_rate_status rs = {};

		hw = mt76_tx_status_get_hw(dev, skb);
		status.sta = wcid_to_sta(wcid);
		if (status.sta && (wcid->rate.flags || wcid->rate.legacy)) {
			rs.rate_idx = wcid->rate;
			status.rates = &rs;
			status.n_rates = 1;
		}
		spin_lock_bh(&dev->rx_lock);
		ieee80211_tx_status_ext(hw, &status);
		spin_unlock_bh(&dev->rx_lock);
		goto out;
	}

	mt76_tx_status_lock(dev, &list);
	cb->jiffies = jiffies;
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__mt76_tx_complete_skb);

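/* Queue a single frame to the hardware queue and account it against the
 * wcid's non-AQL packet budget. *stop is set once the budget is exhausted
 * so the caller can end its burst.
 */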
static int
__mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
		    struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		    bool *stop)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *q = phy->q_tx[qid];
	struct mt76_dev *dev = phy->dev;
	bool non_aql;
	int pending;
	int idx;

	non_aql = !info->tx_time_est;
	idx = dev->queue_ops->tx_queue_skb(phy, q, qid, skb, wcid, sta);
	if (idx < 0 || !sta)
		return idx;

	wcid = (struct mt76_wcid *)sta->drv_priv;
	if (!wcid->sta)
		return idx;

	q->entry[idx].wcid = wcid->idx;

	if (!non_aql)
		return idx;

	pending = atomic_inc_return(&wcid->non_aql_packets);
	if (stop && pending >= MT_MAX_NON_AQL_PKT)
		*stop = true;

	return idx;
}

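/* Transmit entry point called from the drivers' mac80211 .tx handlers.
 * Frames are placed on the per-wcid pending queue (or the off-channel
 * queue) and the TX worker is kicked to flush them to hardware.
 */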
void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct sk_buff_head *head;

	if (mt76_testmode_enabled(phy)) {
		ieee80211_free_txskb(phy->hw, skb);
		return;
	}

	if (WARN_ON(skb_get_queue_mapping(skb) >= MT_TXQ_PSD))
		skb_set_queue_mapping(skb, MT_TXQ_BE);

	if (wcid && !(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);

	if ((info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
	    (info->control.flags & IEEE80211_TX_CTRL_DONT_USE_RATE_MASK))
		head = &wcid->tx_offchannel;
	else
		head = &wcid->tx_pending;

	spin_lock_bh(&head->lock);
	__skb_queue_tail(head, skb);
	spin_unlock_bh(&head->lock);

	spin_lock_bh(&phy->tx_lock);
	if (list_empty(&wcid->tx_list))
		list_add_tail(&wcid->tx_list, &phy->tx_list);
	spin_unlock_bh(&phy->tx_lock);

	mt76_worker_schedule(&phy->dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_tx);

static struct sk_buff *
mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;

	skb = ieee80211_tx_dequeue(phy->hw, txq);
	if (!skb)
		return NULL;

	info = IEEE80211_SKB_CB(skb);
	info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, phy->band_idx);

	return skb;
}

static void
mt76_queue_ps_skb(struct mt76_phy *phy, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP |
			       IEEE80211_TX_CTL_REQ_TX_STATUS;

	mt76_skb_set_moredata(skb, !last);
	__mt76_tx_queue_skb(phy, MT_TXQ_PSD, skb, wcid, sta, NULL);
}

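/* mac80211 .release_buffered_frames callback: pull up to @nframes frames
 * buffered for the given TIDs and transmit them on the PS delivery queue,
 * setting the more-data bit on all but the last frame and EOSP on the
 * last one. If nothing was buffered, signal end of service period.
 */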
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = phy->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(phy, mtxq);
			if (!skb)
				break;

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(phy, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(phy, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	} else {
		ieee80211_sta_eosp(sta);
	}

	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

static bool
mt76_txq_stopped(struct mt76_queue *q)
{
	return q->stopped || q->blocked ||
	       q->queued + MT_TXQ_FREE_THR >= q->ndesc;
}

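/* Pull frames from a single mac80211 txq and queue them to hardware until
 * the hardware queue fills up, the non-AQL packet budget is exhausted, the
 * phy is resetting or off-channel, or the txq runs dry. Returns the number
 * of frames queued, 0 if nothing was sent, or a negative error.
 */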
static int
mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
		    struct mt76_txq *mtxq, struct mt76_wcid *wcid)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	enum mt76_txq_id qid = mt76_txq_get_qid(txq);
	struct ieee80211_tx_info *info;
	struct sk_buff *skb;
	int n_frames = 1;
	bool stop = false;
	int idx;

	if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
		return 0;

	if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
		return 0;

	skb = mt76_txq_dequeue(phy, mtxq);
	if (!skb)
		return 0;

	info = IEEE80211_SKB_CB(skb);
	if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);

	spin_lock(&q->lock);
	idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
	spin_unlock(&q->lock);
	if (idx < 0)
		return idx;

	do {
		if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
			break;

		if (stop || mt76_txq_stopped(q))
			break;

		skb = mt76_txq_dequeue(phy, mtxq);
		if (!skb)
			break;

		info = IEEE80211_SKB_CB(skb);
		if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
			ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
					       info->control.rates, 1);

		spin_lock(&q->lock);
		idx = __mt76_tx_queue_skb(phy, qid, skb, wcid, txq->sta, &stop);
		spin_unlock(&q->lock);
		if (idx < 0)
			break;

		n_frames++;
	} while (1);

	spin_lock(&q->lock);
	dev->queue_ops->kick(dev, q);
	spin_unlock(&q->lock);

	return n_frames;
}

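/* Walk mac80211's airtime-fairness scheduling list for one AC, sending a
 * burst per txq. A pending BlockAckReq is emitted before resuming an
 * aggregation session that was interrupted by a queue flush.
 */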
static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_txq *txq;
	struct mt76_txq *mtxq;
	struct mt76_wcid *wcid;
	struct mt76_queue *q;
	int ret = 0;

	while (1) {
		int n_frames = 0;

		txq = ieee80211_next_txq(phy->hw, qid);
		if (!txq)
			break;

		mtxq = (struct mt76_txq *)txq->drv_priv;
		wcid = rcu_dereference(dev->wcid[mtxq->wcid]);
		if (!wcid || test_bit(MT_WCID_FLAG_PS, &wcid->flags))
			continue;

		phy = mt76_dev_phy(dev, wcid->phy_idx);
		if (test_bit(MT76_RESET, &phy->state) || phy->offchannel)
			continue;

		q = phy->q_tx[qid];
		if (dev->queue_ops->tx_cleanup &&
		    q->queued + 2 * MT_TXQ_FREE_THR >= q->ndesc) {
			dev->queue_ops->tx_cleanup(dev, q, false);
		}

		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
		}

		if (!mt76_txq_stopped(q))
			n_frames = mt76_txq_send_burst(phy, q, mtxq, wcid);

		ieee80211_return_txq(phy->hw, txq, false);

		if (unlikely(n_frames < 0))
			return n_frames;

		ret += n_frames;
	}

	return ret;
}

void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
	int len;

	if (qid >= 4)
		return;

	local_bh_disable();
	rcu_read_lock();

	do {
		ieee80211_txq_schedule_start(phy->hw, qid);
		len = mt76_txq_schedule_list(phy, qid);
		ieee80211_txq_schedule_end(phy->hw, qid);
	} while (len > 0);

	rcu_read_unlock();
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

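/* Flush frames queued directly on a wcid (via mt76_tx()) to hardware.
 * Returns -1 if the target hardware queue is stopped or the phy is
 * resetting, so the caller can keep the wcid scheduled for later.
 */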
static int
mt76_txq_schedule_pending_wcid(struct mt76_phy *phy, struct mt76_wcid *wcid,
			       struct sk_buff_head *head)
{
	struct mt76_dev *dev = phy->dev;
	struct ieee80211_sta *sta;
	struct mt76_queue *q;
	struct sk_buff *skb;
	int ret = 0;

	spin_lock(&head->lock);
	while ((skb = skb_peek(head)) != NULL) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		int qid = skb_get_queue_mapping(skb);

		if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
		    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
		    !ieee80211_is_data(hdr->frame_control) &&
		    !ieee80211_is_bufferable_mmpdu(skb))
			qid = MT_TXQ_PSD;

		q = phy->q_tx[qid];
		if (mt76_txq_stopped(q) || test_bit(MT76_RESET, &phy->state)) {
			ret = -1;
			break;
		}

		__skb_unlink(skb, head);
		spin_unlock(&head->lock);

		sta = wcid_to_sta(wcid);
		spin_lock(&q->lock);
		__mt76_tx_queue_skb(phy, qid, skb, wcid, sta, NULL);
		dev->queue_ops->kick(dev, q);
		spin_unlock(&q->lock);

		spin_lock(&head->lock);
	}
	spin_unlock(&head->lock);

	return ret;
}

static void mt76_txq_schedule_pending(struct mt76_phy *phy)
{
	LIST_HEAD(tx_list);

	if (list_empty(&phy->tx_list))
		return;

	local_bh_disable();
	rcu_read_lock();

	spin_lock(&phy->tx_lock);
	list_splice_init(&phy->tx_list, &tx_list);
	while (!list_empty(&tx_list)) {
		struct mt76_wcid *wcid;
		int ret;

		wcid = list_first_entry(&tx_list, struct mt76_wcid, tx_list);
		list_del_init(&wcid->tx_list);

		spin_unlock(&phy->tx_lock);
		ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_offchannel);
		if (ret >= 0 && !phy->offchannel)
			ret = mt76_txq_schedule_pending_wcid(phy, wcid, &wcid->tx_pending);
		spin_lock(&phy->tx_lock);

		/* Reschedule the wcid if either of its queues still has frames */
		if ((!skb_queue_empty(&wcid->tx_pending) ||
		     !skb_queue_empty(&wcid->tx_offchannel)) &&
		    list_empty(&wcid->tx_list))
			list_add_tail(&wcid->tx_list, &phy->tx_list);

		if (ret < 0)
			break;
	}
	spin_unlock(&phy->tx_lock);

	rcu_read_unlock();
	local_bh_enable();
}

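/* Schedule the per-wcid pending queues and then all data ACs for a phy.
 * AC scheduling goes through mac80211's per-hw txq scheduler, so it is
 * skipped for secondary phys that share an ieee80211_hw with the primary
 * phy; the primary phy's pass already covers them.
 */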
void mt76_txq_schedule_all(struct mt76_phy *phy)
{
	struct mt76_phy *main_phy = &phy->dev->phy;
	int i;

	mt76_txq_schedule_pending(phy);

	if (phy != main_phy && phy->hw == main_phy->hw)
		return;

	for (i = 0; i <= MT_TXQ_BK; i++)
		mt76_txq_schedule(phy, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_worker_run(struct mt76_dev *dev)
{
	struct mt76_phy *phy;
	int i;

	mt76_txq_schedule_all(&dev->phy);
	for (i = MT_BAND1; i < ARRAY_SIZE(dev->phys); i++) {
		phy = dev->phys[i];
		if (!phy)
			continue;

		mt76_txq_schedule_all(phy);
	}

#ifdef CONFIG_NL80211_TESTMODE
	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		phy = dev->phys[i];
		if (!phy || !phy->test.tx_pending)
			continue;

		mt76_testmode_tx_pending(phy);
	}
#endif
}
EXPORT_SYMBOL_GPL(mt76_tx_worker_run);

void mt76_tx_worker(struct mt76_worker *w)
{
	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

	mt76_tx_worker_run(dev);
}

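/* Mark all of a station's txqs so that a BlockAckReq is sent before the
 * next transmission on TIDs with an active aggregation session, typically
 * after the hardware queues have been flushed.
 */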
void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_queue *hwq;
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		hwq = phy->q_tx[mt76_txq_get_qid(txq)];
		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		spin_unlock_bh(&hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_phy *phy = hw->priv;
	struct mt76_dev *dev = phy->dev;

	mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);

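/* Grow the frame by @pad bytes of tail padding. For A-MSDUs built as a
 * frag list, the length bookkeeping lives in the head skb while the
 * padding bytes are appended to the last fragment.
 */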
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
	struct sk_buff *iter, *last = skb;

	/* First packet of an A-MSDU burst keeps track of the whole burst
	 * length, need to update length of it and the last packet.
	 */
	skb_walk_frags(skb, iter) {
		last = iter;
		if (!iter->next) {
			skb->data_len += pad;
			skb->len += pad;
			break;
		}
	}

	if (skb_pad(last, pad))
		return -ENOMEM;

	__skb_put(last, pad);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);

void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
			    struct mt76_queue_entry *e)
{
	if (e->skb)
		dev->drv->tx_complete_skb(dev, e);

	spin_lock_bh(&q->lock);
	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);

void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
	struct mt76_phy *phy = &dev->phy;
	struct mt76_queue *q = phy->q_tx[0];

	if (blocked == q->blocked)
		return;

	q->blocked = blocked;

	phy = dev->phys[MT_BAND1];
	if (phy) {
		q = phy->q_tx[0];
		q->blocked = blocked;
	}
	phy = dev->phys[MT_BAND2];
	if (phy) {
		q = phy->q_tx[0];
		q->blocked = blocked;
	}

	if (!blocked)
		mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked);

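/* Allocate a DMA token for a txwi. When the number of outstanding tokens
 * approaches the pool size, TX queues are blocked via
 * __mt76_set_tx_blocked() and unblocked again from mt76_token_release()
 * once enough tokens have been returned.
 */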
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi)
{
	int token;

	spin_lock_bh(&dev->token_lock);

	token = idr_alloc(&dev->token, *ptxwi, 0, dev->token_size, GFP_ATOMIC);
	if (token >= 0)
		dev->token_count++;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	if (mtk_wed_device_active(&dev->mmio.wed) &&
	    token >= dev->mmio.wed.wlan.token_start)
		dev->wed_token_count++;
#endif

	if (dev->token_count >= dev->token_size - MT76_TOKEN_FREE_THR)
		__mt76_set_tx_blocked(dev, true);

	spin_unlock_bh(&dev->token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_token_consume);

int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
			  struct mt76_txwi_cache *t, dma_addr_t phys)
{
	int token;

	spin_lock_bh(&dev->rx_token_lock);
	token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
			  GFP_ATOMIC);
	if (token >= 0) {
		t->ptr = ptr;
		t->dma_addr = phys;
	}
	spin_unlock_bh(&dev->rx_token_lock);

	return token;
}
EXPORT_SYMBOL_GPL(mt76_rx_token_consume);

struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake)
{
	struct mt76_txwi_cache *txwi;

	spin_lock_bh(&dev->token_lock);

	txwi = idr_remove(&dev->token, token);
	if (txwi) {
		dev->token_count--;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
		if (mtk_wed_device_active(&dev->mmio.wed) &&
		    token >= dev->mmio.wed.wlan.token_start &&
		    --dev->wed_token_count == 0)
			wake_up(&dev->tx_wait);
#endif
	}

	if (dev->token_count < dev->token_size - MT76_TOKEN_FREE_THR &&
	    dev->phy.q_tx[0]->blocked)
		*wake = true;

	spin_unlock_bh(&dev->token_lock);

	return txwi;
}
EXPORT_SYMBOL_GPL(mt76_token_release);

struct mt76_txwi_cache *
mt76_rx_token_release(struct mt76_dev *dev, int token)
{
	struct mt76_txwi_cache *t;

	spin_lock_bh(&dev->rx_token_lock);
	t = idr_remove(&dev->rx_token, token);
	spin_unlock_bh(&dev->rx_token_lock);

	return t;
}
EXPORT_SYMBOL_GPL(mt76_rx_token_release);