1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3 * Copyright (C) 2022 MediaTek Inc.
4 */
5
6 #include <linux/etherdevice.h>
7 #include <linux/timekeeping.h>
8 #include "coredump.h"
9 #include "mt7996.h"
10 #include "../dma.h"
11 #include "mac.h"
12 #include "mcu.h"
13
14 #define to_rssi(field, rcpi) ((FIELD_GET(field, rcpi) - 220) / 2)
15
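/* Map the WLAN index reported in the RXD to a mt76_wcid. For MLO stations,
 * pick the link whose vif link matches the reported band index.
 */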
16 static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
17 u16 idx, u8 band_idx)
18 {
19 struct mt7996_sta_link *msta_link;
20 struct mt7996_sta *msta;
21 struct mt7996_vif *mvif;
22 struct mt76_wcid *wcid;
23 int i;
24
25 wcid = mt76_wcid_ptr(dev, idx);
26 if (!wcid || !wcid->sta)
27 return NULL;
28
29 if (!mt7996_band_valid(dev, band_idx))
30 return NULL;
31
32 if (wcid->phy_idx == band_idx)
33 return wcid;
34
35 msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
36 msta = msta_link->sta;
37 if (!msta || !msta->vif)
38 return NULL;
39
40 mvif = msta->vif;
41 for (i = 0; i < ARRAY_SIZE(mvif->mt76.link); i++) {
42 struct mt76_vif_link *mlink;
43
44 mlink = rcu_dereference(mvif->mt76.link[i]);
45 if (!mlink)
46 continue;
47
48 if (mlink->band_idx != band_idx)
49 continue;
50
51 msta_link = rcu_dereference(msta->link[i]);
52 break;
53 }
54
55 return &msta_link->wcid;
56 }
57
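/* Kick off a WTBL update for the given WLAN index and wait for the
 * hardware to clear the busy flag.
 */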
58 bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
59 {
60 mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
61 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
62
63 return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
64 0, 5000);
65 }
66
67 u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw)
68 {
69 mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
70 FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));
71
72 return MT_WTBL_LMAC_OFFS(wcid, dw);
73 }
74
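/* Walk the station poll list: read per-AC airtime counters and the RSSI of
 * response frames from the WTBL, and report them to mac80211.
 */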
75 static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
76 {
77 static const u8 ac_to_tid[] = {
78 [IEEE80211_AC_BE] = 0,
79 [IEEE80211_AC_BK] = 1,
80 [IEEE80211_AC_VI] = 4,
81 [IEEE80211_AC_VO] = 6
82 };
83 struct mt7996_sta_link *msta_link;
84 struct mt76_vif_link *mlink;
85 struct ieee80211_sta *sta;
86 struct mt7996_sta *msta;
87 u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
88 LIST_HEAD(sta_poll_list);
89 struct mt76_wcid *wcid;
90 int i;
91
92 spin_lock_bh(&dev->mt76.sta_poll_lock);
93 list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
94 spin_unlock_bh(&dev->mt76.sta_poll_lock);
95
96 rcu_read_lock();
97
98 while (true) {
99 bool clear = false;
100 u32 addr, val;
101 u16 idx;
102 s8 rssi[4];
103
104 spin_lock_bh(&dev->mt76.sta_poll_lock);
105 if (list_empty(&sta_poll_list)) {
106 spin_unlock_bh(&dev->mt76.sta_poll_lock);
107 break;
108 }
109 msta_link = list_first_entry(&sta_poll_list,
110 struct mt7996_sta_link,
111 wcid.poll_list);
112 msta = msta_link->sta;
113 wcid = &msta_link->wcid;
114 list_del_init(&wcid->poll_list);
115 spin_unlock_bh(&dev->mt76.sta_poll_lock);
116
117 idx = wcid->idx;
118
119 /* refresh peer's airtime reporting */
120 addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);
121
122 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
123 u32 tx_last = msta_link->airtime_ac[i];
124 u32 rx_last = msta_link->airtime_ac[i + 4];
125
126 msta_link->airtime_ac[i] = mt76_rr(dev, addr);
127 msta_link->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
128
129 tx_time[i] = msta_link->airtime_ac[i] - tx_last;
130 rx_time[i] = msta_link->airtime_ac[i + 4] - rx_last;
131
132 if ((tx_last | rx_last) & BIT(30))
133 clear = true;
134
135 addr += 8;
136 }
137
138 if (clear) {
139 mt7996_mac_wtbl_update(dev, idx,
140 MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
141 memset(msta_link->airtime_ac, 0,
142 sizeof(msta_link->airtime_ac));
143 }
144
145 if (!wcid->sta)
146 continue;
147
148 sta = container_of((void *)msta, struct ieee80211_sta,
149 drv_priv);
150 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
151 u8 q = mt76_connac_lmac_mapping(i);
152 u32 tx_cur = tx_time[q];
153 u32 rx_cur = rx_time[q];
154 u8 tid = ac_to_tid[i];
155
156 if (!tx_cur && !rx_cur)
157 continue;
158
159 ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur);
160 }
161
162 /* get signal strength of resp frames (CTS/BA/ACK) */
163 addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34);
164 val = mt76_rr(dev, addr);
165
166 rssi[0] = to_rssi(GENMASK(7, 0), val);
167 rssi[1] = to_rssi(GENMASK(15, 8), val);
168 rssi[2] = to_rssi(GENMASK(23, 16), val);
169 rssi[3] = to_rssi(GENMASK(31, 24), val);
170
171 mlink = rcu_dereference(msta->vif->mt76.link[wcid->link_id]);
172 if (mlink) {
173 struct mt76_phy *mphy = mt76_vif_link_phy(mlink);
174
175 if (mphy)
176 msta_link->ack_signal =
177 mt76_rx_signal(mphy->antenna_mask,
178 rssi);
179 }
180
181 ewma_avg_signal_add(&msta_link->avg_ack_signal,
182 -msta_link->ack_signal);
183 }
184
185 rcu_read_unlock();
186 }
187
188 /* The HW does not translate the mac header to 802.3 for mesh point */
189 static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
190 {
191 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
192 struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
193 struct mt7996_sta_link *msta_link = (void *)status->wcid;
194 struct mt7996_sta *msta = msta_link->sta;
195 struct ieee80211_bss_conf *link_conf;
196 __le32 *rxd = (__le32 *)skb->data;
197 struct ieee80211_sta *sta;
198 struct ieee80211_vif *vif;
199 struct ieee80211_hdr hdr;
200 u16 frame_control;
201
202 if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
203 MT_RXD3_NORMAL_U2M)
204 return -EINVAL;
205
206 if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
207 return -EINVAL;
208
209 if (!msta || !msta->vif)
210 return -EINVAL;
211
212 sta = wcid_to_sta(status->wcid);
213 vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
214 link_conf = rcu_dereference(vif->link_conf[msta_link->wcid.link_id]);
215 if (!link_conf)
216 return -EINVAL;
217
218 /* store the info from RXD and ethhdr to avoid being overridden */
219 frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
220 hdr.frame_control = cpu_to_le16(frame_control);
221 hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
222 hdr.duration_id = 0;
223
224 ether_addr_copy(hdr.addr1, vif->addr);
225 ether_addr_copy(hdr.addr2, sta->addr);
226 switch (frame_control & (IEEE80211_FCTL_TODS |
227 IEEE80211_FCTL_FROMDS)) {
228 case 0:
229 ether_addr_copy(hdr.addr3, link_conf->bssid);
230 break;
231 case IEEE80211_FCTL_FROMDS:
232 ether_addr_copy(hdr.addr3, eth_hdr->h_source);
233 break;
234 case IEEE80211_FCTL_TODS:
235 ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
236 break;
237 case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
238 ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
239 ether_addr_copy(hdr.addr4, eth_hdr->h_source);
240 break;
241 default:
242 return -EINVAL;
243 }
244
245 skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
246 if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
247 eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
248 ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
249 else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
250 ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
251 else
252 skb_pull(skb, 2);
253
254 if (ieee80211_has_order(hdr.frame_control))
255 memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
256 IEEE80211_HT_CTL_LEN);
257 if (ieee80211_is_data_qos(hdr.frame_control)) {
258 __le16 qos_ctrl;
259
260 qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
261 memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
262 IEEE80211_QOS_CTL_LEN);
263 }
264
265 if (ieee80211_has_a4(hdr.frame_control))
266 memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
267 else
268 memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);
269
270 return 0;
271 }
272
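/* Decode the P-RXV rate fields (TX mode, MCS, NSS, GI, bandwidth) into the
 * mt76_rx_status of the frame.
 */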
273 static int
274 mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
275 struct mt76_rx_status *status,
276 struct ieee80211_supported_band *sband,
277 __le32 *rxv, u8 *mode)
278 {
279 u32 v0, v2;
280 u8 stbc, gi, bw, dcm, nss;
281 int i, idx;
282 bool cck = false;
283
284 v0 = le32_to_cpu(rxv[0]);
285 v2 = le32_to_cpu(rxv[2]);
286
287 idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
288 i = idx;
289 nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;
290
291 stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
292 gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
293 *mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
294 dcm = FIELD_GET(MT_PRXV_DCM, v2);
295 bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);
296
297 switch (*mode) {
298 case MT_PHY_TYPE_CCK:
299 cck = true;
300 fallthrough;
301 case MT_PHY_TYPE_OFDM:
302 i = mt76_get_rate(&dev->mt76, sband, i, cck);
303 break;
304 case MT_PHY_TYPE_HT_GF:
305 case MT_PHY_TYPE_HT:
306 status->encoding = RX_ENC_HT;
307 if (gi)
308 status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
309 if (i > 31)
310 return -EINVAL;
311 break;
312 case MT_PHY_TYPE_VHT:
313 status->nss = nss;
314 status->encoding = RX_ENC_VHT;
315 if (gi)
316 status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
317 if (i > 11)
318 return -EINVAL;
319 break;
320 case MT_PHY_TYPE_HE_MU:
321 case MT_PHY_TYPE_HE_SU:
322 case MT_PHY_TYPE_HE_EXT_SU:
323 case MT_PHY_TYPE_HE_TB:
324 status->nss = nss;
325 status->encoding = RX_ENC_HE;
326 i &= GENMASK(3, 0);
327
328 if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
329 status->he_gi = gi;
330
331 status->he_dcm = dcm;
332 break;
333 case MT_PHY_TYPE_EHT_SU:
334 case MT_PHY_TYPE_EHT_TRIG:
335 case MT_PHY_TYPE_EHT_MU:
336 status->nss = nss;
337 status->encoding = RX_ENC_EHT;
338 i &= GENMASK(3, 0);
339
340 if (gi <= NL80211_RATE_INFO_EHT_GI_3_2)
341 status->eht.gi = gi;
342 break;
343 default:
344 return -EINVAL;
345 }
346 status->rate_idx = i;
347
348 switch (bw) {
349 case IEEE80211_STA_RX_BW_20:
350 break;
351 case IEEE80211_STA_RX_BW_40:
352 if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
353 (idx & MT_PRXV_TX_ER_SU_106T)) {
354 status->bw = RATE_INFO_BW_HE_RU;
355 status->he_ru =
356 NL80211_RATE_INFO_HE_RU_ALLOC_106;
357 } else {
358 status->bw = RATE_INFO_BW_40;
359 }
360 break;
361 case IEEE80211_STA_RX_BW_80:
362 status->bw = RATE_INFO_BW_80;
363 break;
364 case IEEE80211_STA_RX_BW_160:
365 status->bw = RATE_INFO_BW_160;
366 break;
367 /* rxv reports bw 320-1 and 320-2 separately */
368 case IEEE80211_STA_RX_BW_320:
369 case IEEE80211_STA_RX_BW_320 + 1:
370 status->bw = RATE_INFO_BW_320;
371 break;
372 default:
373 return -EINVAL;
374 }
375
376 status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
377 if (*mode < MT_PHY_TYPE_HE_SU && gi)
378 status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
379
380 return 0;
381 }
382
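/* For frames received on a WED RX queue with a valid PPE entry, let the WED
 * block check the PPE entry for hardware flow offload.
 */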
383 static void
384 mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q,
385 struct mt7996_sta *msta, struct sk_buff *skb,
386 u32 info)
387 {
388 struct ieee80211_vif *vif;
389 struct wireless_dev *wdev;
390
391 if (!msta || !msta->vif)
392 return;
393
394 if (!mt76_queue_is_wed_rx(q))
395 return;
396
397 if (!(info & MT_DMA_INFO_PPE_VLD))
398 return;
399
400 vif = container_of((void *)msta->vif, struct ieee80211_vif,
401 drv_priv);
402 wdev = ieee80211_vif_to_wdev(vif);
403 skb->dev = wdev->netdev;
404
405 mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
406 FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
407 FIELD_GET(MT_DMA_PPE_ENTRY, info));
408 }
409
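/* Parse the RX descriptor (base RXD plus optional groups 1/2/3/4/5) and fill
 * in the mt76_rx_status; returns a negative value for frames that should be
 * dropped.
 */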
410 static int
411 mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
412 struct sk_buff *skb, u32 *info)
413 {
414 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
415 struct mt76_phy *mphy = &dev->mt76.phy;
416 struct mt7996_phy *phy = &dev->phy;
417 struct ieee80211_supported_band *sband;
418 __le32 *rxd = (__le32 *)skb->data;
419 __le32 *rxv = NULL;
420 u32 rxd0 = le32_to_cpu(rxd[0]);
421 u32 rxd1 = le32_to_cpu(rxd[1]);
422 u32 rxd2 = le32_to_cpu(rxd[2]);
423 u32 rxd3 = le32_to_cpu(rxd[3]);
424 u32 rxd4 = le32_to_cpu(rxd[4]);
425 u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
426 u32 csum_status = *(u32 *)skb->cb;
427 u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
428 bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
429 bool unicast, insert_ccmp_hdr = false;
430 u8 remove_pad, amsdu_info, band_idx;
431 u8 mode = 0, qos_ctl = 0;
432 bool hdr_trans;
433 u16 hdr_gap;
434 u16 seq_ctrl = 0;
435 __le16 fc = 0;
436 int idx;
437 u8 hw_aggr = false;
438 struct mt7996_sta *msta = NULL;
439
440 hw_aggr = status->aggr;
441 memset(status, 0, sizeof(*status));
442
443 band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
444 mphy = dev->mt76.phys[band_idx];
445 phy = mphy->priv;
446 status->phy_idx = mphy->band_idx;
447
448 if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
449 return -EINVAL;
450
451 if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
452 return -EINVAL;
453
454 hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
455 if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
456 return -EINVAL;
457
458 /* ICV error or CCMP/BIP/WPI MIC error */
459 if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
460 status->flag |= RX_FLAG_ONLY_MONITOR;
461
462 unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
463 idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
464 status->wcid = mt7996_rx_get_wcid(dev, idx, band_idx);
465
466 if (status->wcid) {
467 struct mt7996_sta_link *msta_link;
468
469 msta_link = container_of(status->wcid, struct mt7996_sta_link,
470 wcid);
471 msta = msta_link->sta;
472 mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
473 }
474
475 status->freq = mphy->chandef.chan->center_freq;
476 status->band = mphy->chandef.chan->band;
477 if (status->band == NL80211_BAND_5GHZ)
478 sband = &mphy->sband_5g.sband;
479 else if (status->band == NL80211_BAND_6GHZ)
480 sband = &mphy->sband_6g.sband;
481 else
482 sband = &mphy->sband_2g.sband;
483
484 if (!sband->channels)
485 return -EINVAL;
486
487 if ((rxd3 & csum_mask) == csum_mask &&
488 !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
489 skb->ip_summed = CHECKSUM_UNNECESSARY;
490
491 if (rxd3 & MT_RXD3_NORMAL_FCS_ERR)
492 status->flag |= RX_FLAG_FAILED_FCS_CRC;
493
494 if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
495 status->flag |= RX_FLAG_MMIC_ERROR;
496
497 if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
498 !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
499 status->flag |= RX_FLAG_DECRYPTED;
500 status->flag |= RX_FLAG_IV_STRIPPED;
501 status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
502 }
503
504 remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
505
506 if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
507 return -EINVAL;
508
509 rxd += 8;
510 if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
511 u32 v0 = le32_to_cpu(rxd[0]);
512 u32 v2 = le32_to_cpu(rxd[2]);
513
514 fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
515 qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);
516 seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);
517
518 rxd += 4;
519 if ((u8 *)rxd - skb->data >= skb->len)
520 return -EINVAL;
521 }
522
523 if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
524 u8 *data = (u8 *)rxd;
525
526 if (status->flag & RX_FLAG_DECRYPTED) {
527 switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
528 case MT_CIPHER_AES_CCMP:
529 case MT_CIPHER_CCMP_CCX:
530 case MT_CIPHER_CCMP_256:
531 insert_ccmp_hdr =
532 FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
533 fallthrough;
534 case MT_CIPHER_TKIP:
535 case MT_CIPHER_TKIP_NO_MIC:
536 case MT_CIPHER_GCMP:
537 case MT_CIPHER_GCMP_256:
538 status->iv[0] = data[5];
539 status->iv[1] = data[4];
540 status->iv[2] = data[3];
541 status->iv[3] = data[2];
542 status->iv[4] = data[1];
543 status->iv[5] = data[0];
544 break;
545 default:
546 break;
547 }
548 }
549 rxd += 4;
550 if ((u8 *)rxd - skb->data >= skb->len)
551 return -EINVAL;
552 }
553
554 if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
555 status->timestamp = le32_to_cpu(rxd[0]);
556 status->flag |= RX_FLAG_MACTIME_START;
557
558 if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
559 status->flag |= RX_FLAG_AMPDU_DETAILS;
560
561 /* all subframes of an A-MPDU have the same timestamp */
562 if (phy->rx_ampdu_ts != status->timestamp) {
563 if (!++phy->ampdu_ref)
564 phy->ampdu_ref++;
565 }
566 phy->rx_ampdu_ts = status->timestamp;
567
568 status->ampdu_ref = phy->ampdu_ref;
569 }
570
571 rxd += 4;
572 if ((u8 *)rxd - skb->data >= skb->len)
573 return -EINVAL;
574 }
575
576 /* RXD Group 3 - P-RXV */
577 if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
578 u32 v3;
579 int ret;
580
581 rxv = rxd;
582 rxd += 4;
583 if ((u8 *)rxd - skb->data >= skb->len)
584 return -EINVAL;
585
586 v3 = le32_to_cpu(rxv[3]);
587
588 status->chains = mphy->antenna_mask;
589 status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
590 status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
591 status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
592 status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);
593
594 /* RXD Group 5 - C-RXV */
595 if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
596 rxd += 24;
597 if ((u8 *)rxd - skb->data >= skb->len)
598 return -EINVAL;
599 }
600
601 ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
602 if (ret < 0)
603 return ret;
604 }
605
606 amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
607 status->amsdu = !!amsdu_info;
608 if (status->amsdu) {
609 status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
610 status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
611 }
612
613 /* IEEE 802.11 fragmentation can only be applied to unicast frames.
614 * Hence, drop fragments with multicast/broadcast RA.
615 * This check fixes vulnerabilities, like CVE-2020-26145.
616 */
617 if ((ieee80211_has_morefrags(fc) || seq_ctrl & IEEE80211_SCTL_FRAG) &&
618 FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) != MT_RXD3_NORMAL_U2M)
619 return -EINVAL;
620
621 hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
622 if (hdr_trans && ieee80211_has_morefrags(fc)) {
623 if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
624 return -EINVAL;
625 hdr_trans = false;
626 } else {
627 int pad_start = 0;
628
629 skb_pull(skb, hdr_gap);
630 if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) {
631 pad_start = ieee80211_get_hdrlen_from_skb(skb);
632 } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
633 /* When header translation failure is indicated,
634 * the hardware will insert an extra 2-byte field
635 * containing the data length after the protocol
636 * type field. This happens either when the LLC-SNAP
637 * pattern did not match, or if a VLAN header was
638 * detected.
639 */
640 pad_start = 12;
641 if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
642 pad_start += 4;
643 else
644 pad_start = 0;
645 }
646
647 if (pad_start) {
648 memmove(skb->data + 2, skb->data, pad_start);
649 skb_pull(skb, 2);
650 }
651 }
652
653 if (!hdr_trans) {
654 struct ieee80211_hdr *hdr;
655
656 if (insert_ccmp_hdr) {
657 u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
658
659 mt76_insert_ccmp_hdr(skb, key_id);
660 }
661
662 hdr = mt76_skb_get_hdr(skb);
663 fc = hdr->frame_control;
664 if (ieee80211_is_beacon(fc))
665 mt76_rx_beacon(mphy, skb);
666 if (ieee80211_is_data_qos(fc)) {
667 u8 *qos = ieee80211_get_qos_ctl(hdr);
668
669 seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
670 qos_ctl = *qos;
671
672 /* Mesh DA/SA/Length will be stripped after hardware
673 * de-amsdu, so the A-MSDU present bit needs to be
674 * cleared here to mark it as a normal mesh frame.
675 */
676 if (ieee80211_has_a4(fc) && is_mesh && status->amsdu)
677 *qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
678 }
679 skb_set_mac_header(skb, (unsigned char *)hdr - skb->data);
680 } else {
681 status->flag |= RX_FLAG_8023;
682 mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
683 *info);
684 mt76_npu_check_ppe(&dev->mt76, skb, *info);
685 }
686
687 if (rxv && !(status->flag & RX_FLAG_8023)) {
688 switch (status->encoding) {
689 case RX_ENC_EHT:
690 mt76_connac3_mac_decode_eht_radiotap(skb, rxv, mode);
691 break;
692 case RX_ENC_HE:
693 mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
694 break;
695 default:
696 break;
697 }
698 }
699
700 if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr)
701 return 0;
702
703 status->aggr = unicast &&
704 !ieee80211_is_qos_nullfunc(fc);
705 status->qos_ctl = qos_ctl;
706 status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
707
708 return 0;
709 }
710
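/* Fill the TXD fields that are specific to 802.3 (HW header translation)
 * frames.
 */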
711 static void
712 mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
713 struct sk_buff *skb, struct mt76_wcid *wcid)
714 {
715 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
716 u8 fc_type, fc_stype;
717 u16 ethertype;
718 bool wmm = false;
719 u32 val;
720
721 if (wcid->sta) {
722 struct ieee80211_sta *sta = wcid_to_sta(wcid);
723
724 wmm = sta->wme;
725 }
726
727 val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
728 FIELD_PREP(MT_TXD1_TID, tid);
729
730 ethertype = get_unaligned_be16(&skb->data[12]);
731 if (ethertype >= ETH_P_802_3_MIN)
732 val |= MT_TXD1_ETH_802_3;
733
734 txwi[1] |= cpu_to_le32(val);
735
736 fc_type = IEEE80211_FTYPE_DATA >> 2;
737 fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
738
739 val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
740 FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
741
742 txwi[2] |= cpu_to_le32(val);
743
744 if (wcid->amsdu)
745 txwi[3] |= cpu_to_le32(MT_TXD3_HW_AMSDU);
746 }
747
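/* Fill the TXD fields that are specific to frames carrying a raw 802.11
 * header.
 */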
748 static void
749 mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
750 struct sk_buff *skb,
751 struct ieee80211_key_conf *key,
752 struct mt76_wcid *wcid)
753 {
754 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
755 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
756 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
757 bool multicast = is_multicast_ether_addr(hdr->addr1);
758 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
759 __le16 fc = hdr->frame_control, sc = hdr->seq_ctrl;
760 u16 seqno = le16_to_cpu(sc);
761 bool hw_bigtk = false;
762 u8 fc_type, fc_stype;
763 u32 val;
764
765 if (ieee80211_is_action(fc) &&
766 skb->len >= IEEE80211_MIN_ACTION_SIZE(action_code) &&
767 mgmt->u.action.category == WLAN_CATEGORY_BACK &&
768 mgmt->u.action.action_code == WLAN_ACTION_ADDBA_REQ) {
769 if (is_mt7990(&dev->mt76))
770 txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TID_ADDBA, tid));
771 else
772 txwi[7] |= cpu_to_le32(MT_TXD7_MAC_TXD);
773
774 tid = MT_TX_ADDBA;
775 } else if (ieee80211_is_mgmt(hdr->frame_control)) {
776 tid = MT_TX_NORMAL;
777 }
778
779 val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
780 FIELD_PREP(MT_TXD1_HDR_INFO,
781 ieee80211_get_hdrlen_from_skb(skb) / 2) |
782 FIELD_PREP(MT_TXD1_TID, tid);
783
784 if (!ieee80211_is_data(fc) || multicast ||
785 info->flags & IEEE80211_TX_CTL_USE_MINRATE)
786 val |= MT_TXD1_FIXED_RATE;
787
788 if (is_mt7990(&dev->mt76) && ieee80211_is_beacon(fc) &&
789 (wcid->hw_key_idx2 == 6 || wcid->hw_key_idx2 == 7))
790 hw_bigtk = true;
791
792 if ((key && multicast && ieee80211_is_robust_mgmt_frame(skb)) || hw_bigtk) {
793 val |= MT_TXD1_BIP;
794 txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
795 }
796
797 txwi[1] |= cpu_to_le32(val);
798
799 fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
800 fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
801
802 val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
803 FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
804
805 if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc))
806 val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST);
807 else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
808 val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID);
809 else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
810 val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST);
811 else
812 val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_NONE);
813
814 txwi[2] |= cpu_to_le32(val);
815
816 txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
817 if (ieee80211_is_beacon(fc)) {
818 txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
819 txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
820 }
821
822 if (multicast && ieee80211_vif_is_mld(info->control.vif)) {
823 val = MT_TXD3_SN_VALID |
824 FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
825 txwi[3] |= cpu_to_le32(val);
826 }
827
828 if (info->flags & IEEE80211_TX_CTL_INJECTED) {
829 if (ieee80211_is_back_req(hdr->frame_control)) {
830 struct ieee80211_bar *bar;
831
832 bar = (struct ieee80211_bar *)skb->data;
833 seqno = le16_to_cpu(bar->start_seq_num);
834 }
835
836 val = MT_TXD3_SN_VALID |
837 FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
838 txwi[3] |= cpu_to_le32(val);
839 txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
840 }
841
842 if (ieee80211_vif_is_mld(info->control.vif) &&
843 (multicast || unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))))
844 txwi[5] |= cpu_to_le32(MT_TXD5_FL);
845
846 if (ieee80211_is_nullfunc(fc) && ieee80211_has_a4(fc) &&
847 ieee80211_vif_is_mld(info->control.vif)) {
848 txwi[5] |= cpu_to_le32(MT_TXD5_FL);
849 txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
850 }
851
852 if (!wcid->sta && ieee80211_is_mgmt(fc))
853 txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
854 }
855
856 void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
857 struct sk_buff *skb, struct mt76_wcid *wcid,
858 struct ieee80211_key_conf *key, int pid,
859 enum mt76_txq_id qid, u32 changed)
860 {
861 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
862 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
863 struct ieee80211_vif *vif = info->control.vif;
864 u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
865 u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
866 bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
867 struct mt76_vif_link *mlink = NULL;
868 struct mt7996_vif *mvif;
869 unsigned int link_id;
870 u16 tx_count = 15;
871 u32 val;
872 bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
873 BSS_CHANGED_FILS_DISCOVERY));
874 bool beacon = !!(changed & (BSS_CHANGED_BEACON |
875 BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);
876
877 if (wcid != &dev->mt76.global_wcid)
878 link_id = wcid->link_id;
879 else
880 link_id = u32_get_bits(info->control.flags,
881 IEEE80211_TX_CTRL_MLO_LINK);
882
883 mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
884 if (mvif) {
885 if (wcid->offchannel)
886 mlink = rcu_dereference(mvif->mt76.offchannel_link);
887 if (!mlink)
888 mlink = rcu_dereference(mvif->mt76.link[link_id]);
889 }
890
891 if (mlink) {
892 omac_idx = mlink->omac_idx;
893 wmm_idx = mlink->wmm_idx;
894 band_idx = mlink->band_idx;
895 }
896
897 if (inband_disc) {
898 p_fmt = MT_TX_TYPE_FW;
899 q_idx = MT_LMAC_ALTX0;
900 } else if (beacon) {
901 p_fmt = MT_TX_TYPE_FW;
902 q_idx = MT_LMAC_BCN0;
903 } else if (qid >= MT_TXQ_PSD) {
904 p_fmt = MT_TX_TYPE_CT;
905 q_idx = MT_LMAC_ALTX0;
906 } else {
907 p_fmt = MT_TX_TYPE_CT;
908 q_idx = wmm_idx * MT7996_MAX_WMM_SETS +
909 mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
910 }
911
912 val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
913 FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
914 FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
915 txwi[0] = cpu_to_le32(val);
916
917 val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
918 FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
919
920 if (band_idx)
921 val |= FIELD_PREP(MT_TXD1_TGID, band_idx);
922
923 txwi[1] = cpu_to_le32(val);
924 txwi[2] = 0;
925
926 val = MT_TXD3_SW_POWER_MGMT |
927 FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
928 if (key)
929 val |= MT_TXD3_PROTECT_FRAME;
930 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
931 val |= MT_TXD3_NO_ACK;
932
933 txwi[3] = cpu_to_le32(val);
934 txwi[4] = 0;
935
936 val = FIELD_PREP(MT_TXD5_PID, pid);
937 if (pid >= MT_PACKET_ID_FIRST)
938 val |= MT_TXD5_TX_STATUS_HOST;
939 txwi[5] = cpu_to_le32(val);
940
941 val = MT_TXD6_DAS | MT_TXD6_VTA;
942 if ((q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0) ||
943 skb->protocol == cpu_to_be16(ETH_P_PAE))
944 val |= MT_TXD6_DIS_MAT;
945
946 if (is_mt7996(&dev->mt76))
947 val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
948 else if (is_8023 || !ieee80211_is_mgmt(hdr->frame_control))
949 val |= FIELD_PREP(MT_TXD6_MSDU_CNT_V2, 1);
950
951 txwi[6] = cpu_to_le32(val);
952 txwi[7] = 0;
953
954 if (is_8023)
955 mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
956 else
957 mt7996_mac_write_txwi_80211(dev, txwi, skb, key, wcid);
958
959 if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
960 bool mcast = ieee80211_is_data(hdr->frame_control) &&
961 is_multicast_ether_addr(hdr->addr1);
962 u8 idx = MT7996_BASIC_RATES_TBL;
963
964 if (mlink) {
965 if (mcast && mlink->mcast_rates_idx)
966 idx = mlink->mcast_rates_idx;
967 else if (beacon && mlink->beacon_rates_idx)
968 idx = mlink->beacon_rates_idx;
969 else
970 idx = mlink->basic_rates_idx;
971 }
972
973 val = FIELD_PREP(MT_TXD6_TX_RATE, idx) | MT_TXD6_FIXED_BW;
974 if (mcast)
975 val |= MT_TXD6_DIS_MAT;
976 txwi[6] |= cpu_to_le32(val);
977 txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
978 }
979 }
980
981 static bool
982 mt7996_tx_use_mgmt(struct mt7996_dev *dev, struct sk_buff *skb)
983 {
984 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
985
986 if (ieee80211_is_mgmt(hdr->frame_control))
987 return true;
988
989 /* for SDO to bypass specific data frames */
990 if (!mt7996_has_wa(dev)) {
991 if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
992 return true;
993
994 if (ieee80211_has_a4(hdr->frame_control) &&
995 !ieee80211_is_data_present(hdr->frame_control))
996 return true;
997 }
998
999 return false;
1000 }
1001
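/* Prepare a TX frame for DMA: resolve the per-link wcid, consume a token,
 * write the TXD (and the MAC TXP where required) and trim the buffer list
 * handed to the hardware.
 */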
1002 int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
1003 enum mt76_txq_id qid, struct mt76_wcid *wcid,
1004 struct ieee80211_sta *sta,
1005 struct mt76_tx_info *tx_info)
1006 {
1007 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
1008 struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
1009 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
1010 struct ieee80211_key_conf *key = info->control.hw_key;
1011 struct ieee80211_vif *vif = info->control.vif;
1012 struct mt7996_vif *mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
1013 struct mt7996_sta *msta = sta ? (struct mt7996_sta *)sta->drv_priv : NULL;
1014 struct mt76_vif_link *mlink = NULL;
1015 struct mt76_txwi_cache *t;
1016 int id, i, pid, nbuf = tx_info->nbuf - 1;
1017 bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
1018 __le32 *ptr = (__le32 *)txwi_ptr;
1019 u8 *txwi = (u8 *)txwi_ptr;
1020 u8 link_id;
1021
1022 if (unlikely(tx_info->skb->len <= ETH_HLEN))
1023 return -EINVAL;
1024
1025 if (!wcid)
1026 wcid = &dev->mt76.global_wcid;
1027
1028 if ((is_8023 || ieee80211_is_data_qos(hdr->frame_control)) && sta->mlo &&
1029 likely(tx_info->skb->protocol != cpu_to_be16(ETH_P_PAE))) {
1030 u8 tid = tx_info->skb->priority & IEEE80211_QOS_CTL_TID_MASK;
1031
1032 link_id = (tid % 2) ? msta->seclink_id : msta->deflink_id;
1033 } else {
1034 link_id = u32_get_bits(info->control.flags,
1035 IEEE80211_TX_CTRL_MLO_LINK);
1036 }
1037
1038 if (link_id != wcid->link_id && link_id != IEEE80211_LINK_UNSPECIFIED) {
1039 if (msta) {
1040 struct mt7996_sta_link *msta_link =
1041 rcu_dereference(msta->link[link_id]);
1042
1043 if (msta_link)
1044 wcid = &msta_link->wcid;
1045 } else if (mvif) {
1046 mlink = rcu_dereference(mvif->mt76.link[link_id]);
1047 if (mlink && mlink->wcid)
1048 wcid = mlink->wcid;
1049 }
1050 }
1051
1052 t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
1053 t->skb = tx_info->skb;
1054
1055 id = mt76_token_consume(mdev, &t);
1056 if (id < 0)
1057 return id;
1058
1059 /* Since the rules of HW MLD address translation are not fully
1060 * compatible with 802.11 EAPOL frames, do the translation in
1061 * software.
1062 */
1063 if (tx_info->skb->protocol == cpu_to_be16(ETH_P_PAE) && sta->mlo) {
1064 struct ieee80211_hdr *hdr = (void *)tx_info->skb->data;
1065 struct ieee80211_bss_conf *link_conf;
1066 struct ieee80211_link_sta *link_sta;
1067
1068 link_conf = rcu_dereference(vif->link_conf[wcid->link_id]);
1069 if (!link_conf)
1070 return -EINVAL;
1071
1072 link_sta = rcu_dereference(sta->link[wcid->link_id]);
1073 if (!link_sta)
1074 return -EINVAL;
1075
1076 dma_sync_single_for_cpu(mdev->dma_dev, tx_info->buf[1].addr,
1077 tx_info->buf[1].len, DMA_TO_DEVICE);
1078
1079 memcpy(hdr->addr1, link_sta->addr, ETH_ALEN);
1080 memcpy(hdr->addr2, link_conf->addr, ETH_ALEN);
1081 if (ieee80211_has_a4(hdr->frame_control)) {
1082 memcpy(hdr->addr3, sta->addr, ETH_ALEN);
1083 memcpy(hdr->addr4, vif->addr, ETH_ALEN);
1084 } else if (ieee80211_has_tods(hdr->frame_control)) {
1085 memcpy(hdr->addr3, sta->addr, ETH_ALEN);
1086 } else if (ieee80211_has_fromds(hdr->frame_control)) {
1087 memcpy(hdr->addr3, vif->addr, ETH_ALEN);
1088 }
1089
1090 dma_sync_single_for_device(mdev->dma_dev, tx_info->buf[1].addr,
1091 tx_info->buf[1].len, DMA_TO_DEVICE);
1092 }
1093
1094 pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
1095 memset(txwi_ptr, 0, MT_TXD_SIZE);
1096 /* Non-QoS data is transmitted with an 802.11 header, so the host needs to fill in the TXD */
1097 if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
1098 mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
1099 pid, qid, 0);
1100
1101 /* MT7996 and MT7992 require the driver to provide the MAC TXP for
1102 * ADDBA requests
1103 */
1104 if (le32_to_cpu(ptr[7]) & MT_TXD7_MAC_TXD) {
1105 u32 val, mac_txp_size = sizeof(struct mt76_connac_hw_txp);
1106
1107 ptr = (__le32 *)(txwi + MT_TXD_SIZE);
1108 memset((void *)ptr, 0, mac_txp_size);
1109
1110 val = FIELD_PREP(MT_TXP0_TOKEN_ID0, id) |
1111 MT_TXP0_TOKEN_ID0_VALID_MASK;
1112 ptr[0] = cpu_to_le32(val);
1113
1114 val = FIELD_PREP(MT_TXP1_TID_ADDBA,
1115 tx_info->skb->priority &
1116 IEEE80211_QOS_CTL_TID_MASK);
1117 ptr[1] = cpu_to_le32(val);
1118 ptr[2] = cpu_to_le32(tx_info->buf[1].addr & 0xFFFFFFFF);
1119
1120 val = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[1].len) |
1121 MT_TXP3_ML0_MASK;
1122 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1123 val |= FIELD_PREP(MT_TXP3_DMA_ADDR_H,
1124 tx_info->buf[1].addr >> 32);
1125 #endif
1126 ptr[3] = cpu_to_le32(val);
1127
1128 tx_info->buf[0].len = MT_TXD_SIZE + mac_txp_size;
1129 } else {
1130 struct mt76_connac_txp_common *txp;
1131
1132 txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
1133 for (i = 0; i < nbuf; i++) {
1134 u16 len;
1135
1136 len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len);
1137 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1138 len |= FIELD_PREP(MT_TXP_DMA_ADDR_H,
1139 tx_info->buf[i + 1].addr >> 32);
1140 #endif
1141
1142 txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
1143 txp->fw.len[i] = cpu_to_le16(len);
1144 }
1145 txp->fw.nbuf = nbuf;
1146
1147 txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);
1148
1149 if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
1150 txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);
1151
1152 if (!key)
1153 txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
1154
1155 if (!is_8023 && mt7996_tx_use_mgmt(dev, tx_info->skb))
1156 txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
1157
1158 if (mvif) {
1159 if (wcid->offchannel)
1160 mlink = rcu_dereference(mvif->mt76.offchannel_link);
1161 if (!mlink)
1162 mlink = rcu_dereference(mvif->mt76.link[wcid->link_id]);
1163
1164 txp->fw.bss_idx = mlink ? mlink->idx : mvif->deflink.mt76.idx;
1165 }
1166
1167 txp->fw.token = cpu_to_le16(id);
1168 txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);
1169 }
1170
1171 tx_info->skb = NULL;
1172
1173 /* pass partial skb header to fw */
1174 tx_info->buf[1].len = MT_CT_PARSE_LEN;
1175 tx_info->buf[1].skip_unmap = true;
1176 tx_info->nbuf = MT_CT_DMA_BUF_NUM;
1177
1178 return 0;
1179 }
1180
1181 u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
1182 {
1183 struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
1184 __le32 *txwi = ptr;
1185 u32 val;
1186
1187 memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));
1188
1189 val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
1190 FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
1191 txwi[0] = cpu_to_le32(val);
1192
1193 val = BIT(31) |
1194 FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
1195 txwi[1] = cpu_to_le32(val);
1196
1197 txp->token = cpu_to_le16(token_id);
1198 txp->nbuf = 1;
1199 txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));
1200
1201 return MT_TXD_SIZE + sizeof(*txp);
1202 }
1203
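/* Try to start a TX BA session for QoS data frames on TIDs that do not have
 * one yet (VO TIDs are skipped).
 */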
1204 static void
1205 mt7996_tx_check_aggr(struct ieee80211_link_sta *link_sta,
1206 struct mt76_wcid *wcid, struct sk_buff *skb)
1207 {
1208 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1209 bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
1210 u16 fc, tid;
1211
1212 if (!(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he))
1213 return;
1214
1215 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
1216 if (tid >= 6) /* skip VO queue */
1217 return;
1218
1219 if (is_8023) {
1220 fc = IEEE80211_FTYPE_DATA |
1221 (link_sta->sta->wme ? IEEE80211_STYPE_QOS_DATA
1222 : IEEE80211_STYPE_DATA);
1223 } else {
1224 /* No need to get precise TID for Action/Management Frame,
1225 * since it will not meet the following Frame Control
1226 * condition anyway.
1227 */
1228
1229 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1230
1231 fc = le16_to_cpu(hdr->frame_control) &
1232 (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
1233 }
1234
1235 if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
1236 return;
1237
1238 if (!test_and_set_bit(tid, &wcid->ampdu_state) &&
1239 ieee80211_start_tx_ba_session(link_sta->sta, tid, 0))
1240 clear_bit(tid, &wcid->ampdu_state);
1241 }
1242
1243 static void
1244 mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
1245 struct ieee80211_link_sta *link_sta,
1246 struct mt76_wcid *wcid, struct list_head *free_list)
1247 {
1248 struct mt76_dev *mdev = &dev->mt76;
1249 __le32 *txwi;
1250 u16 wcid_idx;
1251
1252 mt76_connac_txp_skb_unmap(mdev, t);
1253 if (!t->skb)
1254 goto out;
1255
1256 txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
1257 if (link_sta) {
1258 wcid_idx = wcid->idx;
1259 if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) {
1260 struct mt7996_sta *msta;
1261
1262 /* AMPDU state is stored in the primary link */
1263 msta = (void *)link_sta->sta->drv_priv;
1264 mt7996_tx_check_aggr(link_sta, &msta->deflink.wcid,
1265 t->skb);
1266 }
1267 } else {
1268 wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
1269 }
1270
1271 __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
1272
1273 out:
1274 t->skb = NULL;
1275 mt76_put_txwi(mdev, t);
1276 }
1277
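/* Handle a TXRX_NOTIFY (tx free done) event: release the tokens of completed
 * MSDUs, update per-wcid tx stats and complete the associated skbs.
 */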
1278 static void
1279 mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
1280 {
1281 __le32 *tx_free = (__le32 *)data, *cur_info;
1282 struct mt76_dev *mdev = &dev->mt76;
1283 struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
1284 struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
1285 struct ieee80211_link_sta *link_sta = NULL;
1286 struct mt76_txwi_cache *txwi;
1287 struct mt76_wcid *wcid = NULL;
1288 LIST_HEAD(free_list);
1289 struct sk_buff *skb, *tmp;
1290 void *end = data + len;
1291 bool wake = false;
1292 u16 total, count = 0;
1293 u8 ver;
1294
1295 /* clean DMA queues and unmap buffers first */
1296 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
1297 mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
1298 if (phy2) {
1299 mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false);
1300 mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false);
1301 }
1302 if (phy3) {
1303 mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false);
1304 mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
1305 }
1306
1307 ver = le32_get_bits(tx_free[1], MT_TXFREE1_VER);
1308 if (WARN_ON_ONCE(ver < 5))
1309 return;
1310
1311 total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
1312 for (cur_info = &tx_free[2]; count < total; cur_info++) {
1313 u32 msdu, info;
1314 u8 i;
1315
1316 if (WARN_ON_ONCE((void *)cur_info >= end))
1317 return;
1318 /* 1'b1: new wcid pair.
1319 * 1'b0: msdu_id with the same 'wcid pair' as above.
1320 */
1321 info = le32_to_cpu(*cur_info);
1322 if (info & MT_TXFREE_INFO_PAIR) {
1323 struct ieee80211_sta *sta;
1324 unsigned long valid_links;
1325 struct mt7996_sta *msta;
1326 unsigned int id;
1327 u16 idx;
1328
1329 idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
1330 wcid = mt76_wcid_ptr(dev, idx);
1331 sta = wcid_to_sta(wcid);
1332 if (!sta) {
1333 link_sta = NULL;
1334 goto next;
1335 }
1336
1337 link_sta = rcu_dereference(sta->link[wcid->link_id]);
1338 if (!link_sta)
1339 goto next;
1340
1341 msta = (struct mt7996_sta *)sta->drv_priv;
1342 valid_links = sta->valid_links ?: BIT(0);
1343
1344 /* For MLD STA, add all link's wcid to sta_poll_list */
1345 for_each_set_bit(id, &valid_links,
1346 IEEE80211_MLD_MAX_NUM_LINKS) {
1347 struct mt7996_sta_link *msta_link;
1348
1349 msta_link = rcu_dereference(msta->link[id]);
1350 if (!msta_link)
1351 continue;
1352
1353 mt76_wcid_add_poll(&dev->mt76,
1354 &msta_link->wcid);
1355 }
1356 next:
1357 /* ver 7 has a new DW with pair = 1, skip it */
1358 if (ver == 7 && ((void *)(cur_info + 1) < end) &&
1359 (le32_to_cpu(*(cur_info + 1)) & MT_TXFREE_INFO_PAIR))
1360 cur_info++;
1361 continue;
1362 } else if (info & MT_TXFREE_INFO_HEADER) {
1363 u32 tx_retries = 0, tx_failed = 0;
1364
1365 if (!wcid)
1366 continue;
1367
1368 tx_retries =
1369 FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1;
1370 tx_failed = tx_retries +
1371 !!FIELD_GET(MT_TXFREE_INFO_STAT, info);
1372
1373 wcid->stats.tx_retries += tx_retries;
1374 wcid->stats.tx_failed += tx_failed;
1375 continue;
1376 }
1377
1378 for (i = 0; i < 2; i++) {
1379 msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
1380 if (msdu == MT_TXFREE_INFO_MSDU_ID)
1381 continue;
1382
1383 count++;
1384 txwi = mt76_token_release(mdev, msdu, &wake);
1385 if (!txwi)
1386 continue;
1387
1388 mt7996_txwi_free(dev, txwi, link_sta, wcid,
1389 &free_list);
1390 }
1391 }
1392
1393 mt7996_mac_sta_poll(dev);
1394
1395 if (wake)
1396 mt76_set_tx_blocked(&dev->mt76, false);
1397
1398 mt76_worker_schedule(&dev->mt76.tx_worker);
1399
1400 list_for_each_entry_safe(skb, tmp, &free_list, list) {
1401 skb_list_del_init(skb);
1402 napi_consume_skb(skb, 1);
1403 }
1404 }
1405
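/* Parse a TXS record for the given packet id: report the ACK status to
 * mac80211 and update the last tx rate of the wcid.
 */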
1406 static bool
1407 mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
1408 int pid, __le32 *txs_data)
1409 {
1410 struct mt76_sta_stats *stats = &wcid->stats;
1411 struct ieee80211_supported_band *sband;
1412 struct mt76_dev *mdev = &dev->mt76;
1413 struct mt76_phy *mphy;
1414 struct ieee80211_tx_info *info;
1415 struct sk_buff_head list;
1416 struct rate_info rate = {};
1417 struct sk_buff *skb = NULL;
1418 bool cck = false;
1419 u32 txrate, txs, mode, stbc;
1420
1421 txs = le32_to_cpu(txs_data[0]);
1422
1423 mt76_tx_status_lock(mdev, &list);
1424
1425 /* only report MPDU TXS */
1426 if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == 0) {
1427 skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
1428 if (skb) {
1429 info = IEEE80211_SKB_CB(skb);
1430 if (!(txs & MT_TXS0_ACK_ERROR_MASK))
1431 info->flags |= IEEE80211_TX_STAT_ACK;
1432
1433 info->status.ampdu_len = 1;
1434 info->status.ampdu_ack_len =
1435 !!(info->flags & IEEE80211_TX_STAT_ACK);
1436
1437 info->status.rates[0].idx = -1;
1438 }
1439 }
1440
1441 if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) {
1442 struct ieee80211_sta *sta;
1443 u8 tid;
1444
1445 sta = wcid_to_sta(wcid);
1446 tid = FIELD_GET(MT_TXS0_TID, txs);
1447 ieee80211_refresh_tx_agg_session_timer(sta, tid);
1448 }
1449
1450 txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);
1451
1452 rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
1453 rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
1454 stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);
1455
1456 if (stbc && rate.nss > 1)
1457 rate.nss >>= 1;
1458
1459 if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
1460 stats->tx_nss[rate.nss - 1]++;
1461 if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
1462 stats->tx_mcs[rate.mcs]++;
1463
1464 mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
1465 switch (mode) {
1466 case MT_PHY_TYPE_CCK:
1467 cck = true;
1468 fallthrough;
1469 case MT_PHY_TYPE_OFDM:
1470 mphy = mt76_dev_phy(mdev, wcid->phy_idx);
1471
1472 if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
1473 sband = &mphy->sband_5g.sband;
1474 else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
1475 sband = &mphy->sband_6g.sband;
1476 else
1477 sband = &mphy->sband_2g.sband;
1478
1479 rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
1480 rate.legacy = sband->bitrates[rate.mcs].bitrate;
1481 break;
1482 case MT_PHY_TYPE_HT:
1483 case MT_PHY_TYPE_HT_GF:
1484 if (rate.mcs > 31)
1485 goto out;
1486
1487 rate.flags = RATE_INFO_FLAGS_MCS;
1488 if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
1489 rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1490 break;
1491 case MT_PHY_TYPE_VHT:
1492 if (rate.mcs > 9)
1493 goto out;
1494
1495 rate.flags = RATE_INFO_FLAGS_VHT_MCS;
1496 if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
1497 rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1498 break;
1499 case MT_PHY_TYPE_HE_SU:
1500 case MT_PHY_TYPE_HE_EXT_SU:
1501 case MT_PHY_TYPE_HE_TB:
1502 case MT_PHY_TYPE_HE_MU:
1503 if (rate.mcs > 11)
1504 goto out;
1505
1506 rate.he_gi = wcid->rate.he_gi;
1507 rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
1508 rate.flags = RATE_INFO_FLAGS_HE_MCS;
1509 break;
1510 case MT_PHY_TYPE_EHT_SU:
1511 case MT_PHY_TYPE_EHT_TRIG:
1512 case MT_PHY_TYPE_EHT_MU:
1513 if (rate.mcs > 13)
1514 goto out;
1515
1516 rate.eht_gi = wcid->rate.eht_gi;
1517 rate.flags = RATE_INFO_FLAGS_EHT_MCS;
1518 break;
1519 default:
1520 goto out;
1521 }
1522
1523 stats->tx_mode[mode]++;
1524
1525 switch (FIELD_GET(MT_TXS0_BW, txs)) {
1526 case IEEE80211_STA_RX_BW_320:
1527 rate.bw = RATE_INFO_BW_320;
1528 stats->tx_bw[4]++;
1529 break;
1530 case IEEE80211_STA_RX_BW_160:
1531 rate.bw = RATE_INFO_BW_160;
1532 stats->tx_bw[3]++;
1533 break;
1534 case IEEE80211_STA_RX_BW_80:
1535 rate.bw = RATE_INFO_BW_80;
1536 stats->tx_bw[2]++;
1537 break;
1538 case IEEE80211_STA_RX_BW_40:
1539 rate.bw = RATE_INFO_BW_40;
1540 stats->tx_bw[1]++;
1541 break;
1542 default:
1543 rate.bw = RATE_INFO_BW_20;
1544 stats->tx_bw[0]++;
1545 break;
1546 }
1547 wcid->rate = rate;
1548
1549 out:
1550 if (skb)
1551 mt76_tx_status_skb_done(mdev, skb, &list);
1552 mt76_tx_status_unlock(mdev, &list);
1553
1554 return !!skb;
1555 }
1556
1557 static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
1558 {
1559 struct mt7996_sta_link *msta_link;
1560 struct mt76_wcid *wcid;
1561 __le32 *txs_data = data;
1562 u16 wcidx;
1563 u8 pid;
1564
1565 wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
1566 pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
1567
1568 if (pid < MT_PACKET_ID_NO_SKB)
1569 return;
1570
1571 rcu_read_lock();
1572
1573 wcid = mt76_wcid_ptr(dev, wcidx);
1574 if (!wcid)
1575 goto out;
1576
1577 mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data);
1578
1579 if (!wcid->sta)
1580 goto out;
1581
1582 msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
1583 mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
1584
1585 out:
1586 rcu_read_unlock();
1587 }
1588
1589 bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
1590 {
1591 struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
1592 __le32 *rxd = (__le32 *)data;
1593 __le32 *end = (__le32 *)&rxd[len / 4];
1594 enum rx_pkt_type type;
1595
1596 type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
1597 if (type != PKT_TYPE_NORMAL) {
1598 u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);
1599
1600 if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
1601 MT_RXD0_SW_PKT_TYPE_FRAME))
1602 return true;
1603 }
1604
1605 switch (type) {
1606 case PKT_TYPE_TXRX_NOTIFY:
1607 mt7996_mac_tx_free(dev, data, len);
1608 return false;
1609 case PKT_TYPE_TXS:
1610 for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
1611 mt7996_mac_add_txs(dev, rxd);
1612 return false;
1613 case PKT_TYPE_RX_FW_MONITOR:
1614 mt7996_debugfs_rx_fw_monitor(dev, data, len);
1615 return false;
1616 default:
1617 return true;
1618 }
1619 }
1620
1621 void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
1622 struct sk_buff *skb, u32 *info)
1623 {
1624 struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
1625 __le32 *rxd = (__le32 *)skb->data;
1626 __le32 *end = (__le32 *)&skb->data[skb->len];
1627 enum rx_pkt_type type;
1628
1629 type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
1630 if (type != PKT_TYPE_NORMAL) {
1631 u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);
1632
1633 if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
1634 MT_RXD0_SW_PKT_TYPE_FRAME))
1635 type = PKT_TYPE_NORMAL;
1636 }
1637
1638 switch (type) {
1639 case PKT_TYPE_TXRX_NOTIFY:
1640 if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2) &&
1641 q == MT_RXQ_TXFREE_BAND2) {
1642 dev_kfree_skb(skb);
1643 break;
1644 }
1645
1646 mt7996_mac_tx_free(dev, skb->data, skb->len);
1647 napi_consume_skb(skb, 1);
1648 break;
1649 case PKT_TYPE_RX_EVENT:
1650 mt7996_mcu_rx_event(dev, skb);
1651 break;
1652 case PKT_TYPE_TXS:
1653 for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
1654 mt7996_mac_add_txs(dev, rxd);
1655 dev_kfree_skb(skb);
1656 break;
1657 case PKT_TYPE_RX_FW_MONITOR:
1658 mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
1659 dev_kfree_skb(skb);
1660 break;
1661 case PKT_TYPE_NORMAL:
1662 if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
1663 mt76_rx(&dev->mt76, q, skb);
1664 return;
1665 }
1666 fallthrough;
1667 default:
1668 dev_kfree_skb(skb);
1669 break;
1670 }
1671 }
1672
1673 static struct mt7996_msdu_page *
1674 mt7996_msdu_page_get_from_cache(struct mt7996_dev *dev)
1675 {
1676 struct mt7996_msdu_page *p = NULL;
1677
1678 spin_lock(&dev->wed_rro.lock);
1679
1680 if (!list_empty(&dev->wed_rro.page_cache)) {
1681 p = list_first_entry(&dev->wed_rro.page_cache,
1682 struct mt7996_msdu_page, list);
1683 list_del(&p->list);
1684 }
1685
1686 spin_unlock(&dev->wed_rro.lock);
1687
1688 return p;
1689 }
1690
1691 static struct mt7996_msdu_page *mt7996_msdu_page_get(struct mt7996_dev *dev)
1692 {
1693 struct mt7996_msdu_page *p;
1694
1695 p = mt7996_msdu_page_get_from_cache(dev);
1696 if (!p) {
1697 p = kzalloc(L1_CACHE_ALIGN(sizeof(*p)), GFP_ATOMIC);
1698 if (p)
1699 INIT_LIST_HEAD(&p->list);
1700 }
1701
1702 return p;
1703 }
1704
1705 static void mt7996_msdu_page_put_to_cache(struct mt7996_dev *dev,
1706 struct mt7996_msdu_page *p)
1707 {
1708 if (p->buf) {
1709 mt76_put_page_pool_buf(p->buf, false);
1710 p->buf = NULL;
1711 }
1712
1713 spin_lock(&dev->wed_rro.lock);
1714 list_add(&p->list, &dev->wed_rro.page_cache);
1715 spin_unlock(&dev->wed_rro.lock);
1716 }
1717
1718 static void mt7996_msdu_page_free_cache(struct mt7996_dev *dev)
1719 {
1720 while (true) {
1721 struct mt7996_msdu_page *p;
1722
1723 p = mt7996_msdu_page_get_from_cache(dev);
1724 if (!p)
1725 break;
1726
1727 if (p->buf)
1728 mt76_put_page_pool_buf(p->buf, false);
1729
1730 kfree(p);
1731 }
1732 }
1733
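/* Hash an MSDU page DMA address into a bucket of the RRO page map. */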
1734 static u32 mt7996_msdu_page_hash_from_addr(dma_addr_t dma_addr)
1735 {
1736 u32 val = 0;
1737 int i = 0;
1738
1739 while (dma_addr) {
1740 val += (u32)((dma_addr & 0xff) + i) % MT7996_RRO_MSDU_PG_HASH_SIZE;
1741 dma_addr >>= 8;
1742 i += 13;
1743 }
1744
1745 return val % MT7996_RRO_MSDU_PG_HASH_SIZE;
1746 }
1747
1748 static struct mt7996_msdu_page *
1749 mt7996_rro_msdu_page_get(struct mt7996_dev *dev, dma_addr_t dma_addr)
1750 {
1751 u32 hash = mt7996_msdu_page_hash_from_addr(dma_addr);
1752 struct mt7996_msdu_page *p, *tmp, *addr = NULL;
1753
1754 spin_lock(&dev->wed_rro.lock);
1755
1756 list_for_each_entry_safe(p, tmp, &dev->wed_rro.page_map[hash],
1757 list) {
1758 if (p->dma_addr == dma_addr) {
1759 list_del(&p->list);
1760 addr = p;
1761 break;
1762 }
1763 }
1764
1765 spin_unlock(&dev->wed_rro.lock);
1766
1767 return addr;
1768 }
1769
1770 static void mt7996_rx_token_put(struct mt7996_dev *dev)
1771 {
1772 int i;
1773
1774 for (i = 0; i < dev->mt76.rx_token_size; i++) {
1775 struct mt76_txwi_cache *t;
1776
1777 t = mt76_rx_token_release(&dev->mt76, i);
1778 if (!t || !t->ptr)
1779 continue;
1780
1781 mt76_put_page_pool_buf(t->ptr, false);
1782 t->dma_addr = 0;
1783 t->ptr = NULL;
1784
1785 mt76_put_rxwi(&dev->mt76, t);
1786 }
1787 }
1788
1789 void mt7996_rro_msdu_page_map_free(struct mt7996_dev *dev)
1790 {
1791 struct mt7996_msdu_page *p, *tmp;
1792 int i;
1793
1794 local_bh_disable();
1795
1796 for (i = 0; i < ARRAY_SIZE(dev->wed_rro.page_map); i++) {
1797 list_for_each_entry_safe(p, tmp, &dev->wed_rro.page_map[i],
1798 list) {
1799 list_del_init(&p->list);
1800 if (p->buf)
1801 mt76_put_page_pool_buf(p->buf, false);
1802 kfree(p);
1803 }
1804 }
1805 mt7996_msdu_page_free_cache(dev);
1806
1807 local_bh_enable();
1808
1809 mt7996_rx_token_put(dev);
1810 }
1811
1812 int mt7996_rro_msdu_page_add(struct mt76_dev *mdev, struct mt76_queue *q,
1813 dma_addr_t dma_addr, void *data)
1814 {
1815 struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
1816 struct mt7996_msdu_page_info *pinfo = data;
1817 struct mt7996_msdu_page *p;
1818 u32 hash;
1819
1820 pinfo->data |= cpu_to_le32(FIELD_PREP(MSDU_PAGE_INFO_OWNER_MASK, 1));
1821 p = mt7996_msdu_page_get(dev);
1822 if (!p)
1823 return -ENOMEM;
1824
1825 p->buf = data;
1826 p->dma_addr = dma_addr;
1827 p->q = q;
1828
1829 hash = mt7996_msdu_page_hash_from_addr(dma_addr);
1830
1831 spin_lock(&dev->wed_rro.lock);
1832 list_add_tail(&p->list, &dev->wed_rro.page_map[hash]);
1833 spin_unlock(&dev->wed_rro.lock);
1834
1835 return 0;
1836 }
1837
1838 static struct mt7996_wed_rro_addr *
1839 mt7996_rro_addr_elem_get(struct mt7996_dev *dev, u16 session_id, u16 seq_num)
1840 {
1841 u32 idx = 0;
1842 void *addr;
1843
1844 if (session_id == MT7996_RRO_MAX_SESSION) {
1845 addr = dev->wed_rro.session.ptr;
1846 } else {
1847 idx = session_id / MT7996_RRO_BA_BITMAP_SESSION_SIZE;
1848 addr = dev->wed_rro.addr_elem[idx].ptr;
1849
1850 idx = session_id % MT7996_RRO_BA_BITMAP_SESSION_SIZE;
1851 idx = idx * MT7996_RRO_WINDOW_MAX_LEN;
1852 }
1853 idx += seq_num % MT7996_RRO_WINDOW_MAX_LEN;
1854
1855 return addr + idx * sizeof(struct mt7996_wed_rro_addr);
1856 }
1857
1858 #define MT996_RRO_SN_MASK GENMASK(11, 0)
1859
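/* Process a hardware RRO (receive reordering) indication: walk the address
 * elements of the indicated session window, pull the MSDUs out of their
 * pages and feed the reassembled frames into the normal RX path.
 */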
1860 void mt7996_rro_rx_process(struct mt76_dev *mdev, void *data)
1861 {
1862 struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
1863 struct mt76_wed_rro_ind *cmd = (struct mt76_wed_rro_ind *)data;
1864 u32 cmd_data0 = le32_to_cpu(cmd->data0);
1865 u32 cmd_data1 = le32_to_cpu(cmd->data1);
1866 u8 ind_reason = FIELD_GET(RRO_IND_DATA0_IND_REASON_MASK, cmd_data0);
1867 u16 start_seq = FIELD_GET(RRO_IND_DATA0_START_SEQ_MASK, cmd_data0);
1868 u16 seq_id = FIELD_GET(RRO_IND_DATA0_SEQ_ID_MASK, cmd_data0);
1869 u16 ind_count = FIELD_GET(RRO_IND_DATA1_IND_COUNT_MASK, cmd_data1);
1870 struct mt7996_msdu_page_info *pinfo = NULL;
1871 struct mt7996_msdu_page *p = NULL;
1872 int i, seq_num = 0;
1873
1874 for (i = 0; i < ind_count; i++) {
1875 struct mt7996_wed_rro_addr *e;
1876 struct mt76_rx_status *status;
1877 struct mt7996_rro_hif *rxd;
1878 int j, len, qid, data_len;
1879 struct mt76_txwi_cache *t;
1880 dma_addr_t dma_addr = 0;
1881 u16 rx_token_id, count;
1882 struct mt76_queue *q;
1883 struct sk_buff *skb;
1884 u32 info = 0, data;
1885 u8 signature;
1886 void *buf;
1887 bool ls;
1888
1889 seq_num = FIELD_GET(MT7996_RRO_SN_MASK, start_seq + i);
1890 e = mt7996_rro_addr_elem_get(dev, seq_id, seq_num);
1891 data = le32_to_cpu(e->data);
1892 signature = FIELD_GET(WED_RRO_ADDR_SIGNATURE_MASK, data);
1893 if (signature != (seq_num / MT7996_RRO_WINDOW_MAX_LEN)) {
1894 u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK,
1895 0xff);
1896
1897 e->data |= cpu_to_le32(val);
1898 goto update_ack_seq_num;
1899 }
1900
1901 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1902 dma_addr = FIELD_GET(WED_RRO_ADDR_HEAD_HIGH_MASK, data);
1903 dma_addr <<= 32;
1904 #endif
1905 dma_addr |= le32_to_cpu(e->head_low);
1906
1907 count = FIELD_GET(WED_RRO_ADDR_COUNT_MASK, data);
1908 for (j = 0; j < count; j++) {
1909 if (!p) {
1910 p = mt7996_rro_msdu_page_get(dev, dma_addr);
1911 if (!p)
1912 continue;
1913
1914 dma_sync_single_for_cpu(mdev->dma_dev, p->dma_addr,
1915 SKB_WITH_OVERHEAD(p->q->buf_size),
1916 page_pool_get_dma_dir(p->q->page_pool));
1917 pinfo = (struct mt7996_msdu_page_info *)p->buf;
1918 }
1919
1920 rxd = &pinfo->rxd[j % MT7996_MAX_HIF_RXD_IN_PG];
1921 len = FIELD_GET(RRO_HIF_DATA1_SDL_MASK,
1922 le32_to_cpu(rxd->data1));
1923
1924 rx_token_id = FIELD_GET(RRO_HIF_DATA4_RX_TOKEN_ID_MASK,
1925 le32_to_cpu(rxd->data4));
1926 t = mt76_rx_token_release(mdev, rx_token_id);
1927 if (!t)
1928 goto next_page;
1929
1930 qid = t->qid;
1931 buf = t->ptr;
1932 q = &mdev->q_rx[qid];
1933 dma_sync_single_for_cpu(mdev->dma_dev, t->dma_addr,
1934 SKB_WITH_OVERHEAD(q->buf_size),
1935 page_pool_get_dma_dir(q->page_pool));
1936
1937 t->dma_addr = 0;
1938 t->ptr = NULL;
1939 mt76_put_rxwi(mdev, t);
1940 if (!buf)
1941 goto next_page;
1942
1943 if (q->rx_head)
1944 data_len = q->buf_size;
1945 else
1946 data_len = SKB_WITH_OVERHEAD(q->buf_size);
1947
1948 if (data_len < len + q->buf_offset) {
1949 dev_kfree_skb(q->rx_head);
1950 mt76_put_page_pool_buf(buf, false);
1951 q->rx_head = NULL;
1952 goto next_page;
1953 }
1954
1955 ls = FIELD_GET(RRO_HIF_DATA1_LS_MASK,
1956 le32_to_cpu(rxd->data1));
1957 if (q->rx_head) {
1958 /* TODO: Take into account non-linear skb. */
1959 mt76_put_page_pool_buf(buf, false);
1960 if (ls) {
1961 dev_kfree_skb(q->rx_head);
1962 q->rx_head = NULL;
1963 }
1964 goto next_page;
1965 }
1966
1967 if (ls && !mt7996_rx_check(mdev, buf, len))
1968 goto next_page;
1969
1970 skb = build_skb(buf, q->buf_size);
1971 if (!skb)
1972 goto next_page;
1973
1974 skb_reserve(skb, q->buf_offset);
1975 skb_mark_for_recycle(skb);
1976 __skb_put(skb, len);
1977
1978 if (ind_reason == 1 || ind_reason == 2) {
1979 dev_kfree_skb(skb);
1980 goto next_page;
1981 }
1982
1983 if (!ls) {
1984 q->rx_head = skb;
1985 goto next_page;
1986 }
1987
1988 status = (struct mt76_rx_status *)skb->cb;
1989 if (seq_id != MT7996_RRO_MAX_SESSION)
1990 status->aggr = true;
1991
1992 mt7996_queue_rx_skb(mdev, qid, skb, &info);
1993 next_page:
1994 if ((j + 1) % MT7996_MAX_HIF_RXD_IN_PG == 0) {
1995 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1996 dma_addr =
1997 FIELD_GET(MSDU_PAGE_INFO_PG_HIGH_MASK,
1998 le32_to_cpu(pinfo->data));
1999 dma_addr <<= 32;
2000 dma_addr |= le32_to_cpu(pinfo->pg_low);
2001 #else
2002 dma_addr = le32_to_cpu(pinfo->pg_low);
2003 #endif
2004 mt7996_msdu_page_put_to_cache(dev, p);
2005 p = NULL;
2006 }
2007 }
2008
2009 update_ack_seq_num:
2010 if ((i + 1) % 4 == 0)
2011 mt76_wr(dev, MT_RRO_ACK_SN_CTRL,
2012 FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK,
2013 seq_id) |
2014 FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK,
2015 seq_num));
2016 if (p) {
2017 mt7996_msdu_page_put_to_cache(dev, p);
2018 p = NULL;
2019 }
2020 }
2021
2022 /* Update ack_seq_num for remaining addr_elem */
2023 if (i % 4)
2024 mt76_wr(dev, MT_RRO_ACK_SN_CTRL,
2025 FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK, seq_id) |
2026 FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK, seq_num));
2027 }
2028
2029 void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
2030 {
2031 struct mt7996_dev *dev = phy->dev;
2032 u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx);
2033
2034 mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN);
2035 mt76_set(dev, reg, BIT(11) | BIT(9));
2036 }
2037
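/* Clear the per-band aggregation, airtime and MIB counters and restart the
 * survey timestamp.
 */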
2038 void mt7996_mac_reset_counters(struct mt7996_phy *phy)
2039 {
2040 struct mt7996_dev *dev = phy->dev;
2041 u8 band_idx = phy->mt76->band_idx;
2042 int i;
2043
2044 for (i = 0; i < 16; i++)
2045 mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
2046
2047 phy->mt76->survey_time = ktime_get_boottime();
2048
2049 memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));
2050
2051 /* reset airtime counters */
2052 mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx),
2053 MT_WF_RMAC_MIB_RXTIME_CLR);
2054
2055 mt7996_mcu_get_chan_mib_info(phy, true);
2056 }
2057
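/* Program the per-band CCK/OFDM timeout registers, extending the baseline
 * values by the largest coverage class configured on any phy.
 */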
2058 void mt7996_mac_set_coverage_class(struct mt7996_phy *phy)
2059 {
2060 s16 coverage_class = phy->coverage_class;
2061 struct mt7996_dev *dev = phy->dev;
2062 struct mt7996_phy *phy2 = mt7996_phy2(dev);
2063 struct mt7996_phy *phy3 = mt7996_phy3(dev);
2064 u32 reg_offset;
2065 u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
2066 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
2067 u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
2068 FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
2069 u8 band_idx = phy->mt76->band_idx;
2070 int offset;
2071
2072 if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
2073 return;
2074
2075 if (phy2)
2076 coverage_class = max_t(s16, dev->phy.coverage_class,
2077 phy2->coverage_class);
2078
2079 if (phy3)
2080 coverage_class = max_t(s16, coverage_class,
2081 phy3->coverage_class);
2082
2083 offset = 3 * coverage_class;
2084 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
2085 FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
2086
2087 mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
2088 mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
2089 }
2090
2091 void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
2092 {
2093 mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band),
2094 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY |
2095 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR);
2096
2097 mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band),
2098 FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5));
2099 }
2100
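/* Average the per-antenna IPI histogram into a single noise-floor estimate,
 * weighting each bucket by its nominal power level.
 */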
2101 static u8
2102 mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
2103 {
2104 static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
2105 struct mt7996_dev *dev = phy->dev;
2106 u32 val, sum = 0, n = 0;
2107 int ant, i;
2108
2109 for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) {
2110 u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant);
2111
2112 for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
2113 val = mt76_rr(dev, reg);
2114 sum += val * nf_power[i];
2115 n += val;
2116 }
2117 }
2118
2119 return n ? sum / n : 0;
2120 }
2121
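/* Refresh channel busy statistics and track the noise floor with a simple
 * exponential moving average.
 */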
2122 void mt7996_update_channel(struct mt76_phy *mphy)
2123 {
2124 struct mt7996_phy *phy = mphy->priv;
2125 struct mt76_channel_state *state = mphy->chan_state;
2126 int nf;
2127
2128 mt7996_mcu_get_chan_mib_info(phy, false);
2129
2130 nf = mt7996_phy_get_nf(phy, mphy->band_idx);
2131 if (!phy->noise)
2132 phy->noise = nf << 4;
2133 else if (nf)
2134 phy->noise += nf - (phy->noise >> 4);
2135
2136 state->noise = -(phy->noise >> 4);
2137 }
2138
2139 static bool
2140 mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
2141 {
2142 bool ret;
2143
2144 ret = wait_event_timeout(dev->reset_wait,
2145 (READ_ONCE(dev->recovery.state) & state),
2146 MT7996_RESET_TIMEOUT);
2147
2148 WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
2149 return ret;
2150 }
2151
2152 static void
2153 mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
2154 {
2155 struct ieee80211_bss_conf *link_conf;
2156 struct mt7996_phy *phy = priv;
2157 struct mt7996_dev *dev = phy->dev;
2158 unsigned int link_id;
2159
2160
2161 switch (vif->type) {
2162 case NL80211_IFTYPE_MESH_POINT:
2163 case NL80211_IFTYPE_ADHOC:
2164 case NL80211_IFTYPE_AP:
2165 break;
2166 default:
2167 return;
2168 }
2169
2170 for_each_vif_active_link(vif, link_conf, link_id) {
2171 struct mt7996_vif_link *link;
2172 struct mt7996_phy *link_phy;
2173
2174 link = mt7996_vif_link(dev, vif, link_id);
2175 if (!link)
2176 continue;
2177
2178 link_phy = mt7996_vif_link_phy(link);
2179 if (link_phy != phy)
2180 continue;
2181
2182 mt7996_mcu_add_beacon(dev->mt76.hw, vif, link_conf,
2183 link_conf->enable_beacon);
2184 }
2185 }
2186
2187 void mt7996_mac_update_beacons(struct mt7996_phy *phy)
2188 {
2189 ieee80211_iterate_active_interfaces(phy->mt76->hw,
2190 IEEE80211_IFACE_ITER_RESUME_ALL,
2191 mt7996_update_vif_beacon, phy);
2192 }
2193
2194 static void
2195 mt7996_update_beacons(struct mt7996_dev *dev)
2196 {
2197 struct mt76_phy *phy2, *phy3;
2198
2199 mt7996_mac_update_beacons(&dev->phy);
2200
2201 phy2 = dev->mt76.phys[MT_BAND1];
2202 if (phy2)
2203 mt7996_mac_update_beacons(phy2->priv);
2204
2205 phy3 = dev->mt76.phys[MT_BAND2];
2206 if (phy3)
2207 mt7996_mac_update_beacons(phy3->priv);
2208 }
2209
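/* Free all pending Tx tokens and reset the per-phy pending management frame
 * counters.
 */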
2210 void mt7996_tx_token_put(struct mt7996_dev *dev)
2211 {
2212 struct mt76_txwi_cache *txwi;
2213 int id;
2214
2215 spin_lock_bh(&dev->mt76.token_lock);
2216 idr_for_each_entry(&dev->mt76.token, txwi, id) {
2217 mt7996_txwi_free(dev, txwi, NULL, NULL, NULL);
2218 dev->mt76.token_count--;
2219 }
2220 spin_unlock_bh(&dev->mt76.token_lock);
2221 idr_destroy(&dev->mt76.token);
2222
2223 for (id = 0; id < __MT_MAX_BAND; id++) {
2224 struct mt76_phy *phy = dev->mt76.phys[id];
2225 if (phy)
2226 atomic_set(&phy->mgmt_tx_pending, 0);
2227 }
2228 }
2229
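/* Full MAC restart used during chip recovery: quiesce DMA and NAPI, reload
 * the firmware and bring every running phy back up.
 */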
2230 static int
2231 mt7996_mac_restart(struct mt7996_dev *dev)
2232 {
2233 struct mt76_dev *mdev = &dev->mt76;
2234 struct mt7996_phy *phy;
2235 int i, ret;
2236
2237 if (dev->hif2) {
2238 mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
2239 mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
2240 }
2241
2242 if (dev_is_pci(mdev->dev)) {
2243 mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
2244 if (dev->hif2)
2245 mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
2246 }
2247
2248 set_bit(MT76_MCU_RESET, &dev->mphy.state);
2249 mt7996_for_each_phy(dev, phy)
2250 set_bit(MT76_RESET, &phy->mt76->state);
2251 wake_up(&dev->mt76.mcu.wait);
2252
2253 /* lock/unlock all queues to ensure that no tx is pending */
2254 mt7996_for_each_phy(dev, phy)
2255 mt76_txq_schedule_all(phy->mt76);
2256
2257 /* disable all tx/rx napi */
2258 mt76_worker_disable(&dev->mt76.tx_worker);
2259 mt76_for_each_q_rx(mdev, i) {
2260 if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
2261 mt76_queue_is_wed_rro(&mdev->q_rx[i]))
2262 continue;
2263
2264 if (mdev->q_rx[i].ndesc)
2265 napi_disable(&dev->mt76.napi[i]);
2266 }
2267 napi_disable(&dev->mt76.tx_napi);
2268
2269 /* token reinit */
2270 mt7996_tx_token_put(dev);
2271 idr_init(&dev->mt76.token);
2272
2273 mt7996_dma_reset(dev, true);
2274
2275 mt76_for_each_q_rx(mdev, i) {
2276 if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
2277 mt76_queue_is_wed_rro(&mdev->q_rx[i]))
2278 continue;
2279
2280 if (mdev->q_rx[i].ndesc) {
2281 napi_enable(&dev->mt76.napi[i]);
2282 local_bh_disable();
2283 napi_schedule(&dev->mt76.napi[i]);
2284 local_bh_enable();
2285 }
2286 }
2287 clear_bit(MT76_MCU_RESET, &dev->mphy.state);
2288 clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
2289
2290 mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
2291 mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
2292 if (dev->hif2) {
2293 mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
2294 mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
2295 }
2296 if (dev_is_pci(mdev->dev)) {
2297 mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
2298 if (dev->hif2)
2299 mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
2300 }
2301
2302 /* load firmware */
2303 ret = mt7996_mcu_init_firmware(dev);
2304 if (ret)
2305 goto out;
2306
2307 if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
2308 mt7996_has_hwrro(dev)) {
2309 u32 wed_irq_mask = dev->mt76.mmio.irqmask |
2310 MT_INT_TX_DONE_BAND2;
2311
2312 mt7996_rro_hw_init(dev);
2313 mt76_for_each_q_rx(&dev->mt76, i) {
2314 if (mt76_queue_is_wed_rro_ind(&dev->mt76.q_rx[i]) ||
2315 mt76_queue_is_wed_rro_msdu_pg(&dev->mt76.q_rx[i]))
2316 mt76_queue_rx_reset(dev, i);
2317 }
2318
2319 mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
2320 mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
2321 false);
2322 mt7996_irq_enable(dev, wed_irq_mask);
2323 mt7996_irq_disable(dev, 0);
2324 }
2325
2326 if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
2327 mt76_wr(dev, MT_INT_PCIE1_MASK_CSR,
2328 MT_INT_TX_RX_DONE_EXT);
2329 mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
2330 MT_INT_TX_RX_DONE_EXT);
2331 }
2332
2333 /* set the necessary init items */
2334 ret = mt7996_mcu_set_eeprom(dev);
2335 if (ret)
2336 goto out;
2337
2338 mt7996_mac_init(dev);
2339 mt7996_for_each_phy(dev, phy)
2340 mt7996_init_txpower(phy);
2341 ret = mt7996_txbf_init(dev);
2342 if (ret)
2343 goto out;
2344
2345 mt7996_for_each_phy(dev, phy) {
2346 if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
2347 continue;
2348
2349 ret = mt7996_run(phy);
2350 if (ret)
2351 goto out;
2352 }
2353
2354 out:
2355 /* reset done */
2356 mt7996_for_each_phy(dev, phy)
2357 clear_bit(MT76_RESET, &phy->mt76->state);
2358
2359 napi_enable(&dev->mt76.tx_napi);
2360 local_bh_disable();
2361 napi_schedule(&dev->mt76.tx_napi);
2362 local_bh_enable();
2363
2364 mt76_worker_enable(&dev->mt76.tx_worker);
2365 return ret;
2366 }
2367
2368 static void
2369 mt7996_mac_reset_sta_iter(void *data, struct ieee80211_sta *sta)
2370 {
2371 struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
2372 struct mt7996_dev *dev = data;
2373 int i;
2374
2375 for (i = 0; i < ARRAY_SIZE(msta->link); i++)
2376 mt7996_mac_sta_remove_link(dev, sta, i, true);
2377 }
2378
2379 static void
2380 mt7996_mac_reset_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
2381 {
2382 struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
2383 struct mt76_vif_data *mvif = mlink->mvif;
2384 struct mt7996_dev *dev = data;
2385 int i;
2386
2387 rcu_read_lock();
2388 for (i = 0; i < ARRAY_SIZE(mvif->link); i++) {
2389
2390 mlink = mt76_dereference(mvif->link[i], &dev->mt76);
2391 if (!mlink || mlink == (struct mt76_vif_link *)vif->drv_priv)
2392 continue;
2393
2394 rcu_assign_pointer(mvif->link[i], NULL);
2395 kfree_rcu(mlink, rcu_head);
2396 }
2397 rcu_read_unlock();
2398 }
2399
2400 static void
2401 mt7996_mac_full_reset(struct mt7996_dev *dev)
2402 {
2403 struct ieee80211_hw *hw = mt76_hw(dev);
2404 struct mt7996_phy *phy;
2405 LIST_HEAD(list);
2406 int i;
2407
2408 dev->recovery.hw_full_reset = true;
2409
2410 wake_up(&dev->mt76.mcu.wait);
2411 ieee80211_stop_queues(hw);
2412
2413 cancel_work_sync(&dev->wed_rro.work);
2414 mt7996_for_each_phy(dev, phy)
2415 cancel_delayed_work_sync(&phy->mt76->mac_work);
2416
2417 mt76_abort_scan(&dev->mt76);
2418
2419 mutex_lock(&dev->mt76.mutex);
2420 for (i = 0; i < 10; i++) {
2421 if (!mt7996_mac_restart(dev))
2422 break;
2423 }
2424
2425 if (i == 10)
2426 dev_err(dev->mt76.dev, "chip full reset failed\n");
2427
2428 mt7996_for_each_phy(dev, phy)
2429 phy->omac_mask = 0;
2430
2431 ieee80211_iterate_stations_atomic(hw, mt7996_mac_reset_sta_iter, dev);
2432 ieee80211_iterate_active_interfaces_atomic(hw,
2433 IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
2434 mt7996_mac_reset_vif_iter, dev);
2435 mt76_reset_device(&dev->mt76);
2436
2437 INIT_LIST_HEAD(&dev->sta_rc_list);
2438 INIT_LIST_HEAD(&dev->twt_list);
2439
2440 spin_lock_bh(&dev->wed_rro.lock);
2441 list_splice_init(&dev->wed_rro.poll_list, &list);
2442 spin_unlock_bh(&dev->wed_rro.lock);
2443
2444 while (!list_empty(&list)) {
2445 struct mt7996_wed_rro_session_id *e;
2446
2447 e = list_first_entry(&list, struct mt7996_wed_rro_session_id,
2448 list);
2449 list_del_init(&e->list);
2450 kfree(e);
2451 }
2452
2453 i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7996_WTBL_STA);
2454 dev->mt76.global_wcid.idx = i;
2455 dev->recovery.hw_full_reset = false;
2456
2457 mutex_unlock(&dev->mt76.mutex);
2458
2459 ieee80211_restart_hw(mt76_hw(dev));
2460 }
2461
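/* SER work handler: perform either a full chip restart (WM/WA watchdog
 * exception) or an L1 DMA reset coordinated with the MCU.
 */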
2462 void mt7996_mac_reset_work(struct work_struct *work)
2463 {
2464 struct ieee80211_hw *hw;
2465 struct mt7996_dev *dev;
2466 struct mt7996_phy *phy;
2467 int i;
2468
2469 dev = container_of(work, struct mt7996_dev, reset_work);
2470 hw = mt76_hw(dev);
2471
2472 /* chip full reset */
2473 if (dev->recovery.restart) {
2474 /* disable WA/WM WDT */
2475 mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
2476 MT_MCU_CMD_WDT_MASK);
2477
2478 if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
2479 dev->recovery.wa_reset_count++;
2480 else
2481 dev->recovery.wm_reset_count++;
2482
2483 mt7996_mac_full_reset(dev);
2484
2485 /* enable mcu irq */
2486 mt7996_irq_enable(dev, MT_INT_MCU_CMD);
2487 mt7996_irq_disable(dev, 0);
2488
2489 /* enable WA/WM WDT */
2490 mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);
2491
2492 dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
2493 dev->recovery.restart = false;
2494 return;
2495 }
2496
2497 if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
2498 return;
2499
2500 dev_info(dev->mt76.dev, "\n%s L1 SER recovery start.",
2501 wiphy_name(hw->wiphy));
2502
2503 if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
2504 mtk_wed_device_stop(&dev->mt76.mmio.wed_hif2);
2505
2506 if (mtk_wed_device_active(&dev->mt76.mmio.wed))
2507 mtk_wed_device_stop(&dev->mt76.mmio.wed);
2508
2509 mt7996_npu_hw_stop(dev);
2510 ieee80211_stop_queues(mt76_hw(dev));
2511
2512 set_bit(MT76_RESET, &dev->mphy.state);
2513 set_bit(MT76_MCU_RESET, &dev->mphy.state);
2514 mt76_abort_scan(&dev->mt76);
2515 wake_up(&dev->mt76.mcu.wait);
2516
2517 cancel_work_sync(&dev->wed_rro.work);
2518 mt7996_for_each_phy(dev, phy) {
2519 mt76_abort_roc(phy->mt76);
2520 set_bit(MT76_RESET, &phy->mt76->state);
2521 cancel_delayed_work_sync(&phy->mt76->mac_work);
2522 }
2523
2524 mt76_worker_disable(&dev->mt76.tx_worker);
2525 mt76_for_each_q_rx(&dev->mt76, i) {
2526 if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
2527 mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
2528 continue;
2529
2530 if (mt76_npu_device_active(&dev->mt76) &&
2531 mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
2532 continue;
2533
2534 if (mt76_queue_is_npu_txfree(&dev->mt76.q_rx[i]))
2535 continue;
2536
2537 napi_disable(&dev->mt76.napi[i]);
2538 }
2539 napi_disable(&dev->mt76.tx_napi);
2540
2541 mutex_lock(&dev->mt76.mutex);
2542
2543 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
2544
2545 if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
2546 mt7996_dma_reset(dev, false);
2547
2548 mt7996_tx_token_put(dev);
2549 idr_init(&dev->mt76.token);
2550
2551 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
2552 mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
2553 }
2554
2555 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
2556 mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
2557
2558 /* enable DMA Rx/Tx and interrupt */
2559 mt7996_dma_start(dev, false, false);
2560
2561 if (!is_mt7996(&dev->mt76) && dev->mt76.hwrro_mode == MT76_HWRRO_V3)
2562 mt76_set(dev, MT_RRO_3_0_EMU_CONF, MT_RRO_3_0_EMU_CONF_EN_MASK);
2563
2564 if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
2565 u32 wed_irq_mask = MT_INT_TX_DONE_BAND2 |
2566 dev->mt76.mmio.irqmask;
2567
2568 mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
2569 mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
2570 true);
2571 mt7996_irq_enable(dev, wed_irq_mask);
2572 mt7996_irq_disable(dev, 0);
2573 }
2574
2575 if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
2576 mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, MT_INT_TX_RX_DONE_EXT);
2577 mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
2578 MT_INT_TX_RX_DONE_EXT);
2579 }
2580
2581 __mt7996_npu_hw_init(dev);
2582
2583 clear_bit(MT76_MCU_RESET, &dev->mphy.state);
2584 mt7996_for_each_phy(dev, phy)
2585 clear_bit(MT76_RESET, &phy->mt76->state);
2586
2587 mt76_for_each_q_rx(&dev->mt76, i) {
2588 if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
2589 mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
2590 continue;
2591
2592 if (mt76_npu_device_active(&dev->mt76) &&
2593 mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
2594 continue;
2595
2596 if (mt76_queue_is_npu_txfree(&dev->mt76.q_rx[i]))
2597 continue;
2598
2599 napi_enable(&dev->mt76.napi[i]);
2600 local_bh_disable();
2601 napi_schedule(&dev->mt76.napi[i]);
2602 local_bh_enable();
2603 }
2604
2605 tasklet_schedule(&dev->mt76.irq_tasklet);
2606
2607 mt76_worker_enable(&dev->mt76.tx_worker);
2608
2609 napi_enable(&dev->mt76.tx_napi);
2610 local_bh_disable();
2611 napi_schedule(&dev->mt76.tx_napi);
2612 local_bh_enable();
2613
2614 ieee80211_wake_queues(hw);
2615 mt7996_update_beacons(dev);
2616
2617 mutex_unlock(&dev->mt76.mutex);
2618
2619 mt7996_for_each_phy(dev, phy)
2620 ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
2621 MT7996_WATCHDOG_TIME);
2622 dev_info(dev->mt76.dev, "\n%s L1 SER recovery completed.",
2623 wiphy_name(dev->mt76.hw->wiphy));
2624 }
2625
2626 /* firmware coredump */
2627 void mt7996_mac_dump_work(struct work_struct *work)
2628 {
2629 const struct mt7996_mem_region *mem_region;
2630 struct mt7996_crash_data *crash_data;
2631 struct mt7996_dev *dev;
2632 struct mt7996_mem_hdr *hdr;
2633 size_t buf_len;
2634 int i;
2635 u32 num;
2636 u8 *buf;
2637
2638 dev = container_of(work, struct mt7996_dev, dump_work);
2639
2640 mutex_lock(&dev->dump_mutex);
2641
2642 crash_data = mt7996_coredump_new(dev);
2643 if (!crash_data) {
2644 mutex_unlock(&dev->dump_mutex);
2645 goto skip_coredump;
2646 }
2647
2648 mem_region = mt7996_coredump_get_mem_layout(dev, &num);
2649 if (!mem_region || !crash_data->memdump_buf_len) {
2650 mutex_unlock(&dev->dump_mutex);
2651 goto skip_memdump;
2652 }
2653
2654 buf = crash_data->memdump_buf;
2655 buf_len = crash_data->memdump_buf_len;
2656
2657 /* dumping memory content... */
2658 memset(buf, 0, buf_len);
2659 for (i = 0; i < num; i++) {
2660 if (mem_region->len > buf_len) {
2661 dev_warn(dev->mt76.dev, "%s len %zu is too large\n",
2662 mem_region->name, mem_region->len);
2663 break;
2664 }
2665
2666 /* reserve space for the header */
2667 hdr = (void *)buf;
2668 buf += sizeof(*hdr);
2669 buf_len -= sizeof(*hdr);
2670
2671 mt7996_memcpy_fromio(dev, buf, mem_region->start,
2672 mem_region->len);
2673
2674 hdr->start = mem_region->start;
2675 hdr->len = mem_region->len;
2676
2677 if (!mem_region->len)
2678 /* note: the header remains, just with zero length */
2679 break;
2680
2681 buf += mem_region->len;
2682 buf_len -= mem_region->len;
2683
2684 mem_region++;
2685 }
2686
2687 mutex_unlock(&dev->dump_mutex);
2688
2689 skip_memdump:
2690 mt7996_coredump_submit(dev);
2691 skip_coredump:
2692 queue_work(dev->mt76.wq, &dev->reset_work);
2693 }
2694
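/* Recovery entry point: on a firmware watchdog exception schedule a
 * coredump followed by a full restart, otherwise kick the standard reset
 * work.
 */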
2695 void mt7996_reset(struct mt7996_dev *dev)
2696 {
2697 if (!dev->recovery.hw_init_done)
2698 return;
2699
2700 if (dev->recovery.hw_full_reset)
2701 return;
2702
2703 /* wm/wa exception: do full recovery */
2704 if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
2705 dev->recovery.restart = true;
2706 dev_info(dev->mt76.dev,
2707 "%s indicated firmware crash, attempting recovery\n",
2708 wiphy_name(dev->mt76.hw->wiphy));
2709
2710 mt7996_irq_disable(dev, MT_INT_MCU_CMD);
2711 queue_work(dev->mt76.wq, &dev->dump_work);
2712 return;
2713 }
2714
2715 if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA) {
2716 set_bit(MT76_MCU_RESET, &dev->mphy.state);
2717 wake_up(&dev->mt76.mcu.wait);
2718 }
2719
2720 queue_work(dev->mt76.wq, &dev->reset_work);
2721 wake_up(&dev->reset_wait);
2722 }
2723
2724 void mt7996_mac_update_stats(struct mt7996_phy *phy)
2725 {
2726 struct mt76_mib_stats *mib = &phy->mib;
2727 struct mt7996_dev *dev = phy->dev;
2728 u8 band_idx = phy->mt76->band_idx;
2729 u32 cnt;
2730 int i;
2731
2732 cnt = mt76_rr(dev, MT_MIB_RSCR1(band_idx));
2733 mib->fcs_err_cnt += cnt;
2734
2735 cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx));
2736 mib->rx_fifo_full_cnt += cnt;
2737
2738 cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx));
2739 mib->rx_mpdu_cnt += cnt;
2740
2741 cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx));
2742 mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);
2743
2744 cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx));
2745 mib->rx_vector_mismatch_cnt += cnt;
2746
2747 cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx));
2748 mib->rx_delimiter_fail_cnt += cnt;
2749
2750 cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx));
2751 mib->rx_len_mismatch_cnt += cnt;
2752
2753 cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx));
2754 mib->tx_ampdu_cnt += cnt;
2755
2756 cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx));
2757 mib->tx_stop_q_empty_cnt += cnt;
2758
2759 cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx));
2760 mib->tx_mpdu_attempts_cnt += cnt;
2761
2762 cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx));
2763 mib->tx_mpdu_success_cnt += cnt;
2764
2765 cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx));
2766 mib->rx_ampdu_cnt += cnt;
2767
2768 cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx));
2769 mib->rx_ampdu_bytes_cnt += cnt;
2770
2771 cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx));
2772 mib->rx_ampdu_valid_subframe_cnt += cnt;
2773
2774 cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx));
2775 mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;
2776
2777 cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx));
2778 mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt);
2779
2780 cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx));
2781 mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt);
2782
2783 cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx));
2784 mib->rx_pfdrop_cnt += cnt;
2785
2786 cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx));
2787 mib->rx_vec_queue_overflow_drop_cnt += cnt;
2788
2789 cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx));
2790 mib->rx_ba_cnt += cnt;
2791
2792 cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx));
2793 mib->tx_bf_ebf_ppdu_cnt += cnt;
2794
2795 cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx));
2796 mib->tx_bf_ibf_ppdu_cnt += cnt;
2797
2798 cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx));
2799 mib->tx_mu_bf_cnt += cnt;
2800
2801 cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx));
2802 mib->tx_mu_mpdu_cnt += cnt;
2803
2804 cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx));
2805 mib->tx_mu_acked_mpdu_cnt += cnt;
2806
2807 cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx));
2808 mib->tx_su_acked_mpdu_cnt += cnt;
2809
2810 cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx));
2811 mib->tx_bf_rx_fb_ht_cnt += cnt;
2812 mib->tx_bf_rx_fb_all_cnt += cnt;
2813
2814 cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx));
2815 mib->tx_bf_rx_fb_vht_cnt += cnt;
2816 mib->tx_bf_rx_fb_all_cnt += cnt;
2817
2818 cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx));
2819 mib->tx_bf_rx_fb_he_cnt += cnt;
2820 mib->tx_bf_rx_fb_all_cnt += cnt;
2821
2822 cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx));
2823 mib->tx_bf_rx_fb_eht_cnt += cnt;
2824 mib->tx_bf_rx_fb_all_cnt += cnt;
2825
2826 cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx));
2827 mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
2828 mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
2829 mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);
2830
2831 cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx));
2832 mib->tx_bf_fb_trig_cnt += cnt;
2833
2834 cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx));
2835 mib->tx_bf_fb_cpl_cnt += cnt;
2836
2837 for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
2838 cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
2839 mib->tx_amsdu[i] += cnt;
2840 mib->tx_amsdu_cnt += cnt;
2841 }
2842
2843 /* rts count */
2844 cnt = mt76_rr(dev, MT_MIB_BTSCR5(band_idx));
2845 mib->rts_cnt += cnt;
2846
2847 /* rts retry count */
2848 cnt = mt76_rr(dev, MT_MIB_BTSCR6(band_idx));
2849 mib->rts_retries_cnt += cnt;
2850
2851 /* ba miss count */
2852 cnt = mt76_rr(dev, MT_MIB_BTSCR0(band_idx));
2853 mib->ba_miss_cnt += cnt;
2854
2855 /* ack fail count */
2856 cnt = mt76_rr(dev, MT_MIB_BFTFCR(band_idx));
2857 mib->ack_fail_cnt += cnt;
2858
2859 for (i = 0; i < 16; i++) {
2860 cnt = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
2861 phy->mt76->aggr_stats[i] += cnt;
2862 }
2863 }
2864
2865 void mt7996_mac_sta_rc_work(struct work_struct *work)
2866 {
2867 struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
2868 struct mt7996_sta_link *msta_link;
2869 struct ieee80211_vif *vif;
2870 struct mt7996_vif *mvif;
2871 LIST_HEAD(list);
2872 u32 changed;
2873
2874 mutex_lock(&dev->mt76.mutex);
2875
2876 spin_lock_bh(&dev->mt76.sta_poll_lock);
2877 list_splice_init(&dev->sta_rc_list, &list);
2878
2879 while (!list_empty(&list)) {
2880 msta_link = list_first_entry(&list, struct mt7996_sta_link,
2881 rc_list);
2882 list_del_init(&msta_link->rc_list);
2883
2884 changed = msta_link->changed;
2885 msta_link->changed = 0;
2886 mvif = msta_link->sta->vif;
2887 vif = container_of((void *)mvif, struct ieee80211_vif,
2888 drv_priv);
2889
2890 spin_unlock_bh(&dev->mt76.sta_poll_lock);
2891
2892 if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
2893 IEEE80211_RC_NSS_CHANGED |
2894 IEEE80211_RC_BW_CHANGED))
2895 mt7996_mcu_add_rate_ctrl(dev, msta_link->sta, vif,
2896 msta_link->wcid.link_id,
2897 true);
2898
2899 if (changed & IEEE80211_RC_SMPS_CHANGED)
2900 mt7996_mcu_set_fixed_field(dev, msta_link->sta, NULL,
2901 msta_link->wcid.link_id,
2902 RATE_PARAM_MMPS_UPDATE);
2903
2904 spin_lock_bh(&dev->mt76.sta_poll_lock);
2905 }
2906
2907 spin_unlock_bh(&dev->mt76.sta_poll_lock);
2908
2909 mutex_unlock(&dev->mt76.mutex);
2910 }
2911
2912 void mt7996_mac_work(struct work_struct *work)
2913 {
2914 struct mt7996_phy *phy;
2915 struct mt76_phy *mphy;
2916
2917 mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
2918 mac_work.work);
2919 phy = mphy->priv;
2920
2921 mutex_lock(&mphy->dev->mutex);
2922
2923 mt76_update_survey(mphy);
2924 if (++mphy->mac_work_count == 5) {
2925 mphy->mac_work_count = 0;
2926
2927 mt7996_mac_update_stats(phy);
2928
2929 mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_RATE);
2930 if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
2931 mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_ADM_STAT);
2932 mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_MSDU_COUNT);
2933 }
2934 }
2935
2936 mutex_unlock(&mphy->dev->mutex);
2937
2938 mt76_beacon_mon_check(mphy);
2939 mt76_tx_status_check(mphy->dev, false);
2940
2941 ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
2942 MT7996_WATCHDOG_TIME);
2943 }
2944
2945 static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
2946 {
2947 struct mt7996_dev *dev = phy->dev;
2948 int rdd_idx = mt7996_get_rdd_idx(phy, false);
2949
2950 if (rdd_idx < 0)
2951 return;
2952
2953 mt7996_mcu_rdd_cmd(dev, RDD_STOP, rdd_idx, 0);
2954 }
2955
2956 static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int rdd_idx)
2957 {
2958 int region;
2959
2960 switch (dev->mt76.region) {
2961 case NL80211_DFS_ETSI:
2962 region = 0;
2963 break;
2964 case NL80211_DFS_JP:
2965 region = 2;
2966 break;
2967 case NL80211_DFS_FCC:
2968 default:
2969 region = 1;
2970 break;
2971 }
2972
2973 return mt7996_mcu_rdd_cmd(dev, RDD_START, rdd_idx, region);
2974 }
2975
2976 static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
2977 {
2978 struct mt7996_dev *dev = phy->dev;
2979 int err, rdd_idx;
2980
2981 rdd_idx = mt7996_get_rdd_idx(phy, false);
2982 if (rdd_idx < 0)
2983 return -EINVAL;
2984
2985 /* start CAC */
2986 err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, rdd_idx, 0);
2987 if (err < 0)
2988 return err;
2989
2990 err = mt7996_dfs_start_rdd(dev, rdd_idx);
2991
2992 return err;
2993 }
2994
2995 int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
2996 {
2997 struct mt7996_dev *dev = phy->dev;
2998 enum mt76_dfs_state dfs_state, prev_state;
2999 int err, rdd_idx = mt7996_get_rdd_idx(phy, false);
3000
3001 prev_state = phy->mt76->dfs_state;
3002 dfs_state = mt76_phy_dfs_state(phy->mt76);
3003
3004 if (prev_state == dfs_state || rdd_idx < 0)
3005 return 0;
3006
3007 if (prev_state == MT_DFS_STATE_UNKNOWN)
3008 mt7996_dfs_stop_radar_detector(phy);
3009
3010 if (dfs_state == MT_DFS_STATE_DISABLED)
3011 goto stop;
3012
3013 if (prev_state <= MT_DFS_STATE_DISABLED) {
3014 err = mt7996_dfs_start_radar_detector(phy);
3015 if (err < 0)
3016 return err;
3017
3018 phy->mt76->dfs_state = MT_DFS_STATE_CAC;
3019 }
3020
3021 if (dfs_state == MT_DFS_STATE_CAC)
3022 return 0;
3023
3024 err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END, rdd_idx, 0);
3025 if (err < 0) {
3026 phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
3027 return err;
3028 }
3029
3030 phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
3031 return 0;
3032
3033 stop:
3034 err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START, rdd_idx, 0);
3035 if (err < 0)
3036 return err;
3037
3038 mt7996_dfs_stop_radar_detector(phy);
3039 phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;
3040
3041 return 0;
3042 }
3043
3044 static int
3045 mt7996_mac_twt_duration_align(int duration)
3046 {
3047 return duration << 8;
3048 }
3049
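/* Insert @flow into the TWT schedule list at the first gap large enough for
 * its service period and return the selected start TSF.
 */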
3050 static u64
3051 mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev,
3052 struct mt7996_twt_flow *flow)
3053 {
3054 struct mt7996_twt_flow *iter, *iter_next;
3055 u32 duration = flow->duration << 8;
3056 u64 start_tsf;
3057
3058 iter = list_first_entry_or_null(&dev->twt_list,
3059 struct mt7996_twt_flow, list);
3060 if (!iter || !iter->sched || iter->start_tsf > duration) {
3061 /* add flow as first entry in the list */
3062 list_add(&flow->list, &dev->twt_list);
3063 return 0;
3064 }
3065
3066 list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
3067 start_tsf = iter->start_tsf +
3068 mt7996_mac_twt_duration_align(iter->duration);
3069 if (list_is_last(&iter->list, &dev->twt_list))
3070 break;
3071
3072 if (!iter_next->sched ||
3073 iter_next->start_tsf > start_tsf + duration) {
3074 list_add(&flow->list, &iter->list);
3075 goto out;
3076 }
3077 }
3078
3079 /* add flow as last entry in the list */
3080 list_add_tail(&flow->list, &dev->twt_list);
3081 out:
3082 return start_tsf;
3083 }
3084
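/* Reject TWT setup requests that cannot be served: broadcast agreements,
 * non-256us wake duration units, explicit agreements and intervals shorter
 * than the requested wake duration.
 */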
3085 static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
3086 {
3087 struct ieee80211_twt_params *twt_agrt;
3088 u64 interval, duration;
3089 u16 mantissa;
3090 u8 exp;
3091
3092 /* only individual agreement supported */
3093 if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
3094 return -EOPNOTSUPP;
3095
3096 /* only 256us unit supported */
3097 if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
3098 return -EOPNOTSUPP;
3099
3100 twt_agrt = (struct ieee80211_twt_params *)twt->params;
3101
3102 /* explicit agreement not supported */
3103 if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
3104 return -EOPNOTSUPP;
3105
3106 exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
3107 le16_to_cpu(twt_agrt->req_type));
3108 mantissa = le16_to_cpu(twt_agrt->mantissa);
3109 duration = twt_agrt->min_twt_dur << 8;
3110
3111 interval = (u64)mantissa << exp;
3112 if (interval < duration)
3113 return -EOPNOTSUPP;
3114
3115 return 0;
3116 }
3117
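/* Return true if the station already has a TWT flow with the same
 * parameters as the incoming request.
 */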
3118 static bool
3119 mt7996_mac_twt_param_equal(struct mt7996_sta_link *msta_link,
3120 struct ieee80211_twt_params *twt_agrt)
3121 {
3122 u16 type = le16_to_cpu(twt_agrt->req_type);
3123 u8 exp;
3124 int i;
3125
3126 exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
3127 for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
3128 struct mt7996_twt_flow *f;
3129
3130 if (!(msta_link->twt.flowid_mask & BIT(i)))
3131 continue;
3132
3133 f = &msta_link->twt.flow[i];
3134 if (f->duration == twt_agrt->min_twt_dur &&
3135 f->mantissa == twt_agrt->mantissa &&
3136 f->exp == exp &&
3137 f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
3138 f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
3139 f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
3140 return true;
3141 }
3142
3143 return false;
3144 }
3145
3146 void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
3147 struct ieee80211_sta *sta,
3148 struct ieee80211_twt_setup *twt)
3149 {
3150 enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
3151 struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
3152 struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
3153 struct mt7996_sta_link *msta_link = &msta->deflink;
3154 u16 req_type = le16_to_cpu(twt_agrt->req_type);
3155 enum ieee80211_twt_setup_cmd sta_setup_cmd;
3156 struct mt7996_dev *dev = mt7996_hw_dev(hw);
3157 struct mt7996_twt_flow *flow;
3158 u8 flowid, table_id, exp;
3159
3160 if (mt7996_mac_check_twt_req(twt))
3161 goto out;
3162
3163 mutex_lock(&dev->mt76.mutex);
3164
3165 if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
3166 goto unlock;
3167
3168 if (hweight8(msta_link->twt.flowid_mask) ==
3169 ARRAY_SIZE(msta_link->twt.flow))
3170 goto unlock;
3171
3172 if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
3173 setup_cmd = TWT_SETUP_CMD_DICTATE;
3174 twt_agrt->min_twt_dur = MT7996_MIN_TWT_DUR;
3175 goto unlock;
3176 }
3177
3178 if (mt7996_mac_twt_param_equal(msta_link, twt_agrt))
3179 goto unlock;
3180
3181 flowid = ffs(~msta_link->twt.flowid_mask) - 1;
3182 twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
3183 twt_agrt->req_type |= le16_encode_bits(flowid,
3184 IEEE80211_TWT_REQTYPE_FLOWID);
3185
3186 table_id = ffs(~dev->twt.table_mask) - 1;
3187 exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
3188 sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);
3189
3190 flow = &msta_link->twt.flow[flowid];
3191 memset(flow, 0, sizeof(*flow));
3192 INIT_LIST_HEAD(&flow->list);
3193 flow->wcid = msta_link->wcid.idx;
3194 flow->table_id = table_id;
3195 flow->id = flowid;
3196 flow->duration = twt_agrt->min_twt_dur;
3197 flow->mantissa = twt_agrt->mantissa;
3198 flow->exp = exp;
3199 flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
3200 flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
3201 flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);
3202
3203 if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
3204 sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
3205 u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
3206 u64 flow_tsf, curr_tsf;
3207 u32 rem;
3208
3209 flow->sched = true;
3210 flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
3211 curr_tsf = __mt7996_get_tsf(hw, &msta->vif->deflink);
3212 div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
3213 flow_tsf = curr_tsf + interval - rem;
3214 twt_agrt->twt = cpu_to_le64(flow_tsf);
3215 } else {
3216 list_add_tail(&flow->list, &dev->twt_list);
3217 }
3218 flow->tsf = le64_to_cpu(twt_agrt->twt);
3219
3220 if (mt7996_mcu_twt_agrt_update(dev, &msta->vif->deflink, flow,
3221 MCU_TWT_AGRT_ADD))
3222 goto unlock;
3223
3224 setup_cmd = TWT_SETUP_CMD_ACCEPT;
3225 dev->twt.table_mask |= BIT(table_id);
3226 msta_link->twt.flowid_mask |= BIT(flowid);
3227 dev->twt.n_agrt++;
3228
3229 unlock:
3230 mutex_unlock(&dev->mt76.mutex);
3231 out:
3232 twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
3233 twt_agrt->req_type |=
3234 le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
3235 twt->control = twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED;
3236 }
3237
3238 void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
3239 struct mt7996_vif_link *link,
3240 struct mt7996_sta_link *msta_link,
3241 u8 flowid)
3242 {
3243 struct mt7996_twt_flow *flow;
3244
3245 lockdep_assert_held(&dev->mt76.mutex);
3246
3247 if (flowid >= ARRAY_SIZE(msta_link->twt.flow))
3248 return;
3249
3250 if (!(msta_link->twt.flowid_mask & BIT(flowid)))
3251 return;
3252
3253 flow = &msta_link->twt.flow[flowid];
3254 if (mt7996_mcu_twt_agrt_update(dev, link, flow, MCU_TWT_AGRT_DELETE))
3255 return;
3256
3257 list_del_init(&flow->list);
3258 msta_link->twt.flowid_mask &= ~BIT(flowid);
3259 dev->twt.table_mask &= ~BIT(flow->table_id);
3260 dev->twt.n_agrt--;
3261 }
3262