// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "coredump.h"
#include "mt7996.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"

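/* Convert an 8-bit hardware RCPI field to dBm:
 * (rcpi - 220) / 2 == rcpi / 2 - 110
 */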
#define to_rssi(field, rcpi)	((FIELD_GET(field, rcpi) - 220) / 2)

static const struct mt7996_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1,  1 },
		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1,  1 },
		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1,  1 },
		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1,  1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
	},
};

static const struct mt7996_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  8,  32, 28, 0, 508, 3076, 13, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 140,  240, 17, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 190,  510, 22, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 190,  510, 32, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
	},
};

static const struct mt7996_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
	},
};

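/* Resolve the mt76_wcid for a received frame: unicast frames map
 * straight to the per-station entry, while group-addressed frames from
 * a known station are redirected to the owning vif's own sta wcid.
 */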
static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
					    u16 idx, bool unicast)
{
	struct mt7996_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7996_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, dw);
}

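/* Walk the pending sta_poll_list and, for each station, read the
 * per-AC TX/RX airtime counters from its WTBL entry (DW20 onwards),
 * report the deltas to mac80211 and refresh the ACK-frame RSSI.
 * Counters are cleared via ADM_COUNT_CLEAR once BIT(30) signals an
 * imminent wrap-around.
 */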
static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
{
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct ieee80211_sta *sta;
	struct mt7996_sta *msta;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	int i;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr, val;
		u16 idx;
		s8 rssi[4];

		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->mt76.sta_poll_lock);
			break;
		}
		msta = list_first_entry(&sta_poll_list,
					struct mt7996_sta, wcid.poll_list);
		list_del_init(&msta->wcid.poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		idx = msta->wcid.idx;

		/* refresh peer's airtime reporting */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta->airtime_ac[i];
			u32 rx_last = msta->airtime_ac[i + 4];

			msta->airtime_ac[i] = mt76_rr(dev, addr);
			msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta->airtime_ac[i] - tx_last;
			rx_time[i] = msta->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7996_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
		}

		if (!msta->wcid.sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 q = mt76_connac_lmac_mapping(i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur);
		}

		/* get signal strength of resp frames (CTS/BA/ACK) */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34);
		val = mt76_rr(dev, addr);

		rssi[0] = to_rssi(GENMASK(7, 0), val);
		rssi[1] = to_rssi(GENMASK(15, 8), val);
		rssi[2] = to_rssi(GENMASK(23, 16), val);
		rssi[3] = to_rssi(GENMASK(31, 24), val);

		msta->ack_signal =
			mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);

		ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
	}

	rcu_read_unlock();
}

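/* Toggle RTS/CTS protection for the vif's own wcid entry; BIT(5) in
 * WTBL DW5 appears to be the per-peer protection enable bit.
 */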
void mt7996_mac_enable_rtscts(struct mt7996_dev *dev,
			      struct ieee80211_vif *vif, bool enable)
{
	struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
	u32 addr;

	addr = mt7996_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5);
	if (enable)
		mt76_set(dev, addr, BIT(5));
	else
		mt76_clear(dev, addr, BIT(5));
}

/* The HW does not translate the mac header to 802.3 for mesh point */
static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt7996_sta *msta = (struct mt7996_sta *)status->wcid;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
	    MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		return -EINVAL;
	}

	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
		       IEEE80211_HT_CTL_LEN);
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	return 0;
}

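/* Decode the PHY RX vector (P-RXV) into mac80211 rate info: rate
 * index, NSS, guard interval, DCM and bandwidth across
 * CCK/OFDM/HT/VHT/HE/EHT modes.
 */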
static int
mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
			struct mt76_rx_status *status,
			struct ieee80211_supported_band *sband,
			__le32 *rxv, u8 *mode)
{
	u32 v0, v2;
	u8 stbc, gi, bw, dcm, nss;
	int i, idx;
	bool cck = false;

	v0 = le32_to_cpu(rxv[0]);
	v2 = le32_to_cpu(rxv[2]);

	idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
	i = idx;
	nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;

	stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
	gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
	*mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
	dcm = FIELD_GET(MT_PRXV_DCM, v2);
	bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);

	switch (*mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		i = mt76_get_rate(&dev->mt76, sband, i, cck);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 31)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_VHT:
		status->nss = nss;
		status->encoding = RX_ENC_VHT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 11)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_HE_MU:
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
		status->nss = nss;
		status->encoding = RX_ENC_HE;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
			status->he_gi = gi;

		status->he_dcm = dcm;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		status->nss = nss;
		status->encoding = RX_ENC_EHT;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_EHT_GI_3_2)
			status->eht.gi = gi;
		break;
	default:
		return -EINVAL;
	}
	status->rate_idx = i;

	switch (bw) {
	case IEEE80211_STA_RX_BW_20:
		break;
	case IEEE80211_STA_RX_BW_40:
		if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
		    (idx & MT_PRXV_TX_ER_SU_106T)) {
			status->bw = RATE_INFO_BW_HE_RU;
			status->he_ru =
				NL80211_RATE_INFO_HE_RU_ALLOC_106;
		} else {
			status->bw = RATE_INFO_BW_40;
		}
		break;
	case IEEE80211_STA_RX_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	case IEEE80211_STA_RX_BW_160:
		status->bw = RATE_INFO_BW_160;
		break;
	/* rxv reports bw 320-1 and 320-2 separately */
	case IEEE80211_STA_RX_BW_320:
	case IEEE80211_STA_RX_BW_320 + 1:
		status->bw = RATE_INFO_BW_320;
		break;
	default:
		return -EINVAL;
	}

	status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
	if (*mode < MT_PHY_TYPE_HE_SU && gi)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	return 0;
}

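/* If a frame arrived on a WED RX queue with a valid PPE hint, pass it
 * to the WED/PPE flow table so the hardware can match or learn the
 * flow for offloading.
 */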
static void
mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q,
		     struct mt7996_sta *msta, struct sk_buff *skb,
		     u32 info)
{
	struct ieee80211_vif *vif;
	struct wireless_dev *wdev;

	if (!msta || !msta->vif)
		return;

	if (!mt76_queue_is_wed_rx(q))
		return;

	if (!(info & MT_DMA_INFO_PPE_VLD))
		return;

	vif = container_of((void *)msta->vif, struct ieee80211_vif,
			   drv_priv);
	wdev = ieee80211_vif_to_wdev(vif);
	skb->dev = wdev->netdev;

	mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
				 FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
				 FIELD_GET(MT_DMA_PPE_ENTRY, info));
}

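/* Parse a normal RX descriptor (RXD groups 1/2/3/4/5) into
 * mt76_rx_status, strip the descriptor and padding, and undo or keep
 * the hardware 802.3 header translation. Returns 0 on success,
 * -EINVAL if the frame should be dropped.
 */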
static int
mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
		   struct sk_buff *skb, u32 *info)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7996_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
	u32 csum_status = *(u32 *)skb->cb;
	u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
	bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad, amsdu_info, band_idx;
	u8 mode = 0, qos_ctl = 0;
	bool hdr_trans;
	u16 hdr_gap;
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;
	u8 hw_aggr = false;
	struct mt7996_sta *msta = NULL;

	hw_aggr = status->aggr;
	memset(status, 0, sizeof(*status));

	band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
	mphy = dev->mt76.phys[band_idx];
	phy = mphy->priv;
	status->phy_idx = mphy->band_idx;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7996_rx_get_wcid(dev, idx, unicast);

	if (status->wcid) {
		msta = container_of(status->wcid, struct mt7996_sta, wcid);
		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&msta->wcid.poll_list))
			list_add_tail(&msta->wcid.poll_list,
				      &dev->mt76.sta_poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else if (status->band == NL80211_BAND_6GHZ)
		sband = &mphy->sband_6g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if ((rxd0 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd3 & MT_RXD3_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	rxd += 8;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v3;
		int ret;

		rxv = rxd;
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v3 = le32_to_cpu(rxv[3]);

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 24;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
		if (ret < 0)
			return ret;
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/* When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field. This happens either when the LLC-SNAP
			 * pattern did not match, or if a VLAN header was
			 * detected.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			u8 *qos = ieee80211_get_qos_ctl(hdr);

			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *qos;

			/* Mesh DA/SA/Length will be stripped after hardware
			 * de-amsdu, so here needs to clear amsdu present bit
			 * to mark it as a normal mesh frame.
			 */
			if (ieee80211_has_a4(fc) && is_mesh && status->amsdu)
				*qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
	} else {
		status->flag |= RX_FLAG_8023;
		mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
				     *info);
	}

	if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
		mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);

	if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr)
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}

static void
mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid)
{
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	u8 fc_type, fc_stype;
	u16 ethertype;
	bool wmm = false;
	u32 val;

	if (wcid->sta) {
		struct ieee80211_sta *sta;

		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
		wmm = sta->wme;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	ethertype = get_unaligned_be16(&skb->data[12]);
	if (ethertype >= ETH_P_802_3_MIN)
		val |= MT_TXD1_ETH_802_3;

	txwi[1] |= cpu_to_le32(val);

	fc_type = IEEE80211_FTYPE_DATA >> 2;
	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);
}

static void
mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
			    struct sk_buff *skb, struct ieee80211_key_conf *key)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control;
	u8 fc_type, fc_stype;
	u32 val;

	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
		tid = MT_TX_ADDBA;
	else if (ieee80211_is_mgmt(hdr->frame_control))
		tid = MT_TX_NORMAL;

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	if (!ieee80211_is_data(fc) || multicast ||
	    info->flags & IEEE80211_TX_CTL_USE_MINRATE)
		val |= MT_TXD1_FIXED_RATE;

	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
	    key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		val |= MT_TXD1_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		u16 seqno = le16_to_cpu(hdr->seq_ctrl);

		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}
}

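/* Fill the TX descriptor for either an 802.3 or an 802.11 frame.
 * Beacons and inband discovery frames go through the firmware path
 * (MT_TX_TYPE_FW) on dedicated queues; everything else uses
 * cut-through (MT_TX_TYPE_CT) on the per-AC hardware queues.
 */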
void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, int pid,
			   enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt76_vif *mvif;
	u16 tx_count = 15;
	u32 val;
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
				    BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);

	mvif = vif ? (struct mt76_vif *)vif->drv_priv : NULL;
	if (mvif) {
		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
		band_idx = mvif->band_idx;
	}

	if (inband_disc) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_ALTX0;
	} else if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7996_MAX_WMM_SETS +
			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (band_idx)
		val |= FIELD_PREP(MT_TXD1_TGID, band_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = MT_TXD3_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;
	if (wcid->amsdu)
		val |= MT_TXD3_HW_AMSDU;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	val = FIELD_PREP(MT_TXD5_PID, pid);
	if (pid >= MT_PACKET_ID_FIRST)
		val |= MT_TXD5_TX_STATUS_HOST;
	txwi[5] = cpu_to_le32(val);

	val = MT_TXD6_DIS_MAT | MT_TXD6_DAS;
	if (is_mt7996(&dev->mt76))
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
	else
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT_V2, 1);
	txwi[6] = cpu_to_le32(val);
	txwi[7] = 0;

	if (is_8023)
		mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7996_mac_write_txwi_80211(dev, txwi, skb, key);

	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		bool mcast = ieee80211_is_data(hdr->frame_control) &&
			     is_multicast_ether_addr(hdr->addr1);
		u8 idx = MT7996_BASIC_RATES_TBL;

		if (mvif) {
			if (mcast && mvif->mcast_rates_idx)
				idx = mvif->mcast_rates_idx;
			else if (beacon && mvif->beacon_rates_idx)
				idx = mvif->beacon_rates_idx;
			else
				idx = mvif->basic_rates_idx;
		}

		val = FIELD_PREP(MT_TXD6_TX_RATE, idx) | MT_TXD6_FIXED_BW;
		txwi[6] |= cpu_to_le32(val);
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}

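/* mt76 .tx_prepare_skb hook: reserve a token for this MSDU, write the
 * TXD and build the cut-through TXP buffer list; only the first
 * MT_CT_PARSE_LEN bytes of the header are passed to the firmware.
 */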
int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_connac_txp_common *txp;
	struct mt76_txwi_cache *t;
	int id, i, pid, nbuf = tx_info->nbuf - 1;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
			      pid, qid, 0);

	txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		u16 len;

		len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		len |= FIELD_PREP(MT_TXP_DMA_ADDR_H,
				  tx_info->buf[i + 1].addr >> 32);
#endif

		txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->fw.len[i] = cpu_to_le16(len);
	}
	txp->fw.nbuf = nbuf;

	txp->fw.flags =
		cpu_to_le16(MT_CT_INFO_FROM_HOST | MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!is_8023 && ieee80211_is_mgmt(hdr->frame_control))
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;

		txp->fw.bss_idx = mvif->mt76.idx;
	}

	txp->fw.token = cpu_to_le16(id);
	txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);

	tx_info->skb = NULL;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}

u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
{
	struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
	__le32 *txwi = ptr;
	u32 val;

	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));

	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
	txwi[0] = cpu_to_le32(val);

	val = BIT(31) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
	txwi[1] = cpu_to_le32(val);

	txp->token = cpu_to_le16(token_id);
	txp->nbuf = 1;
	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));

	return MT_TXD_SIZE + sizeof(*txp);
}

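/* Start a TX BA session the first time QoS data for a given TID is
 * sent to an HT/HE-capable peer; VO TIDs (6, 7) are left unaggregated.
 */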
static void
mt7996_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb)
{
	struct mt7996_sta *msta;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u16 fc, tid;

	if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	if (tid >= 6) /* skip VO queue */
		return;

	if (is_8023) {
		fc = IEEE80211_FTYPE_DATA |
		     (sta->wme ? IEEE80211_STYPE_QOS_DATA : IEEE80211_STYPE_DATA);
	} else {
		/* No need to get precise TID for Action/Management Frame,
		 * since it will not meet the following Frame Control
		 * condition anyway.
		 */

		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

		fc = le16_to_cpu(hdr->frame_control) &
		     (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
	}

	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	msta = (struct mt7996_sta *)sta->drv_priv;
	if (!test_and_set_bit(tid, &msta->wcid.ampdu_state))
		ieee80211_start_tx_ba_session(sta, tid, 0);
}

static void
mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_sta *sta, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_wcid *wcid;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (sta) {
		wcid = (struct mt76_wcid *)sta->drv_priv;
		wcid_idx = wcid->idx;

		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7996_tx_check_aggr(sta, t->skb);
	} else {
		wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}

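/* Handle a TXRX_NOTIFY (tx-free) event: walk the (wcid, msdu) pair
 * list, release completed MSDU tokens, accumulate per-wcid retry/fail
 * statistics and queue affected stations for airtime polling.
 */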
static void
mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
	struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	struct mt76_wcid *wcid = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
	void *end = data + len;
	bool wake = false;
	u16 total, count = 0;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (phy2) {
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false);
	}
	if (phy3) {
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
	}

	if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 5))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct mt7996_sta *msta;
			u16 idx;

			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = rcu_dereference(dev->mt76.wcid[idx]);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			msta = container_of(wcid, struct mt7996_sta, wcid);
			spin_lock_bh(&mdev->sta_poll_lock);
			if (list_empty(&msta->wcid.poll_list))
				list_add_tail(&msta->wcid.poll_list,
					      &mdev->sta_poll_list);
			spin_unlock_bh(&mdev->sta_poll_lock);
			continue;
		} else if (info & MT_TXFREE_INFO_HEADER) {
			u32 tx_retries = 0, tx_failed = 0;

			if (!wcid)
				continue;

			tx_retries =
				FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1;
			tx_failed = tx_retries +
				!!FIELD_GET(MT_TXFREE_INFO_STAT, info);

			wcid->stats.tx_retries += tx_retries;
			wcid->stats.tx_failed += tx_failed;
			continue;
		}

		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7996_txwi_free(dev, txwi, sta, &free_list);
		}
	}

	mt7996_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}

static bool
mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
		       int pid, __le32 *txs_data)
{
	struct mt76_sta_stats *stats = &wcid->stats;
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	txs = le32_to_cpu(txs_data[0]);

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);

	if (skb) {
		info = IEEE80211_SKB_CB(skb);
		if (!(txs & MT_TXS0_ACK_ERROR_MASK))
			info->flags |= IEEE80211_TX_STAT_ACK;

		info->status.ampdu_len = 1;
		info->status.ampdu_ack_len =
			!!(info->flags & IEEE80211_TX_STAT_ACK);

		info->status.rates[0].idx = -1;
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) {
		struct ieee80211_sta *sta;
		u8 tid;

		sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
		tid = FIELD_GET(MT_TXS0_TID, txs);
		ieee80211_refresh_tx_agg_session_timer(sta, tid);
	}

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		if (rate.mcs > 13)
			goto out;

		rate.eht_gi = wcid->rate.eht_gi;
		rate.flags = RATE_INFO_FLAGS_EHT_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_320:
		rate.bw = RATE_INFO_BW_320;
		stats->tx_bw[4]++;
		break;
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	if (skb)
		mt76_tx_status_skb_done(mdev, skb, &list);
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
{
	struct mt7996_sta *msta = NULL;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	if (pid < MT_PACKET_ID_NO_SKB)
		return;

	if (wcidx >= mt7996_wtbl_size(dev))
		return;

	rcu_read_lock();

	wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
	if (!wcid)
		goto out;

	msta = container_of(wcid, struct mt7996_sta, wcid);

	mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data);

	if (!wcid->sta)
		goto out;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	if (list_empty(&msta->wcid.poll_list))
		list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

out:
	rcu_read_unlock();
}

bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			return true;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7996_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		for (rxd += 4; rxd + 8 <= end; rxd += 8)
			mt7996_mac_add_txs(dev, rxd);
		return false;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, data, len);
		return false;
	default:
		return true;
	}
}

void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2) &&
		    q == MT_RXQ_TXFREE_BAND2) {
			dev_kfree_skb(skb);
			break;
		}

		mt7996_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7996_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += 4; rxd + 8 <= end; rxd += 8)
			mt7996_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}

void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx);

	mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN);
	mt76_set(dev, reg, BIT(11) | BIT(9));
}

void mt7996_mac_reset_counters(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int i;

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));

	phy->mt76->survey_time = ktime_get_boottime();

	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7996_mcu_get_chan_mib_info(phy, true);
}

void mt7996_mac_set_coverage_class(struct mt7996_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7996_dev *dev = phy->dev;
	struct mt7996_phy *phy2 = mt7996_phy2(dev);
	struct mt7996_phy *phy3 = mt7996_phy3(dev);
	u32 reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	u8 band_idx = phy->mt76->band_idx;
	int offset;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (phy2)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       phy2->coverage_class);

	if (phy3)
		coverage_class = max_t(s16, coverage_class,
				       phy3->coverage_class);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
}

void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
{
	mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band),
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR);

	mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band),
		 FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5));
}

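/* Average the per-antenna IRPI histograms into an approximate noise
 * floor; nf_power[] likely maps histogram bins to noise power in -dBm.
 */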
static u8
mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	struct mt7996_dev *dev = phy->dev;
	u32 val, sum = 0, n = 0;
	int ant, i;

	for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) {
		u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant);

		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
			val = mt76_rr(dev, reg);
			sum += val * nf_power[i];
			n += val;
		}
	}

	return n ? sum / n : 0;
}

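/* phy->noise is a running average kept in fixed point (scaled by 16);
 * the reported channel noise is -(phy->noise >> 4) dBm.
 */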
void mt7996_update_channel(struct mt76_phy *mphy)
{
	struct mt7996_phy *phy = mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7996_mcu_get_chan_mib_info(phy, false);

	nf = mt7996_phy_get_nf(phy, mphy->band_idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state->noise = -(phy->noise >> 4);
}

static bool
mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->recovery.state) & state),
				 MT7996_RESET_TIMEOUT);

	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

static void
mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		mt7996_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon);
		break;
	default:
		break;
	}
}

static void
mt7996_update_beacons(struct mt7996_dev *dev)
{
	struct mt76_phy *phy2, *phy3;

	ieee80211_iterate_active_interfaces(dev->mt76.hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, dev->mt76.hw);

	phy2 = dev->mt76.phys[MT_BAND1];
	if (!phy2)
		return;

	ieee80211_iterate_active_interfaces(phy2->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy2->hw);

	phy3 = dev->mt76.phys[MT_BAND2];
	if (!phy3)
		return;

	ieee80211_iterate_active_interfaces(phy3->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy3->hw);
}

void mt7996_tx_token_put(struct mt7996_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7996_txwi_free(dev, txwi, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}

static int
mt7996_mac_restart(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt76_dev *mdev = &dev->mt76;
	int i, ret;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}

	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
	}

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		set_bit(MT76_MCU_RESET, &phy2->mt76->state);
	}
	if (phy3) {
		set_bit(MT76_RESET, &phy3->mt76->state);
		set_bit(MT76_MCU_RESET, &phy3->mt76->state);
	}

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (phy2)
		mt76_txq_schedule_all(phy2->mt76);
	if (phy3)
		mt76_txq_schedule_all(phy3->mt76);

	/* disable all tx/rx napi */
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc)
			napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	/* token reinit */
	mt7996_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7996_dma_reset(dev, true);

	local_bh_disable();
	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc) {
			napi_enable(&dev->mt76.napi[i]);
			napi_schedule(&dev->mt76.napi[i]);
		}
	}
	local_bh_enable();
	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);

	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}
	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
	}

	/* load firmware */
	ret = mt7996_mcu_init_firmware(dev);
	if (ret)
		goto out;

	/* set the necessary init items */
	ret = mt7996_mcu_set_eeprom(dev);
	if (ret)
		goto out;

	mt7996_mac_init(dev);
	mt7996_init_txpower(&dev->phy);
	mt7996_init_txpower(phy2);
	mt7996_init_txpower(phy3);
	ret = mt7996_txbf_init(dev);

	if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
		ret = mt7996_run(dev->mphy.hw);
		if (ret)
			goto out;
	}

	if (phy2 && test_bit(MT76_STATE_RUNNING, &phy2->mt76->state)) {
		ret = mt7996_run(phy2->mt76->hw);
		if (ret)
			goto out;
	}

	if (phy3 && test_bit(MT76_STATE_RUNNING, &phy3->mt76->state)) {
		ret = mt7996_run(phy3->mt76->hw);
		if (ret)
			goto out;
	}

out:
	/* reset done */
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	local_bh_disable();
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);
	return ret;
}

static void
mt7996_mac_full_reset(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	int i;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);
	dev->recovery.hw_full_reset = true;

	wake_up(&dev->mt76.mcu.wait);
	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	cancel_work_sync(&dev->wed_rro.work);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2)
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	if (phy3)
		cancel_delayed_work_sync(&phy3->mt76->mac_work);

	mutex_lock(&dev->mt76.mutex);
	for (i = 0; i < 10; i++) {
		if (!mt7996_mac_restart(dev))
			break;
	}
	mutex_unlock(&dev->mt76.mutex);

	if (i == 10)
		dev_err(dev->mt76.dev, "chip full reset failed\n");

	ieee80211_restart_hw(mt76_hw(dev));
	if (phy2)
		ieee80211_restart_hw(phy2->mt76->hw);
	if (phy3)
		ieee80211_restart_hw(phy3->mt76->hw);

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	dev->recovery.hw_full_reset = false;
	ieee80211_queue_delayed_work(mt76_hw(dev),
				     &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
}

void mt7996_mac_reset_work(struct work_struct *work)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt7996_dev *dev;
	int i;

	dev = container_of(work, struct mt7996_dev, reset_work);
	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	/* chip full reset */
	if (dev->recovery.restart) {
		/* disable WA/WM WDT */
		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
			   MT_MCU_CMD_WDT_MASK);

		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
			dev->recovery.wa_reset_count++;
		else
			dev->recovery.wm_reset_count++;

		mt7996_mac_full_reset(dev);

		/* enable mcu irq */
		mt7996_irq_enable(dev, MT_INT_MCU_CMD);
		mt7996_irq_disable(dev, 0);

		/* enable WA/WM WDT */
		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);

		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
		dev->recovery.restart = false;
		return;
	}

	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
		return;

	dev_info(dev->mt76.dev,"\n%s L1 SER recovery start.",
		 wiphy_name(dev->mt76.hw->wiphy));

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
		mtk_wed_device_stop(&dev->mt76.mmio.wed_hif2);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_stop(&dev->mt76.mmio.wed);

	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);

	cancel_work_sync(&dev->wed_rro.work);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}
	if (phy3) {
		set_bit(MT76_RESET, &phy3->mt76->state);
		cancel_delayed_work_sync(&phy3->mt76->mac_work);
	}
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7996_dma_reset(dev, false);

		mt7996_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

1920 /* enable DMA Tx/Tx and interrupt */
1921 mt7996_dma_start(dev, false, false);
1922
1923 if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
1924 u32 wed_irq_mask = MT_INT_RRO_RX_DONE | MT_INT_TX_DONE_BAND2 |
1925 dev->mt76.mmio.irqmask;
1926
1927 if (mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
1928 wed_irq_mask &= ~MT_INT_RX_DONE_RRO_IND;
1929
1930 mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
1931
1932 mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
1933 true);
1934 mt7996_irq_enable(dev, wed_irq_mask);
1935 mt7996_irq_disable(dev, 0);
1936 }
1937
1938 if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
1939 mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, MT_INT_TX_RX_DONE_EXT);
1940 mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
1941 MT_INT_TX_RX_DONE_EXT);
1942 }
1943
1944 clear_bit(MT76_MCU_RESET, &dev->mphy.state);
1945 clear_bit(MT76_RESET, &dev->mphy.state);
1946 if (phy2)
1947 clear_bit(MT76_RESET, &phy2->mt76->state);
1948 if (phy3)
1949 clear_bit(MT76_RESET, &phy3->mt76->state);
1950
1951 local_bh_disable();
1952 mt76_for_each_q_rx(&dev->mt76, i) {
1953 if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
1954 mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
1955 continue;
1956
1957 napi_enable(&dev->mt76.napi[i]);
1958 napi_schedule(&dev->mt76.napi[i]);
1959 }
1960 local_bh_enable();
1961
1962 tasklet_schedule(&dev->mt76.irq_tasklet);
1963
1964 mt76_worker_enable(&dev->mt76.tx_worker);
1965
1966 local_bh_disable();
1967 napi_enable(&dev->mt76.tx_napi);
1968 napi_schedule(&dev->mt76.tx_napi);
1969 local_bh_enable();
1970
1971 ieee80211_wake_queues(mt76_hw(dev));
1972 if (phy2)
1973 ieee80211_wake_queues(phy2->mt76->hw);
1974 if (phy3)
1975 ieee80211_wake_queues(phy3->mt76->hw);
1976
1977 mutex_unlock(&dev->mt76.mutex);
1978
1979 mt7996_update_beacons(dev);
1980
1981 ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
1982 MT7996_WATCHDOG_TIME);
1983 if (phy2)
1984 ieee80211_queue_delayed_work(phy2->mt76->hw,
1985 &phy2->mt76->mac_work,
1986 MT7996_WATCHDOG_TIME);
1987 if (phy3)
1988 ieee80211_queue_delayed_work(phy3->mt76->hw,
1989 &phy3->mt76->mac_work,
1990 MT7996_WATCHDOG_TIME);
1991 dev_info(dev->mt76.dev,"\n%s L1 SER recovery completed.",
1992 wiphy_name(dev->mt76.hw->wiphy));
1993 }

/* firmware coredump */
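/*
 * The dump buffer is laid out as a sequence of records, each made of a
 * struct mt7996_mem_hdr followed by the raw contents of one firmware
 * memory region; a zero-length region terminates the list early, with
 * its header left in place.
 */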
void mt7996_mac_dump_work(struct work_struct *work)
{
	const struct mt7996_mem_region *mem_region;
	struct mt7996_crash_data *crash_data;
	struct mt7996_dev *dev;
	struct mt7996_mem_hdr *hdr;
	size_t buf_len;
	int i;
	u32 num;
	u8 *buf;

	dev = container_of(work, struct mt7996_dev, dump_work);

	mutex_lock(&dev->dump_mutex);

	crash_data = mt7996_coredump_new(dev);
	if (!crash_data) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_coredump;
	}

	mem_region = mt7996_coredump_get_mem_layout(dev, &num);
	if (!mem_region || !crash_data->memdump_buf_len) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_memdump;
	}

	buf = crash_data->memdump_buf;
	buf_len = crash_data->memdump_buf_len;

	/* dumping memory content... */
	memset(buf, 0, buf_len);
	for (i = 0; i < num; i++) {
		if (mem_region->len > buf_len) {
			dev_warn(dev->mt76.dev, "%s len %zu is too large\n",
				 mem_region->name, mem_region->len);
			break;
		}

		/* reserve space for the header */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		mt7996_memcpy_fromio(dev, buf, mem_region->start,
				     mem_region->len);

		hdr->start = mem_region->start;
		hdr->len = mem_region->len;

		if (!mem_region->len)
			/* note: the header remains, just with zero length */
			break;

		buf += mem_region->len;
		buf_len -= mem_region->len;

		mem_region++;
	}

	mutex_unlock(&dev->dump_mutex);

skip_memdump:
	mt7996_coredump_submit(dev);
skip_coredump:
	queue_work(dev->mt76.wq, &dev->reset_work);
}
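
/*
 * Recovery entry point: a WM/WA watchdog event triggers a firmware
 * coredump followed by a full restart; any other recovery event is
 * handled by the L1 reset work.
 */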
void mt7996_reset(struct mt7996_dev *dev)
{
	if (!dev->recovery.hw_init_done)
		return;

	if (dev->recovery.hw_full_reset)
		return;

	/* wm/wa exception: do full recovery */
	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
		dev->recovery.restart = true;
		dev_info(dev->mt76.dev,
			 "%s indicated firmware crash, attempting recovery\n",
			 wiphy_name(dev->mt76.hw->wiphy));

		mt7996_irq_disable(dev, MT_INT_MCU_CMD);
		queue_work(dev->mt76.wq, &dev->dump_work);
		return;
	}

	queue_work(dev->mt76.wq, &dev->reset_work);
	wake_up(&dev->reset_wait);
}
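
/*
 * Accumulate the hardware MIB counters into the software mib stats.
 * The registers are assumed to clear on read, which is why every
 * counter is added to, rather than assigned to, its mib field.
 */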
void mt7996_mac_update_stats(struct mt7996_phy *phy)
{
	struct mt76_mib_stats *mib = &phy->mib;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	u32 cnt;
	int i;

	cnt = mt76_rr(dev, MT_MIB_RSCR1(band_idx));
	mib->fcs_err_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx));
	mib->rx_fifo_full_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx));
	mib->rx_vector_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx));
	mib->rx_delimiter_fail_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx));
	mib->rx_len_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx));
	mib->tx_stop_q_empty_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx));
	mib->tx_mpdu_attempts_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx));
	mib->tx_mpdu_success_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx));
	mib->rx_ampdu_valid_subframe_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx));
	mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx));
	mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt);

	cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx));
	mib->rx_pfdrop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx));
	mib->rx_ba_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx));
	mib->tx_bf_ebf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx));
	mib->tx_bf_ibf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx));
	mib->tx_mu_bf_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx));
	mib->tx_bf_rx_fb_ht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx));
	mib->tx_bf_rx_fb_vht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx));
	mib->tx_bf_rx_fb_he_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx));
	mib->tx_bf_rx_fb_eht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);

	cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx));
	mib->tx_bf_fb_trig_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx));
	mib->tx_bf_fb_cpl_cnt += cnt;

	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	/* rts count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR5(band_idx));
	mib->rts_cnt += cnt;

	/* rts retry count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR6(band_idx));
	mib->rts_retries_cnt += cnt;

	/* ba miss count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR0(band_idx));
	mib->ba_miss_cnt += cnt;

	/* ack fail count */
	cnt = mt76_rr(dev, MT_MIB_BFTFCR(band_idx));
	mib->ack_fail_cnt += cnt;

	for (i = 0; i < 16; i++) {
		cnt = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
		phy->mt76->aggr_stats[i] += cnt;
	}
}
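
/*
 * Flush the per-station rate-control update list. The poll lock is
 * dropped around the MCU calls so that new entries can be queued while
 * firmware is being updated.
 */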
void mt7996_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct mt7996_sta *msta;
	u32 changed;
	LIST_HEAD(list);

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta = list_first_entry(&list, struct mt7996_sta, rc_list);
		list_del_init(&msta->rc_list);
		changed = msta->changed;
		msta->changed = 0;
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
		vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7996_mcu_add_rate_ctrl(dev, vif, sta, true);

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7996_mcu_set_fixed_field(dev, vif, sta, NULL,
						   RATE_PARAM_MMPS_UPDATE);

		spin_lock_bh(&dev->mt76.sta_poll_lock);
	}

	spin_unlock_bh(&dev->mt76.sta_poll_lock);
}
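
/*
 * Per-phy watchdog, rescheduled every MT7996_WATCHDOG_TIME: updates
 * the channel survey on every run and refreshes MIB and per-station
 * statistics on every fifth run.
 */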
void mt7996_mac_work(struct work_struct *work)
{
	struct mt7996_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7996_mac_update_stats(phy);

		mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_RATE);
		if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_ADM_STAT);
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_MSDU_COUNT);
		}
	}

	mutex_unlock(&mphy->dev->mutex);

	mt76_tx_status_check(mphy->dev, false);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7996_WATCHDOG_TIME);
}
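
/*
 * phy->rdd_state tracks which RDD chains are running, one bit per
 * chain; stop whichever of chain 0/1 was started.
 */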
static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;

	if (phy->rdd_state & BIT(0))
		mt7996_mcu_rdd_cmd(dev, RDD_STOP, 0,
				   MT_RX_SEL0, 0);
	if (phy->rdd_state & BIT(1))
		mt7996_mcu_rdd_cmd(dev, RDD_STOP, 1,
				   MT_RX_SEL0, 0);
}
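
/*
 * Map the NL80211 DFS region to the firmware region code (0 = ETSI,
 * 1 = FCC, 2 = JP), then start the detector and switch it to detection
 * mode on the given chain.
 */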
static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int chain)
{
	int err, region;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		region = 1;
		break;
	}

	err = mt7996_mcu_rdd_cmd(dev, RDD_START, chain,
				 MT_RX_SEL0, region);
	if (err < 0)
		return err;

	return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, chain,
				  MT_RX_SEL0, 1);
}
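
/*
 * Kick off CAC and start RDD on the band's own chain; 160 MHz and
 * 80+80 MHz channels additionally need the second chain.
 */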
static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int err;

	/* start CAC */
	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, band_idx,
				 MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	err = mt7996_dfs_start_rdd(dev, band_idx);
	if (err < 0)
		return err;

	phy->rdd_state |= BIT(band_idx);

	if (chandef->width == NL80211_CHAN_WIDTH_160 ||
	    chandef->width == NL80211_CHAN_WIDTH_80P80) {
		err = mt7996_dfs_start_rdd(dev, 1);
		if (err < 0)
			return err;

		phy->rdd_state |= BIT(1);
	}

	return 0;
}
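
/*
 * Program the regulatory radar patterns and pulse thresholds into
 * firmware; FCC additionally needs the FCC5 long-pulse threshold.
 */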
static int
mt7996_dfs_init_radar_specs(struct mt7996_phy *phy)
{
	const struct mt7996_dfs_radar_spec *radar_specs;
	struct mt7996_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7996_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7996_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}
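
/*
 * Drive the DFS state machine: DISABLED -> CAC -> ACTIVE. Entering a
 * DFS channel loads the radar specs and starts CAC; once the channel
 * becomes usable, CAC is ended and the detector stays active. Leaving
 * DFS switches the firmware back to normal (non-radar) operation.
 */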
int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err;

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	if (prev_state == dfs_state)
		return 0;

	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7996_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7996_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7996_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END,
				 phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START,
				 phy->mt76->band_idx, MT_RX_SEL0, 0);
	if (err < 0)
		return err;

	mt7996_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}
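
/*
 * TWT wake durations are negotiated in units of 256us (see the check
 * in mt7996_mac_check_twt_req()), so shifting left by eight converts
 * a duration into TSF microseconds.
 */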
static int
mt7996_mac_twt_duration_align(int duration)
{
	return duration << 8;
}
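
/*
 * Insert a flow into the TSF-ordered TWT schedule list, looking for
 * the first gap between already scheduled service periods that can
 * hold the new flow's duration, and return the start TSF picked for
 * it.
 */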
static u64
mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev,
			      struct mt7996_twt_flow *flow)
{
	struct mt7996_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7996_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7996_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}
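
/*
 * Validate a TWT setup request against what the hardware supports:
 * individual (not broadcast) agreements, 256us wake-duration units,
 * implicit agreements, and a wake interval (mantissa << exp, in
 * microseconds) no shorter than the wake duration itself.
 */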
static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;

	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}
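
/*
 * Handle a TWT setup request from a station: validate it, allocate a
 * flow id and agreement table entry, schedule the service period,
 * program the agreement into firmware and report accept/reject back
 * through the setup command field.
 */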
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7996_dev *dev = mt7996_hw_dev(hw);
	struct mt7996_twt_flow *flow;
	int flowid, table_id;
	u8 exp;

	if (mt7996_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
		goto unlock;

	if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
		goto unlock;

	flowid = ffs(~msta->twt.flowid_mask) - 1;
	le16p_replace_bits(&twt_agrt->req_type, flowid,
			   IEEE80211_TWT_REQTYPE_FLOWID);

	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	flow = &msta->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7996_get_tsf(hw, msta->vif);
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
		goto unlock;

	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
			   IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
		       (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
}
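
/*
 * Tear down a single TWT flow: remove the agreement from firmware,
 * then release the flow id and agreement table entry.
 */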
void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
				  struct mt7996_sta *msta,
				  u8 flowid)
{
	struct mt7996_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta->twt.flow))
		return;

	if (!(msta->twt.flowid_mask & BIT(flowid)))
		return;

	flow = &msta->twt.flow[flowid];
	if (mt7996_mcu_twt_agrt_update(dev, msta->vif, flow,
				       MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);
	dev->twt.n_agrt--;
}