// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "coredump.h"
#include "mt7996.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"

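/* Convert an RCPI field to dBm; per the usual RCPI encoding of
 * 2 * (power_dBm + 110), (rcpi - 220) / 2 recovers the power in dBm.
 */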
#define to_rssi(field, rcpi)	((FIELD_GET(field, rcpi) - 220) / 2)

static const struct mt7996_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0,  6, 32, 28, 0,  990, 5010, 17, 1, 1 },
		[6] =  { 1, 0,  9, 32, 28, 0,  615, 5010, 27, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0,  240,  445, 27, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0,  240,  510, 42, 1, 1 },
		[9] =  { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1,  0,  0,  0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
		[12] = { 1, 1,  0,  0,  0, 0,  823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
	},
};

static const struct mt7996_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0,  8,  32, 28, 0, 508, 3076, 13, 1,  1 },
		[1] = { 1, 0, 12,  32, 28, 0, 140,  240, 17, 1,  1 },
		[2] = { 1, 0,  8,  32, 28, 0, 190,  510, 22, 1,  1 },
		[3] = { 1, 0,  6,  32, 28, 0, 190,  510, 32, 1,  1 },
		[4] = { 1, 0,  9, 255, 28, 0, 323,  343, 13, 1, 32 },
	},
};

static const struct mt7996_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0,  8,  32, 28, 0,  508, 3076,  13, 1,  1 },
		[1] =  { 1, 0, 12,  32, 28, 0,  140,  240,  17, 1,  1 },
		[2] =  { 1, 0,  8,  32, 28, 0,  190,  510,  22, 1,  1 },
		[3] =  { 1, 0,  6,  32, 28, 0,  190,  510,  32, 1,  1 },
		[4] =  { 1, 0,  9, 255, 28, 0,  323,  343,  13, 1, 32 },
		[13] = { 1, 0,  7,  32, 28, 0, 3836, 3856,  14, 1,  1 },
		[14] = { 1, 0,  6,  32, 28, 0,  615, 5010, 110, 1,  1 },
		[15] = { 1, 1,  0,   0,  0, 0,   15, 5010, 110, 0,  0, 12, 32, 28 },
	},
};

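/* Look up the mt76_wcid for an RX frame. With MLO, the wcid index found in
 * the RXD may belong to a different link of the same station, so walk the
 * vif links and pick the station link that operates on band_idx.
 */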
static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
					    u16 idx, u8 band_idx)
{
	struct mt7996_sta_link *msta_link;
	struct mt7996_sta *msta;
	struct mt7996_vif *mvif;
	struct mt76_wcid *wcid;
	int i;

	wcid = mt76_wcid_ptr(dev, idx);
	if (!wcid)
		return NULL;

	if (!mt7996_band_valid(dev, band_idx))
		return NULL;

	if (wcid->phy_idx == band_idx)
		return wcid;

	msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
	msta = msta_link->sta;
	if (!msta || !msta->vif)
		return NULL;

	mvif = msta->vif;
	for (i = 0; i < ARRAY_SIZE(mvif->mt76.link); i++) {
		struct mt76_vif_link *mlink;

		mlink = rcu_dereference(mvif->mt76.link[i]);
		if (!mlink)
			continue;

		if (mlink->band_idx != band_idx)
			continue;

		msta_link = rcu_dereference(msta->link[i]);
		break;
	}

	return &msta_link->wcid;
}

bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

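/* Select the WTBL group holding this wcid (groups of 128 entries) via
 * WDUCR, then return the mapped address of the requested entry dword.
 */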
u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, dw);
}

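/* Poll WTBL counters for all stations on the poll list: compute per-AC
 * TX/RX airtime deltas, report them to mac80211, and refresh the average
 * ACK signal from the RSSI of response frames.
 */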
static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
{
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct mt7996_sta_link *msta_link;
	struct mt76_vif_link *mlink;
	struct ieee80211_sta *sta;
	struct mt7996_sta *msta;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	struct mt76_wcid *wcid;
	int i;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr, val;
		u16 idx;
		s8 rssi[4];

		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->mt76.sta_poll_lock);
			break;
		}
		msta_link = list_first_entry(&sta_poll_list,
					     struct mt7996_sta_link,
					     wcid.poll_list);
		msta = msta_link->sta;
		wcid = &msta_link->wcid;
		list_del_init(&wcid->poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		idx = wcid->idx;

		/* refresh peer's airtime reporting */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta_link->airtime_ac[i];
			u32 rx_last = msta_link->airtime_ac[i + 4];

			msta_link->airtime_ac[i] = mt76_rr(dev, addr);
			msta_link->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta_link->airtime_ac[i] - tx_last;
			rx_time[i] = msta_link->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7996_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta_link->airtime_ac, 0,
			       sizeof(msta_link->airtime_ac));
		}

		if (!wcid->sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 q = mt76_connac_lmac_mapping(i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur);
		}

		/* get signal strength of resp frames (CTS/BA/ACK) */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34);
		val = mt76_rr(dev, addr);

		rssi[0] = to_rssi(GENMASK(7, 0), val);
		rssi[1] = to_rssi(GENMASK(15, 8), val);
		rssi[2] = to_rssi(GENMASK(23, 16), val);
		rssi[3] = to_rssi(GENMASK(31, 24), val);

		mlink = rcu_dereference(msta->vif->mt76.link[wcid->link_id]);
		if (mlink) {
			struct mt76_phy *mphy = mt76_vif_link_phy(mlink);

			if (mphy)
				msta_link->ack_signal =
					mt76_rx_signal(mphy->antenna_mask,
						       rssi);
		}

		ewma_avg_signal_add(&msta_link->avg_ack_signal,
				    -msta_link->ack_signal);
	}

	rcu_read_unlock();
}

/* The HW does not translate the mac header to 802.3 for mesh point */
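/* Rebuild a minimal 802.11 header from RXD group 4 and the translated
 * ethernet header so mac80211 can reassemble the remaining fragments.
 */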
static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt7996_sta *msta = (struct mt7996_sta *)status->wcid;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
	    MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		return -EINVAL;
	}

	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
		       IEEE80211_HT_CTL_LEN);
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	return 0;
}

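/* Decode the P-RXV rate words (v0/v2) into mac80211 rx status: rate index,
 * NSS, GI, STBC, DCM and bandwidth for every PHY mode from CCK up to EHT.
 */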
static int
mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
			struct mt76_rx_status *status,
			struct ieee80211_supported_band *sband,
			__le32 *rxv, u8 *mode)
{
	u32 v0, v2;
	u8 stbc, gi, bw, dcm, nss;
	int i, idx;
	bool cck = false;

	v0 = le32_to_cpu(rxv[0]);
	v2 = le32_to_cpu(rxv[2]);

	idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
	i = idx;
	nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;

	stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
	gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
	*mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
	dcm = FIELD_GET(MT_PRXV_DCM, v2);
	bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);

	switch (*mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		i = mt76_get_rate(&dev->mt76, sband, i, cck);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 31)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_VHT:
		status->nss = nss;
		status->encoding = RX_ENC_VHT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 11)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_HE_MU:
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
		status->nss = nss;
		status->encoding = RX_ENC_HE;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
			status->he_gi = gi;

		status->he_dcm = dcm;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		status->nss = nss;
		status->encoding = RX_ENC_EHT;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_EHT_GI_3_2)
			status->eht.gi = gi;
		break;
	default:
		return -EINVAL;
	}
	status->rate_idx = i;

	switch (bw) {
	case IEEE80211_STA_RX_BW_20:
		break;
	case IEEE80211_STA_RX_BW_40:
		if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
		    (idx & MT_PRXV_TX_ER_SU_106T)) {
			status->bw = RATE_INFO_BW_HE_RU;
			status->he_ru =
				NL80211_RATE_INFO_HE_RU_ALLOC_106;
		} else {
			status->bw = RATE_INFO_BW_40;
		}
		break;
	case IEEE80211_STA_RX_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	case IEEE80211_STA_RX_BW_160:
		status->bw = RATE_INFO_BW_160;
		break;
	/* rxv reports bw 320-1 and 320-2 separately */
	case IEEE80211_STA_RX_BW_320:
	case IEEE80211_STA_RX_BW_320 + 1:
		status->bw = RATE_INFO_BW_320;
		break;
	default:
		return -EINVAL;
	}

	status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
	if (*mode < MT_PHY_TYPE_HE_SU && gi)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	return 0;
}

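/* If the frame arrived on a WED RX queue and the DMA info carries a valid
 * PPE entry, hand the skb to the WED PPE check so the flow can be
 * offloaded to hardware.
 */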
static void
mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q,
		     struct mt7996_sta *msta, struct sk_buff *skb,
		     u32 info)
{
	struct ieee80211_vif *vif;
	struct wireless_dev *wdev;

	if (!msta || !msta->vif)
		return;

	if (!mt76_queue_is_wed_rx(q))
		return;

	if (!(info & MT_DMA_INFO_PPE_VLD))
		return;

	vif = container_of((void *)msta->vif, struct ieee80211_vif,
			   drv_priv);
	wdev = ieee80211_vif_to_wdev(vif);
	skb->dev = wdev->netdev;

	mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
				 FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
				 FIELD_GET(MT_DMA_PPE_ENTRY, info));
}

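/* Parse the RX descriptor of a normal frame: walk the optional RXD groups
 * (4: 802.11 header info, 1: security IV, 2: timestamp, 3: P-RXV,
 * 5: C-RXV), fill mt76_rx_status and strip or rebuild headers as needed.
 */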
static int
mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
		   struct sk_buff *skb, u32 *info)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7996_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
	u32 csum_status = *(u32 *)skb->cb;
	u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
	bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad, amsdu_info, band_idx;
	u8 mode = 0, qos_ctl = 0;
	bool hdr_trans;
	u16 hdr_gap;
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;
	u8 hw_aggr = false;
	struct mt7996_sta *msta = NULL;

	hw_aggr = status->aggr;
	memset(status, 0, sizeof(*status));

	band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
	mphy = dev->mt76.phys[band_idx];
	phy = mphy->priv;
	status->phy_idx = mphy->band_idx;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7996_rx_get_wcid(dev, idx, band_idx);

	if (status->wcid) {
		struct mt7996_sta_link *msta_link;

		msta_link = container_of(status->wcid, struct mt7996_sta_link,
					 wcid);
		msta = msta_link->sta;
		mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else if (status->band == NL80211_BAND_6GHZ)
		sband = &mphy->sband_6g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if ((rxd3 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd3 & MT_RXD3_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	rxd += 8;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v3;
		int ret;

		rxv = rxd;
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v3 = le32_to_cpu(rxv[3]);

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 24;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
		if (ret < 0)
			return ret;
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	/* IEEE 802.11 fragmentation can only be applied to unicast frames.
	 * Hence, drop fragments with multicast/broadcast RA.
	 * This check fixes vulnerabilities, like CVE-2020-26145.
	 */
	if ((ieee80211_has_morefrags(fc) || seq_ctrl & IEEE80211_SCTL_FRAG) &&
	    FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) != MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/* When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field. This happens either when the LLC-SNAP
			 * pattern did not match, or if a VLAN header was
			 * detected.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			u8 *qos = ieee80211_get_qos_ctl(hdr);

			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *qos;

			/* Mesh DA/SA/Length will be stripped after hardware
			 * de-amsdu, so clear the A-MSDU present bit here to
			 * mark it as a normal mesh frame.
			 */
			if (ieee80211_has_a4(fc) && is_mesh && status->amsdu)
				*qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
		skb_set_mac_header(skb, (unsigned char *)hdr - skb->data);
	} else {
		status->flag |= RX_FLAG_8023;
		mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
				     *info);
	}

	if (rxv && !(status->flag & RX_FLAG_8023)) {
		switch (status->encoding) {
		case RX_ENC_EHT:
			mt76_connac3_mac_decode_eht_radiotap(skb, rxv, mode);
			break;
		case RX_ENC_HE:
			mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
			break;
		default:
			break;
		}
	}

	if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr)
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}

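/* Fill the TXD fields specific to 802.3 (hardware-encapsulated) frames:
 * header format, TID, ethertype flag and frame type/subtype.
 */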
static void
mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid)
{
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	u8 fc_type, fc_stype;
	u16 ethertype;
	bool wmm = false;
	u32 val;

	if (wcid->sta) {
		struct ieee80211_sta *sta = wcid_to_sta(wcid);

		wmm = sta->wme;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	ethertype = get_unaligned_be16(&skb->data[12]);
	if (ethertype >= ETH_P_802_3_MIN)
		val |= MT_TXD1_ETH_802_3;

	txwi[1] |= cpu_to_le32(val);

	fc_type = IEEE80211_FTYPE_DATA >> 2;
	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	if (wcid->amsdu)
		txwi[3] |= cpu_to_le32(MT_TXD3_HW_AMSDU);
}

static void
mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
			    struct sk_buff *skb,
			    struct ieee80211_key_conf *key,
			    struct mt76_wcid *wcid)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control, sc = hdr->seq_ctrl;
	u16 seqno = le16_to_cpu(sc);
	u8 fc_type, fc_stype;
	u32 val;

	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
		if (is_mt7990(&dev->mt76))
			txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TID_ADDBA, tid));
		tid = MT_TX_ADDBA;
	} else if (ieee80211_is_mgmt(hdr->frame_control)) {
		tid = MT_TX_NORMAL;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	if (!ieee80211_is_data(fc) || multicast ||
	    info->flags & IEEE80211_TX_CTL_USE_MINRATE)
		val |= MT_TXD1_FIXED_RATE;

	if (key && multicast && ieee80211_is_robust_mgmt_frame(skb)) {
		val |= MT_TXD1_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST);
	else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID);
	else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST);
	else
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_NONE);

	txwi[2] |= cpu_to_le32(val);

	txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	if (multicast && ieee80211_vif_is_mld(info->control.vif)) {
		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
	}

	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}

	if (ieee80211_vif_is_mld(info->control.vif) &&
	    (multicast || unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))))
		txwi[5] |= cpu_to_le32(MT_TXD5_FL);

	if (ieee80211_is_nullfunc(fc) && ieee80211_has_a4(fc) &&
	    ieee80211_vif_is_mld(info->control.vif)) {
		txwi[5] |= cpu_to_le32(MT_TXD5_FL);
		txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
	}

	if (!wcid->sta && ieee80211_is_mgmt(fc))
		txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
}

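/* Compose the TXD for a frame: queue and packet format in DW0, wcid/omac/
 * band in DW1, protection and TX count in DW3, PID for TX status in DW5,
 * then fill the format-specific fields and, where required, a fixed rate.
 */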
void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, int pid,
			   enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt76_vif_link *mlink = NULL;
	struct mt7996_vif *mvif;
	unsigned int link_id;
	u16 tx_count = 15;
	u32 val;
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
				    BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);

	if (wcid != &dev->mt76.global_wcid)
		link_id = wcid->link_id;
	else
		link_id = u32_get_bits(info->control.flags,
				       IEEE80211_TX_CTRL_MLO_LINK);

	mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
	if (mvif)
		mlink = rcu_dereference(mvif->mt76.link[link_id]);

	if (mlink) {
		omac_idx = mlink->omac_idx;
		wmm_idx = mlink->wmm_idx;
		band_idx = mlink->band_idx;
	}

	if (inband_disc) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_ALTX0;
	} else if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7996_MAX_WMM_SETS +
			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (band_idx)
		val |= FIELD_PREP(MT_TXD1_TGID, band_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = MT_TXD3_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	val = FIELD_PREP(MT_TXD5_PID, pid);
	if (pid >= MT_PACKET_ID_FIRST)
		val |= MT_TXD5_TX_STATUS_HOST;
	txwi[5] = cpu_to_le32(val);

	val = MT_TXD6_DAS;
	if (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
		val |= MT_TXD6_DIS_MAT;

	if (is_mt7996(&dev->mt76))
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
	else if (is_8023 || !ieee80211_is_mgmt(hdr->frame_control))
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT_V2, 1);

	txwi[6] = cpu_to_le32(val);
	txwi[7] = 0;

	if (is_8023)
		mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7996_mac_write_txwi_80211(dev, txwi, skb, key, wcid);

	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
		bool mcast = ieee80211_is_data(hdr->frame_control) &&
			     is_multicast_ether_addr(hdr->addr1);
		u8 idx = MT7996_BASIC_RATES_TBL;

		if (mlink) {
			if (mcast && mlink->mcast_rates_idx)
				idx = mlink->mcast_rates_idx;
			else if (beacon && mlink->beacon_rates_idx)
				idx = mlink->beacon_rates_idx;
			else
				idx = mlink->basic_rates_idx;
		}

		val = FIELD_PREP(MT_TXD6_TX_RATE, idx) | MT_TXD6_FIXED_BW;
		if (mcast)
			val |= MT_TXD6_DIS_MAT;
		txwi[6] |= cpu_to_le32(val);
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}

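/* Select frames that must take the firmware management path: management
 * frames always do; without WA firmware, EAPOL and non-data 4-address
 * frames are routed there as well so the SDO can bypass them.
 */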
static bool
mt7996_tx_use_mgmt(struct mt7996_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (ieee80211_is_mgmt(hdr->frame_control))
		return true;

	/* for SDO to bypass specific data frame */
	if (!mt7996_has_wa(dev)) {
		if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
			return true;

		if (ieee80211_has_a4(hdr->frame_control) &&
		    !ieee80211_is_data_present(hdr->frame_control))
			return true;
	}

	return false;
}

int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt76_connac_txp_common *txp;
	struct mt76_txwi_cache *t;
	int id, i, pid, nbuf = tx_info->nbuf - 1;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u8 *txwi = (u8 *)txwi_ptr;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	memset(txwi_ptr, 0, MT_TXD_SIZE);
	/* Non-QoS data is transmitted with an 802.11 header, so the host
	 * needs to fill in the txd.
	 */
	if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
		mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
				      pid, qid, 0);

	txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
	for (i = 0; i < nbuf; i++) {
		u16 len;

		len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		len |= FIELD_PREP(MT_TXP_DMA_ADDR_H,
				  tx_info->buf[i + 1].addr >> 32);
#endif

		txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
		txp->fw.len[i] = cpu_to_le16(len);
	}
	txp->fw.nbuf = nbuf;

	txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);

	if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);

	if (!key)
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

	if (!is_8023 && mt7996_tx_use_mgmt(dev, tx_info->skb))
		txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

	if (vif) {
		struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
		struct mt76_vif_link *mlink = NULL;

		if (wcid->offchannel)
			mlink = rcu_dereference(mvif->mt76.offchannel_link);
		if (!mlink)
			mlink = rcu_dereference(mvif->mt76.link[wcid->link_id]);

		txp->fw.bss_idx = mlink ? mlink->idx : mvif->deflink.mt76.idx;
	}

	txp->fw.token = cpu_to_le16(id);
	txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);

	tx_info->skb = NULL;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}

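/* Pre-fill a TXD + TXP template for a WED TX buffer, presumably letting
 * the hardware transmit directly from WED-managed memory; returns the
 * offset of the payload behind the descriptors.
 */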
u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
{
	struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
	__le32 *txwi = ptr;
	u32 val;

	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));

	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
	txwi[0] = cpu_to_le32(val);

	val = BIT(31) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
	txwi[1] = cpu_to_le32(val);

	txp->token = cpu_to_le16(token_id);
	txp->nbuf = 1;
	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));

	return MT_TXD_SIZE + sizeof(*txp);
}

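/* Kick off a BlockAck session the first time a QoS data frame completes on
 * a given TID of an HT/HE peer; VO TIDs are skipped on purpose.
 */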
static void
mt7996_tx_check_aggr(struct ieee80211_link_sta *link_sta,
		     struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u16 fc, tid;

	if (!(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	if (tid >= 6) /* skip VO queue */
		return;

	if (is_8023) {
		fc = IEEE80211_FTYPE_DATA |
		     (link_sta->sta->wme ? IEEE80211_STYPE_QOS_DATA
					 : IEEE80211_STYPE_DATA);
	} else {
		/* No need to get precise TID for Action/Management Frame,
		 * since it will not meet the following Frame Control
		 * condition anyway.
		 */

		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

		fc = le16_to_cpu(hdr->frame_control) &
		     (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
	}

	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	if (!test_and_set_bit(tid, &wcid->ampdu_state))
		ieee80211_start_tx_ba_session(link_sta->sta, tid, 0);
}


static void
mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_link_sta *link_sta,
		 struct mt76_wcid *wcid, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (link_sta) {
		wcid_idx = wcid->idx;
		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
			mt7996_tx_check_aggr(link_sta, wcid, t->skb);
	} else {
		wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}

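/* Handle a TX-free event: each DW either starts a new wcid pair, carries
 * retry/failure counts for it, or releases up to two MSDU tokens whose
 * txwi buffers are then completed and freed.
 */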
static void
mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
	struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
	struct ieee80211_link_sta *link_sta = NULL;
	struct mt76_txwi_cache *txwi;
	struct mt76_wcid *wcid = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
	void *end = data + len;
	bool wake = false;
	u16 total, count = 0;
	u8 ver;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (phy2) {
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false);
	}
	if (phy3) {
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
	}

	ver = le32_get_bits(tx_free[1], MT_TXFREE1_VER);
	if (WARN_ON_ONCE(ver < 5))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct ieee80211_sta *sta;
			u16 idx;

			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = mt76_wcid_ptr(dev, idx);
			sta = wcid_to_sta(wcid);
			if (!sta)
				goto next;

			link_sta = rcu_dereference(sta->link[wcid->link_id]);
			if (!link_sta)
				goto next;

			mt76_wcid_add_poll(&dev->mt76, wcid);
next:
			/* ver 7 has a new DW with pair = 1, skip it */
			if (ver == 7 && ((void *)(cur_info + 1) < end) &&
			    (le32_to_cpu(*(cur_info + 1)) & MT_TXFREE_INFO_PAIR))
				cur_info++;
			continue;
		} else if (info & MT_TXFREE_INFO_HEADER) {
			u32 tx_retries = 0, tx_failed = 0;

			if (!wcid)
				continue;

			tx_retries =
				FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1;
			tx_failed = tx_retries +
				!!FIELD_GET(MT_TXFREE_INFO_STAT, info);

			wcid->stats.tx_retries += tx_retries;
			wcid->stats.tx_failed += tx_failed;
			continue;
		}

		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7996_txwi_free(dev, txwi, link_sta, wcid,
					 &free_list);
		}
	}

	mt7996_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}

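/* Parse a TXS record for a wcid/PID pair: complete the matching status skb
 * if one is pending and update the per-station rate statistics from the
 * reported TX rate word.
 */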
static bool
mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
		       int pid, __le32 *txs_data)
{
	struct mt76_sta_stats *stats = &wcid->stats;
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb = NULL;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	txs = le32_to_cpu(txs_data[0]);

	mt76_tx_status_lock(mdev, &list);

	/* only report MPDU TXS */
	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == 0) {
		skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
		if (skb) {
			info = IEEE80211_SKB_CB(skb);
			if (!(txs & MT_TXS0_ACK_ERROR_MASK))
				info->flags |= IEEE80211_TX_STAT_ACK;

			info->status.ampdu_len = 1;
			info->status.ampdu_ack_len =
				!!(info->flags & IEEE80211_TX_STAT_ACK);

			info->status.rates[0].idx = -1;
		}
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) {
		struct ieee80211_sta *sta;
		u8 tid;

		sta = wcid_to_sta(wcid);
		tid = FIELD_GET(MT_TXS0_TID, txs);
		ieee80211_refresh_tx_agg_session_timer(sta, tid);
	}

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		if (rate.mcs > 13)
			goto out;

		rate.eht_gi = wcid->rate.eht_gi;
		rate.flags = RATE_INFO_FLAGS_EHT_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_320:
		rate.bw = RATE_INFO_BW_320;
		stats->tx_bw[4]++;
		break;
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	if (skb)
		mt76_tx_status_skb_done(mdev, skb, &list);
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

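/* Dispatch one TXS record: resolve the wcid it refers to, hand it to
 * mt7996_mac_add_txs_skb() and queue the station for airtime polling.
 */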
static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
{
	struct mt7996_sta_link *msta_link;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	if (pid < MT_PACKET_ID_NO_SKB)
		return;

	rcu_read_lock();

	wcid = mt76_wcid_ptr(dev, wcidx);
	if (!wcid)
		goto out;

	mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data);

	if (!wcid->sta)
		goto out;

	msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
	mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);

out:
	rcu_read_unlock();
}


bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			return true;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7996_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
			mt7996_mac_add_txs(dev, rxd);
		return false;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, data, len);
		return false;
	default:
		return true;
	}
}

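/* RX dispatch for a queued skb: TX-free and TXS events are consumed here,
 * firmware events go to the MCU layer, and normal frames are passed to
 * mac80211 via mt76_rx() once the RXD has been parsed.
 */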
void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2) &&
		    q == MT_RXQ_TXFREE_BAND2) {
			dev_kfree_skb(skb);
			break;
		}

		mt7996_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7996_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
			mt7996_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
		mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}


void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx);

	mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN);
	mt76_set(dev, reg, BIT(11) | BIT(9));
}

void mt7996_mac_reset_counters(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int i;

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));

	phy->mt76->survey_time = ktime_get_boottime();

	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7996_mcu_get_chan_mib_info(phy, true);
}


void mt7996_mac_set_coverage_class(struct mt7996_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7996_dev *dev = phy->dev;
	struct mt7996_phy *phy2 = mt7996_phy2(dev);
	struct mt7996_phy *phy3 = mt7996_phy3(dev);
	u32 reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	u8 band_idx = phy->mt76->band_idx;
	int offset;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (phy2)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       phy2->coverage_class);

	if (phy3)
		coverage_class = max_t(s16, coverage_class,
				       phy3->coverage_class);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
}


void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
{
	mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band),
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR);

	mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band),
		 FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5));
}

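/* Estimate the noise floor from the per-antenna IRPI registers: each one
 * appears to hold a sample count for one power bucket, so the average of
 * nf_power weighted by those counts approximates the noise level in -dBm.
 */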
static u8
mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	struct mt7996_dev *dev = phy->dev;
	u32 val, sum = 0, n = 0;
	int ant, i;

	for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) {
		u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant);

		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
			val = mt76_rr(dev, reg);
			sum += val * nf_power[i];
			n += val;
		}
	}

	return n ? sum / n : 0;
}

void mt7996_update_channel(struct mt76_phy *mphy)
{
	struct mt7996_phy *phy = mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7996_mcu_get_chan_mib_info(phy, false);

	nf = mt7996_phy_get_nf(phy, mphy->band_idx);
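	/* phy->noise holds a Q4 fixed-point running average of the noise
	 * floor; each new sample is blended in with a weight of 1/16.
	 */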
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state->noise = -(phy->noise >> 4);
}

static bool
mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->recovery.state) & state),
				 MT7996_RESET_TIMEOUT);

	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}


static void
mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_hw *hw = priv;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		mt7996_mcu_add_beacon(hw, vif, &vif->bss_conf);
		break;
	default:
		break;
	}
}

static void
mt7996_update_beacons(struct mt7996_dev *dev)
{
	struct mt76_phy *phy2, *phy3;

	ieee80211_iterate_active_interfaces(dev->mt76.hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, dev->mt76.hw);

	phy2 = dev->mt76.phys[MT_BAND1];
	if (!phy2)
		return;

	ieee80211_iterate_active_interfaces(phy2->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy2->hw);

	phy3 = dev->mt76.phys[MT_BAND2];
	if (!phy3)
		return;

	ieee80211_iterate_active_interfaces(phy3->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy3->hw);
}


void mt7996_tx_token_put(struct mt7996_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7996_txwi_free(dev, txwi, NULL, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}

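/* Full restart used by the recovery handler: quiesce interrupts, DMA and
 * napi, reset the DMA rings and token table, then reload the firmware and
 * bring every running phy back up.
 */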
static int
mt7996_mac_restart(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt76_dev *mdev = &dev->mt76;
	int i, ret;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}

	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
	}

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);
	if (phy2)
		set_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		set_bit(MT76_RESET, &phy3->mt76->state);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt76_txq_schedule_all(&dev->mphy);
	if (phy2)
		mt76_txq_schedule_all(phy2->mt76);
	if (phy3)
		mt76_txq_schedule_all(phy3->mt76);

	/* disable all tx/rx napi */
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc)
			napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	/* token reinit */
	mt7996_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7996_dma_reset(dev, true);

	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc) {
			napi_enable(&dev->mt76.napi[i]);
			local_bh_disable();
			napi_schedule(&dev->mt76.napi[i]);
			local_bh_enable();
		}
	}
	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);

	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}
	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
	}

	/* load firmware */
	ret = mt7996_mcu_init_firmware(dev);
	if (ret)
		goto out;

	/* set the necessary init items */
	ret = mt7996_mcu_set_eeprom(dev);
	if (ret)
		goto out;

	mt7996_mac_init(dev);
	mt7996_init_txpower(&dev->phy);
	mt7996_init_txpower(phy2);
	mt7996_init_txpower(phy3);
	ret = mt7996_txbf_init(dev);

	if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
		ret = mt7996_run(&dev->phy);
		if (ret)
			goto out;
	}

	if (phy2 && test_bit(MT76_STATE_RUNNING, &phy2->mt76->state)) {
		ret = mt7996_run(phy2);
		if (ret)
			goto out;
	}

	if (phy3 && test_bit(MT76_STATE_RUNNING, &phy3->mt76->state)) {
		ret = mt7996_run(phy3);
		if (ret)
			goto out;
	}

out:
	/* reset done */
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	napi_enable(&dev->mt76.tx_napi);
	local_bh_disable();
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);
	return ret;
}
1881
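/*
 * Full chip recovery: stop all queues and per-phy workers, retry
 * mt7996_mac_restart() up to ten times, then restart mac80211 and
 * re-arm the watchdog workers.
 */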
static void
mt7996_mac_full_reset(struct mt7996_dev *dev)
{
	struct mt7996_phy *phy2, *phy3;
	int i;

	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);
	dev->recovery.hw_full_reset = true;

	wake_up(&dev->mt76.mcu.wait);
	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	cancel_work_sync(&dev->wed_rro.work);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2)
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	if (phy3)
		cancel_delayed_work_sync(&phy3->mt76->mac_work);

	mutex_lock(&dev->mt76.mutex);
	for (i = 0; i < 10; i++) {
		if (!mt7996_mac_restart(dev))
			break;
	}
	mutex_unlock(&dev->mt76.mutex);

	if (i == 10)
		dev_err(dev->mt76.dev, "chip full reset failed\n");

	ieee80211_restart_hw(mt76_hw(dev));
	if (phy2)
		ieee80211_restart_hw(phy2->mt76->hw);
	if (phy3)
		ieee80211_restart_hw(phy3->mt76->hw);

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	dev->recovery.hw_full_reset = false;
	ieee80211_queue_delayed_work(mt76_hw(dev),
				     &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
}

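/*
 * L1 SER (system error recovery) worker. A firmware WDT exception
 * (dev->recovery.restart set) triggers a full chip reset; otherwise
 * the DMA-level recovery handshake is performed with the MCU: signal
 * DMA_STOPPED, wait for RESET_DONE, reset DMA, signal DMA_INIT, wait
 * for RECOVERY_DONE, then signal RESET_DONE and wait for NORMAL_STATE
 * before re-enabling tx/rx and waking the queues.
 */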
void mt7996_mac_reset_work(struct work_struct *work)
{
	struct mt7996_phy *phy2, *phy3;
	struct mt7996_dev *dev;
	int i;

	dev = container_of(work, struct mt7996_dev, reset_work);
	phy2 = mt7996_phy2(dev);
	phy3 = mt7996_phy3(dev);

	/* chip full reset */
	if (dev->recovery.restart) {
		/* disable WA/WM WDT */
		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
			   MT_MCU_CMD_WDT_MASK);

		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
			dev->recovery.wa_reset_count++;
		else
			dev->recovery.wm_reset_count++;

		mt7996_mac_full_reset(dev);

		/* enable mcu irq */
		mt7996_irq_enable(dev, MT_INT_MCU_CMD);
		mt7996_irq_disable(dev, 0);

		/* enable WA/WM WDT */
		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);

		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
		dev->recovery.restart = false;
		return;
	}

	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
		return;

	dev_info(dev->mt76.dev, "\n%s L1 SER recovery start.",
		 wiphy_name(dev->mt76.hw->wiphy));

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
		mtk_wed_device_stop(&dev->mt76.mmio.wed_hif2);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_stop(&dev->mt76.mmio.wed);

	ieee80211_stop_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_stop_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_stop_queues(phy3->mt76->hw);

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	wake_up(&dev->mt76.mcu.wait);

	cancel_work_sync(&dev->wed_rro.work);
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	if (phy2) {
		set_bit(MT76_RESET, &phy2->mt76->state);
		cancel_delayed_work_sync(&phy2->mt76->mac_work);
	}
	if (phy3) {
		set_bit(MT76_RESET, &phy3->mt76->state);
		cancel_delayed_work_sync(&phy3->mt76->mac_work);
	}
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7996_dma_reset(dev, false);

		mt7996_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	/* enable DMA Tx/Rx and interrupt */
	mt7996_dma_start(dev, false, false);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
		u32 wed_irq_mask = MT_INT_RRO_RX_DONE | MT_INT_TX_DONE_BAND2 |
				   dev->mt76.mmio.irqmask;

		if (mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
			wed_irq_mask &= ~MT_INT_RX_DONE_RRO_IND;

		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);

		mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
					    true);
		mt7996_irq_enable(dev, wed_irq_mask);
		mt7996_irq_disable(dev, 0);
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
		mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, MT_INT_TX_RX_DONE_EXT);
		mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
				     MT_INT_TX_RX_DONE_EXT);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_RESET, &dev->mphy.state);
	if (phy2)
		clear_bit(MT76_RESET, &phy2->mt76->state);
	if (phy3)
		clear_bit(MT76_RESET, &phy3->mt76->state);

	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_enable(&dev->mt76.napi[i]);
		local_bh_disable();
		napi_schedule(&dev->mt76.napi[i]);
		local_bh_enable();
	}

	tasklet_schedule(&dev->mt76.irq_tasklet);

	mt76_worker_enable(&dev->mt76.tx_worker);

	napi_enable(&dev->mt76.tx_napi);
	local_bh_disable();
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(mt76_hw(dev));
	if (phy2)
		ieee80211_wake_queues(phy2->mt76->hw);
	if (phy3)
		ieee80211_wake_queues(phy3->mt76->hw);

	mutex_unlock(&dev->mt76.mutex);

	mt7996_update_beacons(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
				     MT7996_WATCHDOG_TIME);
	if (phy2)
		ieee80211_queue_delayed_work(phy2->mt76->hw,
					     &phy2->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	if (phy3)
		ieee80211_queue_delayed_work(phy3->mt76->hw,
					     &phy3->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	dev_info(dev->mt76.dev, "\n%s L1 SER recovery completed.",
		 wiphy_name(dev->mt76.hw->wiphy));
}

/* firmware coredump */
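/*
 * Walk the firmware memory layout and copy each region into the
 * coredump buffer, prefixing every region with a mt7996_mem_hdr so
 * the dump can be parsed offline. The coredump is submitted even if
 * the memory dump itself is skipped, and reset_work is queued
 * afterwards to recover the device.
 */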
void mt7996_mac_dump_work(struct work_struct *work)
{
	const struct mt7996_mem_region *mem_region;
	struct mt7996_crash_data *crash_data;
	struct mt7996_dev *dev;
	struct mt7996_mem_hdr *hdr;
	size_t buf_len;
	int i;
	u32 num;
	u8 *buf;

	dev = container_of(work, struct mt7996_dev, dump_work);

	mutex_lock(&dev->dump_mutex);

	crash_data = mt7996_coredump_new(dev);
	if (!crash_data) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_coredump;
	}

	mem_region = mt7996_coredump_get_mem_layout(dev, &num);
	if (!mem_region || !crash_data->memdump_buf_len) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_memdump;
	}

	buf = crash_data->memdump_buf;
	buf_len = crash_data->memdump_buf_len;

	/* dumping memory content... */
	memset(buf, 0, buf_len);
	for (i = 0; i < num; i++) {
		if (mem_region->len > buf_len) {
			dev_warn(dev->mt76.dev, "%s len %zu is too large\n",
				 mem_region->name, mem_region->len);
			break;
		}

		/* reserve space for the header */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		mt7996_memcpy_fromio(dev, buf, mem_region->start,
				     mem_region->len);

		hdr->start = mem_region->start;
		hdr->len = mem_region->len;

		if (!mem_region->len)
			/* note: the header remains, just with zero length */
			break;

		buf += mem_region->len;
		buf_len -= mem_region->len;

		mem_region++;
	}

	mutex_unlock(&dev->dump_mutex);

skip_memdump:
	mt7996_coredump_submit(dev);
skip_coredump:
	queue_work(dev->mt76.wq, &dev->reset_work);
}

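/*
 * SER entry point, called when the MCU reports an error state. A
 * WM/WA watchdog exception marks the device for a full restart and
 * schedules the coredump worker (which queues reset_work itself);
 * any other error goes straight to reset_work.
 */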
void mt7996_reset(struct mt7996_dev *dev)
{
	if (!dev->recovery.hw_init_done)
		return;

	if (dev->recovery.hw_full_reset)
		return;

	/* wm/wa exception: do full recovery */
	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
		dev->recovery.restart = true;
		dev_info(dev->mt76.dev,
			 "%s indicated firmware crash, attempting recovery\n",
			 wiphy_name(dev->mt76.hw->wiphy));

		mt7996_irq_disable(dev, MT_INT_MCU_CMD);
		queue_work(dev->mt76.wq, &dev->dump_work);
		return;
	}

	queue_work(dev->mt76.wq, &dev->reset_work);
	wake_up(&dev->reset_wait);
}

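/*
 * Accumulate the per-band hardware MIB counters into the software
 * mib stats; called periodically from mt7996_mac_work().
 */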
void mt7996_mac_update_stats(struct mt7996_phy *phy)
{
	struct mt76_mib_stats *mib = &phy->mib;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	u32 cnt;
	int i;

	cnt = mt76_rr(dev, MT_MIB_RSCR1(band_idx));
	mib->fcs_err_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx));
	mib->rx_fifo_full_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx));
	mib->rx_vector_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx));
	mib->rx_delimiter_fail_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx));
	mib->rx_len_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx));
	mib->tx_stop_q_empty_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx));
	mib->tx_mpdu_attempts_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx));
	mib->tx_mpdu_success_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx));
	mib->rx_ampdu_valid_subframe_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx));
	mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx));
	mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt);

	cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx));
	mib->rx_pfdrop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx));
	mib->rx_ba_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx));
	mib->tx_bf_ebf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx));
	mib->tx_bf_ibf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx));
	mib->tx_mu_bf_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx));
	mib->tx_bf_rx_fb_ht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx));
	mib->tx_bf_rx_fb_vht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx));
	mib->tx_bf_rx_fb_he_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx));
	mib->tx_bf_rx_fb_eht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);

	cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx));
	mib->tx_bf_fb_trig_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx));
	mib->tx_bf_fb_cpl_cnt += cnt;

	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	/* rts count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR5(band_idx));
	mib->rts_cnt += cnt;

	/* rts retry count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR6(band_idx));
	mib->rts_retries_cnt += cnt;

	/* ba miss count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR0(band_idx));
	mib->ba_miss_cnt += cnt;

	/* ack fail count */
	cnt = mt76_rr(dev, MT_MIB_BFTFCR(band_idx));
	mib->ack_fail_cnt += cnt;

	for (i = 0; i < 16; i++) {
		cnt = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
		phy->mt76->aggr_stats[i] += cnt;
	}
}

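/*
 * Deferred rate-control worker: stations with pending rate, NSS,
 * bandwidth or SMPS changes are queued on dev->sta_rc_list and
 * handled here, where the MCU commands can be issued with the poll
 * lock dropped.
 */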
void mt7996_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
	struct mt7996_sta_link *msta_link;
	struct ieee80211_vif *vif;
	struct mt7996_vif *mvif;
	LIST_HEAD(list);
	u32 changed;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta_link = list_first_entry(&list, struct mt7996_sta_link,
					     rc_list);
		list_del_init(&msta_link->rc_list);

		changed = msta_link->changed;
		msta_link->changed = 0;
		mvif = msta_link->sta->vif;
		vif = container_of((void *)mvif, struct ieee80211_vif,
				   drv_priv);

		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7996_mcu_add_rate_ctrl(dev, msta_link->sta, vif,
						 msta_link->wcid.link_id,
						 true);

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7996_mcu_set_fixed_field(dev, msta_link->sta, NULL,
						   msta_link->wcid.link_id,
						   RATE_PARAM_MMPS_UPDATE);

		spin_lock_bh(&dev->mt76.sta_poll_lock);
	}

	spin_unlock_bh(&dev->mt76.sta_poll_lock);
}

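/*
 * Per-phy watchdog worker, re-armed every MT7996_WATCHDOG_TIME. It
 * updates the channel survey on each run and, every fifth run,
 * refreshes the MIB stats and polls the firmware for per-station
 * rate (plus admission and MSDU counters when WED is active)
 * information.
 */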
void mt7996_mac_work(struct work_struct *work)
{
	struct mt7996_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7996_mac_update_stats(phy);

		mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_RATE);
		if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_ADM_STAT);
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_MSDU_COUNT);
		}
	}

	mutex_unlock(&mphy->dev->mutex);

	mt76_tx_status_check(mphy->dev, false);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7996_WATCHDOG_TIME);
}

static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	int rdd_idx = mt7996_get_rdd_idx(phy, false);

	if (rdd_idx < 0)
		return;

	mt7996_mcu_rdd_cmd(dev, RDD_STOP, rdd_idx, 0);
}

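/*
 * Map the regulatory DFS region to the firmware region code
 * (0 = ETSI, 1 = FCC, 2 = JP), start the radar detector on the given
 * rdd index and enable detection mode.
 */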
static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int rdd_idx)
{
	int err, region;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		region = 1;
		break;
	}

	err = mt7996_mcu_rdd_cmd(dev, RDD_START, rdd_idx, region);
	if (err < 0)
		return err;

	return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, rdd_idx, 1);
}

static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	int err, rdd_idx;

	rdd_idx = mt7996_get_rdd_idx(phy, false);
	if (rdd_idx < 0)
		return -EINVAL;

	/* start CAC */
	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, rdd_idx, 0);
	if (err < 0)
		return err;

	err = mt7996_dfs_start_rdd(dev, rdd_idx);

	return err;
}

static int
mt7996_dfs_init_radar_specs(struct mt7996_phy *phy)
{
	const struct mt7996_dfs_radar_spec *radar_specs;
	struct mt7996_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7996_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7996_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

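/*
 * Drive the per-phy DFS state machine: program the radar thresholds
 * and start the detector when DFS becomes required (entering CAC),
 * signal CAC completion to the firmware when the channel becomes
 * usable, and fall back to normal operation when DFS is disabled.
 */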
int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err, rdd_idx = mt7996_get_rdd_idx(phy, false);

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	if (prev_state == dfs_state || rdd_idx < 0)
		return 0;

	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7996_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7996_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7996_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END, rdd_idx, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START, rdd_idx, 0);
	if (err < 0)
		return err;

	mt7996_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}

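/*
 * TWT wake duration is expressed in 256us units (enforced in
 * mt7996_mac_check_twt_req()), so the scheduler converts it to
 * microseconds (TSF units) with a << 8.
 */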
static int
mt7996_mac_twt_duration_align(int duration)
{
	return duration << 8;
}

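/*
 * Insert @flow into the TSF-ordered dev->twt_list, picking the first
 * gap between already scheduled flows that is large enough for the
 * flow's duration. Returns the start TSF assigned to the flow.
 */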
static u64
mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev,
			      struct mt7996_twt_flow *flow)
{
	struct mt7996_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7996_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7996_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}

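/*
 * Validate a TWT setup request: only individual, implicit agreements
 * with a 256us wake-duration unit are supported, and the wake
 * interval must be at least as long as the wake duration.
 */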
static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;

	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}

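/*
 * Return true if the station already has a TWT flow with exactly the
 * requested parameters, to avoid setting up duplicate agreements.
 */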
static bool
mt7996_mac_twt_param_equal(struct mt7996_sta_link *msta_link,
			   struct ieee80211_twt_params *twt_agrt)
{
	u16 type = le16_to_cpu(twt_agrt->req_type);
	u8 exp;
	int i;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
	for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
		struct mt7996_twt_flow *f;

		if (!(msta_link->twt.flowid_mask & BIT(i)))
			continue;

		f = &msta_link->twt.flow[i];
		if (f->duration == twt_agrt->min_twt_dur &&
		    f->mantissa == twt_agrt->mantissa &&
		    f->exp == exp &&
		    f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
		    f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
		    f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
			return true;
	}

	return false;
}

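/*
 * Handle a TWT setup request from a station: validate it, allocate a
 * flow id and agreement table slot, program the agreement into the
 * firmware and encode the resulting response (accept, dictate or
 * reject) back into the request's setup command field.
 */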
void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	struct mt7996_sta_link *msta_link = &msta->deflink;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7996_dev *dev = mt7996_hw_dev(hw);
	struct mt7996_twt_flow *flow;
	u8 flowid, table_id, exp;

	if (mt7996_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
		goto unlock;

	if (hweight8(msta_link->twt.flowid_mask) ==
	    ARRAY_SIZE(msta_link->twt.flow))
		goto unlock;

	if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
		setup_cmd = TWT_SETUP_CMD_DICTATE;
		twt_agrt->min_twt_dur = MT7996_MIN_TWT_DUR;
		goto unlock;
	}

	if (mt7996_mac_twt_param_equal(msta_link, twt_agrt))
		goto unlock;

	flowid = ffs(~msta_link->twt.flowid_mask) - 1;
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
	twt_agrt->req_type |= le16_encode_bits(flowid,
					       IEEE80211_TWT_REQTYPE_FLOWID);

	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	flow = &msta_link->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta_link->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7996_get_tsf(hw, &msta->vif->deflink);
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7996_mcu_twt_agrt_update(dev, &msta->vif->deflink, flow,
				       MCU_TWT_AGRT_ADD))
		goto unlock;

	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta_link->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt_agrt->req_type |=
		le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control = twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED;
}

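/*
 * Tear down a single TWT flow: ask the firmware to delete the
 * agreement, then release the flow id and table slot bookkeeping.
 */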
void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
				  struct mt7996_vif_link *link,
				  struct mt7996_sta_link *msta_link,
				  u8 flowid)
{
	struct mt7996_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta_link->twt.flow))
		return;

	if (!(msta_link->twt.flowid_mask & BIT(flowid)))
		return;

	flow = &msta_link->twt.flow[flowid];
	if (mt7996_mcu_twt_agrt_update(dev, link, flow, MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta_link->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);
	dev->twt.n_agrt--;
}
