1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /* Copyright (C) 2023 MediaTek Inc. */
3
4 #include <linux/devcoredump.h>
5 #include <linux/etherdevice.h>
6 #include <linux/timekeeping.h>
7 #include "mt7925.h"
8 #include "../dma.h"
9 #include "regd.h"
10 #include "mac.h"
11 #include "mcu.h"
12
mt7925_mac_wtbl_update(struct mt792x_dev * dev,int idx,u32 mask)13 bool mt7925_mac_wtbl_update(struct mt792x_dev *dev, int idx, u32 mask)
14 {
15 mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
16 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
17
18 return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
19 0, 5000);
20 }
21
mt7925_mac_sta_poll(struct mt792x_dev * dev)22 static void mt7925_mac_sta_poll(struct mt792x_dev *dev)
23 {
24 static const u8 ac_to_tid[] = {
25 [IEEE80211_AC_BE] = 0,
26 [IEEE80211_AC_BK] = 1,
27 [IEEE80211_AC_VI] = 4,
28 [IEEE80211_AC_VO] = 6
29 };
30 struct ieee80211_sta *sta;
31 struct mt792x_sta *msta;
32 struct mt792x_link_sta *mlink;
33 u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
34 LIST_HEAD(sta_poll_list);
35 struct rate_info *rate;
36 s8 rssi[4];
37 int i;
38
39 spin_lock_bh(&dev->mt76.sta_poll_lock);
40 list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
41 spin_unlock_bh(&dev->mt76.sta_poll_lock);
42
43 while (true) {
44 bool clear = false;
45 u32 addr, val;
46 u16 idx;
47 u8 bw;
48
49 if (list_empty(&sta_poll_list))
50 break;
51 mlink = list_first_entry(&sta_poll_list,
52 struct mt792x_link_sta, wcid.poll_list);
53 msta = mlink->sta;
54 spin_lock_bh(&dev->mt76.sta_poll_lock);
55 list_del_init(&mlink->wcid.poll_list);
56 spin_unlock_bh(&dev->mt76.sta_poll_lock);
57
58 idx = mlink->wcid.idx;
59 addr = mt7925_mac_wtbl_lmac_addr(dev, idx, MT_WTBL_AC0_CTT_OFFSET);
60
61 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
62 u32 tx_last = mlink->airtime_ac[i];
63 u32 rx_last = mlink->airtime_ac[i + 4];
64
65 mlink->airtime_ac[i] = mt76_rr(dev, addr);
66 mlink->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
67
68 tx_time[i] = mlink->airtime_ac[i] - tx_last;
69 rx_time[i] = mlink->airtime_ac[i + 4] - rx_last;
70
71 if ((tx_last | rx_last) & BIT(30))
72 clear = true;
73
74 addr += 8;
75 }
76
77 if (clear) {
78 mt7925_mac_wtbl_update(dev, idx,
79 MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
80 memset(mlink->airtime_ac, 0, sizeof(mlink->airtime_ac));
81 }
82
83 if (!mlink->wcid.sta)
84 continue;
85
86 sta = container_of((void *)msta, struct ieee80211_sta,
87 drv_priv);
88 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
89 u8 q = mt76_connac_lmac_mapping(i);
90 u32 tx_cur = tx_time[q];
91 u32 rx_cur = rx_time[q];
92 u8 tid = ac_to_tid[i];
93
94 if (!tx_cur && !rx_cur)
95 continue;
96
97 ieee80211_sta_register_airtime(sta, tid, tx_cur,
98 rx_cur);
99 }
100
101 /* We don't support reading GI info from txs packets.
102 * For accurate tx status reporting and AQL improvement,
103 * we need to make sure that flags match so polling GI
104 * from per-sta counters directly.
105 */
106 rate = &mlink->wcid.rate;
107
108 switch (rate->bw) {
109 case RATE_INFO_BW_160:
110 bw = IEEE80211_STA_RX_BW_160;
111 break;
112 case RATE_INFO_BW_80:
113 bw = IEEE80211_STA_RX_BW_80;
114 break;
115 case RATE_INFO_BW_40:
116 bw = IEEE80211_STA_RX_BW_40;
117 break;
118 default:
119 bw = IEEE80211_STA_RX_BW_20;
120 break;
121 }
122
123 addr = mt7925_mac_wtbl_lmac_addr(dev, idx, 6);
124 val = mt76_rr(dev, addr);
125 if (rate->flags & RATE_INFO_FLAGS_EHT_MCS) {
126 addr = mt7925_mac_wtbl_lmac_addr(dev, idx, 5);
127 val = mt76_rr(dev, addr);
128 rate->eht_gi = FIELD_GET(GENMASK(25, 24), val);
129 } else if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
130 u8 offs = MT_WTBL_TXRX_RATE_G2_HE + 2 * bw;
131
132 rate->he_gi = (val & (0x3 << offs)) >> offs;
133 } else if (rate->flags &
134 (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
135 if (val & BIT(MT_WTBL_TXRX_RATE_G2 + bw))
136 rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
137 else
138 rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
139 }
140
141 /* get signal strength of resp frames (CTS/BA/ACK) */
142 addr = mt7925_mac_wtbl_lmac_addr(dev, idx, 34);
143 val = mt76_rr(dev, addr);
144
145 rssi[0] = to_rssi(GENMASK(7, 0), val);
146 rssi[1] = to_rssi(GENMASK(15, 8), val);
147 rssi[2] = to_rssi(GENMASK(23, 16), val);
148 rssi[3] = to_rssi(GENMASK(31, 14), val);
149
150 mlink->ack_signal =
151 mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
152
153 ewma_avg_signal_add(&mlink->avg_ack_signal, -mlink->ack_signal);
154 }
155 }
156
/* Program one entry of the fixed-rate table: load the rate index and
 * the spatial-extension selector into the data registers, then issue a
 * write-and-execute command on table slot @tbl_idx.
 */
void mt7925_mac_set_fixed_rate_table(struct mt792x_dev *dev,
				     u8 tbl_idx, u16 rate_idx)
{
	mt76_wr(dev, MT_WTBL_ITDR0, rate_idx);
	/* use wtbl spe idx */
	mt76_wr(dev, MT_WTBL_ITDR1, MT_WTBL_SPE_IDX_SEL);
	mt76_wr(dev, MT_WTBL_ITCR,
		MT_WTBL_ITCR_WR | MT_WTBL_ITCR_EXEC | tbl_idx);
}
167
/* The HW does not translate the mac header to 802.3 for mesh point,
 * so undo the RX header translation: rebuild the original 802.11
 * header (plus QoS/HT-control fields) from the RXD words and the
 * translated ethernet header, and splice it back in front of the
 * payload. Returns 0 on success, -EINVAL if the frame cannot be
 * reversed (not unicast-to-me, no group-4 RXD data, or no station).
 */
static int mt7925_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt792x_sta *msta = (struct mt792x_sta *)status->wcid;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	/* only unicast-to-me frames can be reversed */
	if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
	    MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	/* group 4 carries the original frame control / seq ctrl words */
	if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	/* addr3/addr4 depend on the ToDS/FromDS combination */
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, vif->bss_conf.bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		break;
	}

	/* drop the RXD + ethernet addresses, keeping the 2-byte proto
	 * field in place so a LLC/SNAP header can be rebuilt around it
	 */
	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	/* re-insert HT control and QoS control fields where present */
	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
		       IEEE80211_HT_CTL_LEN);
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	/* non-4-address frames omit the trailing addr4 (6 bytes) */
	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	return 0;
}
247
248 static int
mt7925_mac_fill_rx_rate(struct mt792x_dev * dev,struct mt76_rx_status * status,struct ieee80211_supported_band * sband,__le32 * rxv,u8 * mode)249 mt7925_mac_fill_rx_rate(struct mt792x_dev *dev,
250 struct mt76_rx_status *status,
251 struct ieee80211_supported_band *sband,
252 __le32 *rxv, u8 *mode)
253 {
254 u32 v0, v2;
255 u8 stbc, gi, bw, dcm, nss;
256 int i, idx;
257 bool cck = false;
258
259 v0 = le32_to_cpu(rxv[0]);
260 v2 = le32_to_cpu(rxv[2]);
261
262 idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
263 i = idx;
264 nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;
265
266 stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
267 gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
268 *mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
269 dcm = FIELD_GET(MT_PRXV_DCM, v2);
270 bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);
271
272 switch (*mode) {
273 case MT_PHY_TYPE_CCK:
274 cck = true;
275 fallthrough;
276 case MT_PHY_TYPE_OFDM:
277 i = mt76_get_rate(&dev->mt76, sband, i, cck);
278 break;
279 case MT_PHY_TYPE_HT_GF:
280 case MT_PHY_TYPE_HT:
281 status->encoding = RX_ENC_HT;
282 if (gi)
283 status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
284 if (i > 31)
285 return -EINVAL;
286 break;
287 case MT_PHY_TYPE_VHT:
288 status->nss = nss;
289 status->encoding = RX_ENC_VHT;
290 if (gi)
291 status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
292 if (i > 11)
293 return -EINVAL;
294 break;
295 case MT_PHY_TYPE_HE_MU:
296 case MT_PHY_TYPE_HE_SU:
297 case MT_PHY_TYPE_HE_EXT_SU:
298 case MT_PHY_TYPE_HE_TB:
299 status->nss = nss;
300 status->encoding = RX_ENC_HE;
301 i &= GENMASK(3, 0);
302
303 if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
304 status->he_gi = gi;
305
306 status->he_dcm = dcm;
307 break;
308 case MT_PHY_TYPE_EHT_SU:
309 case MT_PHY_TYPE_EHT_TRIG:
310 case MT_PHY_TYPE_EHT_MU:
311 status->nss = nss;
312 status->encoding = RX_ENC_EHT;
313 i &= GENMASK(3, 0);
314
315 if (gi <= NL80211_RATE_INFO_EHT_GI_3_2)
316 status->eht.gi = gi;
317 break;
318 default:
319 return -EINVAL;
320 }
321 status->rate_idx = i;
322
323 switch (bw) {
324 case IEEE80211_STA_RX_BW_20:
325 break;
326 case IEEE80211_STA_RX_BW_40:
327 if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
328 (idx & MT_PRXV_TX_ER_SU_106T)) {
329 status->bw = RATE_INFO_BW_HE_RU;
330 status->he_ru =
331 NL80211_RATE_INFO_HE_RU_ALLOC_106;
332 } else {
333 status->bw = RATE_INFO_BW_40;
334 }
335 break;
336 case IEEE80211_STA_RX_BW_80:
337 status->bw = RATE_INFO_BW_80;
338 break;
339 case IEEE80211_STA_RX_BW_160:
340 status->bw = RATE_INFO_BW_160;
341 break;
342 default:
343 return -EINVAL;
344 }
345
346 status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
347 if (*mode < MT_PHY_TYPE_HE_SU && gi)
348 status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
349
350 return 0;
351 }
352
/* Parse a received frame's RX descriptor (RXD) and fill the
 * mt76_rx_status stored in skb->cb. The RXD consists of a fixed part
 * followed by optional groups (1/2/3/4/5) whose presence is flagged in
 * rxd[1]; the parser advances @rxd over each present group in order.
 * Returns 0 on success, -EINVAL on malformed/undecodable frames.
 */
static int
mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
{
	u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	bool hdr_trans, unicast, insert_ccmp_hdr = false;
	u8 chfreq, qos_ctl = 0, remove_pad, amsdu_info;
	u16 hdr_gap;
	__le32 *rxv = NULL, *rxd = (__le32 *)skb->data;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt792x_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	/* HW csum result bits, saved before status is zeroed below */
	u32 csum_status = *(u32 *)skb->cb;
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	struct mt792x_link_sta *mlink;
	u8 mode = 0; /* , band_idx; */
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;

	memset(status, 0, sizeof(*status));

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	chfreq = FIELD_GET(MT_RXD3_NORMAL_CH_FREQ, rxd3);
	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt792x_rx_get_wcid(dev, idx, unicast);

	/* queue the station for airtime/rate polling */
	if (status->wcid) {
		mlink = container_of(status->wcid, struct mt792x_link_sta, wcid);
		mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);
	}

	mt792x_get_status_freq_info(status, chfreq);

	switch (status->band) {
	case NL80211_BAND_5GHZ:
		sband = &mphy->sband_5g.sband;
		break;
	case NL80211_BAND_6GHZ:
		sband = &mphy->sband_6g.sband;
		break;
	default:
		sband = &mphy->sband_2g.sband;
		break;
	}

	if (!sband->channels)
		return -EINVAL;

	if (mt76_is_mmio(&dev->mt76) && (rxd3 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd3 & MT_RXD3_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	/* frame was decrypted by HW: IV/MIC already stripped */
	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	/* skip the fixed 8-dword RXD, then walk the optional groups;
	 * each advance is bounds-checked against the skb length
	 */
	rxd += 8;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		/* TODO: need to map rxd address */
		fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
		seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);
		qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				/* fragmented frames need the CCMP PN
				 * header re-inserted for mac80211
				 */
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				/* PN is stored little-endian in the RXD */
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				/* ampdu_ref must never be 0; skip it on wrap */
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v3;
		int ret;

		rxv = rxd;
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v3 = le32_to_cpu(rxv[3]);

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);

		/* RXD Group 5 - C-RXV (skipped, not decoded here) */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 24;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		ret = mt7925_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
		if (ret < 0)
			return ret;
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	/* strip RXD and alignment padding in front of the payload */
	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		/* fragmented frames cannot stay translated - undo it */
		if (mt7925_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/* When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *ieee80211_get_qos_ctl(hdr);
		}
		skb_set_mac_header(skb, (unsigned char *)hdr - skb->data);
	} else {
		status->flag |= RX_FLAG_8023;
	}

	mt792x_mac_assoc_rssi(dev, skb);

	/* radiotap HE/EHT info only applies to untranslated frames */
	if (rxv && !(status->flag & RX_FLAG_8023)) {
		switch (status->encoding) {
		case RX_ENC_EHT:
			mt76_connac3_mac_decode_eht_radiotap(skb, rxv, mode);
			break;
		case RX_ENC_HE:
			mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
			break;
		default:
			break;
		}
	}

	if (!status->wcid || !ieee80211_is_data_qos(fc))
		return 0;

	status->aggr = unicast && !ieee80211_is_qos_nullfunc(fc);
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
	status->qos_ctl = qos_ctl;

	return 0;
}
620
621 static void
mt7925_mac_write_txwi_8023(__le32 * txwi,struct sk_buff * skb,struct mt76_wcid * wcid)622 mt7925_mac_write_txwi_8023(__le32 *txwi, struct sk_buff *skb,
623 struct mt76_wcid *wcid)
624 {
625 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
626 u8 fc_type, fc_stype;
627 u16 ethertype;
628 bool wmm = false;
629 u32 val;
630
631 if (wcid->sta) {
632 struct ieee80211_sta *sta;
633
634 sta = container_of((void *)wcid, struct ieee80211_sta, drv_priv);
635 wmm = sta->wme;
636 }
637
638 val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
639 FIELD_PREP(MT_TXD1_TID, tid);
640
641 ethertype = get_unaligned_be16(&skb->data[12]);
642 if (ethertype >= ETH_P_802_3_MIN)
643 val |= MT_TXD1_ETH_802_3;
644
645 txwi[1] |= cpu_to_le32(val);
646
647 fc_type = IEEE80211_FTYPE_DATA >> 2;
648 fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;
649
650 val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
651 FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
652
653 txwi[2] |= cpu_to_le32(val);
654 }
655
656 static void
mt7925_mac_write_txwi_80211(struct mt76_dev * dev,__le32 * txwi,struct sk_buff * skb,struct ieee80211_key_conf * key)657 mt7925_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
658 struct sk_buff *skb,
659 struct ieee80211_key_conf *key)
660 {
661 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
662 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
663 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
664 bool multicast = is_multicast_ether_addr(hdr->addr1);
665 u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
666 __le16 fc = hdr->frame_control;
667 u8 fc_type, fc_stype;
668 u32 val;
669
670 if (ieee80211_is_action(fc) &&
671 skb->len >= IEEE80211_MIN_ACTION_SIZE(action_code) &&
672 mgmt->u.action.category == WLAN_CATEGORY_BACK &&
673 mgmt->u.action.action_code == WLAN_ACTION_ADDBA_REQ)
674 tid = MT_TX_ADDBA;
675 else if (ieee80211_is_mgmt(hdr->frame_control))
676 tid = MT_TX_NORMAL;
677
678 val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
679 FIELD_PREP(MT_TXD1_HDR_INFO,
680 ieee80211_get_hdrlen_from_skb(skb) / 2) |
681 FIELD_PREP(MT_TXD1_TID, tid);
682
683 if (!ieee80211_is_data(fc) || multicast ||
684 info->flags & IEEE80211_TX_CTL_USE_MINRATE)
685 val |= MT_TXD1_FIXED_RATE;
686
687 if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
688 key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
689 val |= MT_TXD1_BIP;
690 txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
691 }
692
693 txwi[1] |= cpu_to_le32(val);
694
695 fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
696 fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
697
698 val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
699 FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);
700
701 txwi[2] |= cpu_to_le32(val);
702
703 txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
704 if (ieee80211_is_beacon(fc))
705 txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
706
707 if (info->flags & IEEE80211_TX_CTL_INJECTED) {
708 u16 seqno = le16_to_cpu(hdr->seq_ctrl);
709
710 if (ieee80211_is_back_req(hdr->frame_control)) {
711 struct ieee80211_bar *bar;
712
713 bar = (struct ieee80211_bar *)skb->data;
714 seqno = le16_to_cpu(bar->start_seq_num);
715 }
716
717 val = MT_TXD3_SN_VALID |
718 FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
719 txwi[3] |= cpu_to_le32(val);
720 txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
721 }
722 }
723
/* Build the common TX descriptor (TXWI) for a frame: queue selection,
 * packet format, WCID/OMAC binding, protection/ack/A-MSDU flags, PID
 * for TX status reporting, and the fixed-rate index when applicable.
 * Frame-specific fields are delegated to the 802.3/802.11 helpers.
 */
void
mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_key_conf *key, int pid,
		      enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0, band_idx = 0;
	/* SDIO uses a longer TXD than MMIO */
	u32 val, sz_txd = mt76_is_mmio(dev) ? MT_TXD_SIZE : MT_SDIO_TXD_SIZE;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt76_vif_link *mvif;
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
				    BSS_CHANGED_BEACON_ENABLED));
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));
	struct mt792x_bss_conf *mconf;

	/* vif may be NULL (e.g. frames without a control vif) */
	mconf = vif ? mt792x_vif_to_link((struct mt792x_vif *)vif->drv_priv,
					 wcid->link_id) : NULL;
	mvif = mconf ? (struct mt76_vif_link *)&mconf->mt76 : NULL;

	if (mvif) {
		omac_idx = mvif->omac_idx;
		wmm_idx = mvif->wmm_idx;
		band_idx = mvif->band_idx;
	}

	/* choose HW queue and packet format by frame class */
	if (inband_disc) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_ALTX0;
	} else if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = mt76_is_mmio(dev) ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = mt76_is_mmio(dev) ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
		q_idx = wmm_idx * MT76_CONNAC_MAX_WMM_SETS +
			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));

		/* counting non-offloading skbs */
		wcid->stats.tx_bytes += skb->len;
		wcid->stats.tx_packets++;
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (band_idx)
		val |= FIELD_PREP(MT_TXD1_TGID, band_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, 15);

	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;
	if (wcid->amsdu)
		val |= MT_TXD3_HW_AMSDU;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	val = FIELD_PREP(MT_TXD5_PID, pid);
	/* host-tracked PIDs need a TXS report; BA and A-MSDU would
	 * interfere with per-packet status
	 */
	if (pid >= MT_PACKET_ID_FIRST) {
		val |= MT_TXD5_TX_STATUS_HOST;
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}

	txwi[5] = cpu_to_le32(val);

	val = MT_TXD6_DAS | FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
	/* disable address translation for non-MLD vifs and for the
	 * management/beacon queues
	 */
	if (vif && (!ieee80211_vif_is_mld(vif) ||
		    (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)))
		val |= MT_TXD6_DIS_MAT;
	txwi[6] = cpu_to_le32(val);
	txwi[7] = 0;

	if (is_8023)
		mt7925_mac_write_txwi_8023(txwi, skb, wcid);
	else
		mt7925_mac_write_txwi_80211(dev, txwi, skb, key);

	/* pick the fixed-rate table entry for fixed-rate frames */
	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		bool mcast = ieee80211_is_data(hdr->frame_control) &&
			     is_multicast_ether_addr(hdr->addr1);
		u8 idx = MT792x_BASIC_RATES_TBL;

		if (mvif) {
			if (mcast && mvif->mcast_rates_idx)
				idx = mvif->mcast_rates_idx;
			else if (beacon && mvif->beacon_rates_idx)
				idx = mvif->beacon_rates_idx;
			else
				idx = mvif->basic_rates_idx;
		}

		txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TX_RATE, idx));
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}
EXPORT_SYMBOL_GPL(mt7925_mac_write_txwi);
838
mt7925_tx_check_aggr(struct ieee80211_sta * sta,struct sk_buff * skb,struct mt76_wcid * wcid)839 static void mt7925_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb,
840 struct mt76_wcid *wcid)
841 {
842 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
843 struct ieee80211_link_sta *link_sta;
844 struct mt792x_link_sta *mlink;
845 struct mt792x_sta *msta;
846 bool is_8023;
847 u16 fc, tid;
848
849 if (!sta)
850 return;
851
852 link_sta = rcu_dereference(sta->link[wcid->link_id]);
853 if (!link_sta)
854 return;
855
856 if (!(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he))
857 return;
858
859 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
860 is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
861
862 if (is_8023) {
863 fc = IEEE80211_FTYPE_DATA |
864 (sta->wme ? IEEE80211_STYPE_QOS_DATA :
865 IEEE80211_STYPE_DATA);
866 } else {
867 /* No need to get precise TID for Action/Management Frame,
868 * since it will not meet the following Frame Control
869 * condition anyway.
870 */
871
872 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
873
874 fc = le16_to_cpu(hdr->frame_control) &
875 (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
876 }
877
878 if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
879 return;
880
881 msta = (struct mt792x_sta *)sta->drv_priv;
882
883 if (sta->mlo && msta->deflink_id != IEEE80211_LINK_UNSPECIFIED)
884 mlink = rcu_dereference(msta->link[msta->deflink_id]);
885 else
886 mlink = &msta->deflink;
887
888 if (!test_and_set_bit(tid, &mlink->wcid.ampdu_state)) {
889 if (ieee80211_start_tx_ba_session(sta, tid, 0))
890 clear_bit(tid, &mlink->wcid.ampdu_state);
891 }
892 }
893
/* Match a TXS (TX status) event to its pending skb by (wcid, pid),
 * report ACK status to mac80211, and decode the TX rate words into
 * wcid->rate plus the per-station rate statistics.
 * Returns true if a matching skb was found.
 */
static bool
mt7925_mac_add_txs_skb(struct mt792x_dev *dev, struct mt76_wcid *wcid,
		       int pid, __le32 *txs_data)
{
	struct mt76_sta_stats *stats = &wcid->stats;
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	/* status lock pairs with mt76_tx_status_unlock below; the skb
	 * (if found) is completed via mt76_tx_status_skb_done
	 */
	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
	if (!skb)
		goto out_no_skb;

	txs = le32_to_cpu(txs_data[0]);

	info = IEEE80211_SKB_CB(skb);
	if (!(txs & MT_TXS0_ACK_ERROR_MASK))
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	/* rate is reported via wcid->rate, not the legacy rates array */
	info->status.rates[0].idx = -1;

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

	/* STBC doubles the reported NSS; halve it for the real value */
	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		/* GI is not in TXS; reuse the value polled from WTBL */
		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		if (rate.mcs > 13)
			goto out;

		rate.eht_gi = wcid->rate.eht_gi;
		rate.flags = RATE_INFO_FLAGS_EHT_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	mt76_tx_status_skb_done(mdev, skb, &list);

out_no_skb:
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}
1027
mt7925_mac_add_txs(struct mt792x_dev * dev,void * data)1028 void mt7925_mac_add_txs(struct mt792x_dev *dev, void *data)
1029 {
1030 struct mt792x_link_sta *mlink = NULL;
1031 struct mt76_wcid *wcid;
1032 __le32 *txs_data = data;
1033 u16 wcidx;
1034 u8 pid;
1035
1036 if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
1037 return;
1038
1039 wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
1040 pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
1041
1042 if (pid < MT_PACKET_ID_FIRST)
1043 return;
1044
1045 if (wcidx >= MT792x_WTBL_SIZE)
1046 return;
1047
1048 rcu_read_lock();
1049
1050 wcid = mt76_wcid_ptr(dev, wcidx);
1051 if (!wcid)
1052 goto out;
1053
1054 mlink = container_of(wcid, struct mt792x_link_sta, wcid);
1055
1056 mt7925_mac_add_txs_skb(dev, wcid, pid, txs_data);
1057 if (!wcid->sta)
1058 goto out;
1059
1060 mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);
1061
1062 out:
1063 rcu_read_unlock();
1064 }
1065
mt7925_txwi_free(struct mt792x_dev * dev,struct mt76_txwi_cache * t,struct ieee80211_sta * sta,struct mt76_wcid * wcid,struct list_head * free_list)1066 void mt7925_txwi_free(struct mt792x_dev *dev, struct mt76_txwi_cache *t,
1067 struct ieee80211_sta *sta, struct mt76_wcid *wcid,
1068 struct list_head *free_list)
1069 {
1070 struct mt76_dev *mdev = &dev->mt76;
1071 __le32 *txwi;
1072 u16 wcid_idx;
1073
1074 mt76_connac_txp_skb_unmap(mdev, t);
1075 if (!t->skb)
1076 goto out;
1077
1078 txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
1079 if (sta) {
1080 if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
1081 mt7925_tx_check_aggr(sta, t->skb, wcid);
1082
1083 wcid_idx = wcid->idx;
1084 } else {
1085 wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
1086 }
1087
1088 __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
1089 out:
1090 t->skb = NULL;
1091 mt76_put_txwi(mdev, t);
1092 }
1093 EXPORT_SYMBOL_GPL(mt7925_txwi_free);
1094
/* Parse a TXRX_NOTIFY (tx-free) firmware event: release the msdu
 * tokens it lists, update per-station tx statistics, and complete
 * the freed skbs.
 */
static void
mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_txwi_cache *txwi;
	struct ieee80211_sta *sta = NULL;
	struct mt76_wcid *wcid = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
	void *end = data + len;
	bool wake = false;
	u16 total, count = 0;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);

	/* only event format version 4 or later is parsed below */
	if (WARN_ON_ONCE(le32_get_bits(tx_free[1], MT_TXFREE1_VER) < 4))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		/* stop before reading past the end of the event buffer */
		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct mt792x_link_sta *mlink;
			u16 idx;

			/* switch sta/wcid context for the entries that follow */
			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = mt76_wcid_ptr(dev, idx);
			sta = wcid_to_sta(wcid);
			if (!sta)
				continue;

			mlink = container_of(wcid, struct mt792x_link_sta, wcid);
			mt76_wcid_add_poll(&dev->mt76, &mlink->wcid);
			continue;
		}

		if (info & MT_TXFREE_INFO_HEADER) {
			/* header words carry retry/failure counters for the
			 * current wcid; COUNT includes the initial attempt
			 */
			if (wcid) {
				wcid->stats.tx_retries +=
					FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1;
				wcid->stats.tx_failed +=
					!!FIELD_GET(MT_TXFREE_INFO_STAT, info);
			}
			continue;
		}

		/* each remaining word packs up to two msdu tokens in 15-bit
		 * fields; an all-ones field means "no token in this slot"
		 */
		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7925_txwi_free(dev, txwi, sta, wcid, &free_list);
		}
	}

	mt7925_mac_sta_poll(dev);

	/* tokens were exhausted earlier; unblock the tx path now */
	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	/* free the completed skbs after all bookkeeping is done */
	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}
1178
mt7925_rx_check(struct mt76_dev * mdev,void * data,int len)1179 bool mt7925_rx_check(struct mt76_dev *mdev, void *data, int len)
1180 {
1181 struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
1182 __le32 *rxd = (__le32 *)data;
1183 __le32 *end = (__le32 *)&rxd[len / 4];
1184 enum rx_pkt_type type;
1185
1186 type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
1187 if (type != PKT_TYPE_NORMAL) {
1188 u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);
1189
1190 if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
1191 MT_RXD0_SW_PKT_TYPE_FRAME))
1192 return true;
1193 }
1194
1195 switch (type) {
1196 case PKT_TYPE_TXRX_NOTIFY:
1197 /* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
1198 mt7925_mac_tx_free(dev, data, len); /* mmio */
1199 return false;
1200 case PKT_TYPE_TXS:
1201 for (rxd += 4; rxd + 12 <= end; rxd += 12)
1202 mt7925_mac_add_txs(dev, rxd);
1203 return false;
1204 default:
1205 return true;
1206 }
1207 }
1208 EXPORT_SYMBOL_GPL(mt7925_rx_check);
1209
/* Dispatch a received skb according to the packet type in RXD word 0.
 * Normal frames are handed to mac80211 via mt76_rx(); every other
 * type is consumed (and freed) here.
 */
void mt7925_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;
	u16 flag;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	flag = le32_get_bits(rxd[0], MT_RXD0_PKT_FLAG);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		/* sw-tagged frames are handled as normal rx frames */
		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	/* rx events with flag 0x1 carry a frame, not an MCU event */
	if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
		type = PKT_TYPE_NORMAL_MCU;

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		/* PKT_TYPE_TXRX_NOTIFY can be received only by mmio devices */
		mt7925_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7925_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		/* one skb may batch several 8-dword TXS records */
		for (rxd += 2; rxd + 8 <= end; rxd += 8)
			mt7925_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL_MCU:
	case PKT_TYPE_NORMAL:
		if (!mt7925_mac_fill_rx(dev, skb)) {
			/* mt76_rx() takes ownership of the skb */
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}
EXPORT_SYMBOL_GPL(mt7925_queue_rx_skb);
1259
1260 static void
mt7925_vif_connect_iter(void * priv,u8 * mac,struct ieee80211_vif * vif)1261 mt7925_vif_connect_iter(void *priv, u8 *mac,
1262 struct ieee80211_vif *vif)
1263 {
1264 struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
1265 unsigned long valid = ieee80211_vif_is_mld(vif) ?
1266 mvif->valid_links : BIT(0);
1267 struct mt792x_dev *dev = mvif->phy->dev;
1268 struct ieee80211_hw *hw = mt76_hw(dev);
1269 struct ieee80211_bss_conf *bss_conf;
1270 struct mt792x_bss_conf *mconf;
1271 int i;
1272
1273 if (vif->type == NL80211_IFTYPE_STATION)
1274 ieee80211_disconnect(vif, true);
1275
1276 for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
1277 bss_conf = mt792x_vif_to_bss_conf(vif, i);
1278 mconf = mt792x_vif_to_link(mvif, i);
1279
1280 mt76_connac_mcu_uni_add_dev(&dev->mphy, bss_conf, &mconf->mt76,
1281 &mvif->sta.deflink.wcid, true);
1282 mt7925_mcu_set_tx(dev, bss_conf);
1283 }
1284
1285 if (vif->type == NL80211_IFTYPE_AP) {
1286 mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.deflink.wcid,
1287 true, NULL);
1288 mt7925_mcu_sta_update(dev, NULL, vif,
1289 &mvif->sta.deflink, true,
1290 MT76_STA_INFO_STATE_NONE);
1291 mt7925_mcu_uni_add_beacon_offload(dev, hw, vif, true);
1292 }
1293 }
1294
1295 /* system error recovery */
void mt7925_mac_reset_work(struct work_struct *work)
{
	struct mt792x_dev *dev = container_of(work, struct mt792x_dev,
					      reset_work);
	struct ieee80211_hw *hw = mt76_hw(dev);
	struct mt76_connac_pm *pm = &dev->pm;
	int i, ret;

	dev_dbg(dev->mt76.dev, "chip reset\n");
	dev->hw_full_reset = true;
	ieee80211_stop_queues(hw);

	/* quiesce deferred work that might touch the hardware mid-reset */
	cancel_delayed_work_sync(&dev->mphy.mac_work);
	cancel_delayed_work_sync(&pm->ps_work);
	cancel_work_sync(&pm->wake_work);

	/* retry the low-level device reset up to 10 times */
	for (i = 0; i < 10; i++) {
		mutex_lock(&dev->mt76.mutex);
		ret = mt792x_dev_reset(dev);
		mutex_unlock(&dev->mt76.mutex);

		if (!ret)
			break;
	}

	if (i == 10)
		dev_err(dev->mt76.dev, "chip reset failed\n");

	/* abort any scan that was in flight when the reset hit */
	if (test_and_clear_bit(MT76_HW_SCANNING, &dev->mphy.state)) {
		struct cfg80211_scan_info info = {
			.aborted = true,
		};

		ieee80211_scan_completed(dev->mphy.hw, &info);
	}

	dev->hw_full_reset = false;
	pm->suspended = false;
	ieee80211_wake_queues(hw);
	/* reprogram every active interface into the re-initialized fw */
	ieee80211_iterate_active_interfaces(hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7925_vif_connect_iter, NULL);
	mt76_connac_power_save_sched(&dev->mt76.phy, pm);

	mt7925_regd_change(&dev->phy, "00");
}
1342
void mt7925_coredump_work(struct work_struct *work)
{
	struct mt792x_dev *dev;
	char *dump, *data;

	dev = (struct mt792x_dev *)container_of(work, struct mt792x_dev,
						coredump.work.work);

	/* firmware is still streaming coredump chunks; check back later */
	if (time_is_after_jiffies(dev->coredump.last_activity +
				  4 * MT76_CONNAC_COREDUMP_TIMEOUT)) {
		queue_delayed_work(dev->mt76.wq, &dev->coredump.work,
				   MT76_CONNAC_COREDUMP_TIMEOUT);
		return;
	}

	dump = vzalloc(MT76_CONNAC_COREDUMP_SZ);
	data = dump;

	/* concatenate queued fragments into one buffer; if allocation
	 * failed (dump == NULL), just drain and free the fragments
	 */
	while (true) {
		struct sk_buff *skb;

		spin_lock_bh(&dev->mt76.lock);
		skb = __skb_dequeue(&dev->coredump.msg_list);
		spin_unlock_bh(&dev->mt76.lock);

		if (!skb)
			break;

		/* skip the MCU rx descriptor plus 8 bytes of event header */
		skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 8);
		if (!dump || data + skb->len - dump > MT76_CONNAC_COREDUMP_SZ) {
			dev_kfree_skb(skb);
			continue;
		}

		memcpy(data, skb->data, skb->len);
		data += skb->len;

		dev_kfree_skb(skb);
	}

	/* dev_coredumpv() takes ownership of the buffer and frees it */
	if (dump)
		dev_coredumpv(dev->mt76.dev, dump, MT76_CONNAC_COREDUMP_SZ,
			      GFP_KERNEL);

	mt792x_reset(&dev->mt76);
}
1389
1390 /* usb_sdio */
1391 static void
mt7925_usb_sdio_write_txwi(struct mt792x_dev * dev,struct mt76_wcid * wcid,enum mt76_txq_id qid,struct ieee80211_sta * sta,struct ieee80211_key_conf * key,int pid,struct sk_buff * skb)1392 mt7925_usb_sdio_write_txwi(struct mt792x_dev *dev, struct mt76_wcid *wcid,
1393 enum mt76_txq_id qid, struct ieee80211_sta *sta,
1394 struct ieee80211_key_conf *key, int pid,
1395 struct sk_buff *skb)
1396 {
1397 __le32 *txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
1398
1399 memset(txwi, 0, MT_SDIO_TXD_SIZE);
1400 mt7925_mac_write_txwi(&dev->mt76, txwi, skb, wcid, key, pid, qid, 0);
1401 skb_push(skb, MT_SDIO_TXD_SIZE);
1402 }
1403
/* Prepare a frame for transmission over USB/SDIO: register it for tx
 * status tracking, prepend the TXD and bus header, and pad the frame.
 * Returns 0 on success or a negative error code.
 */
int mt7925_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
				   enum mt76_txq_id qid, struct mt76_wcid *wcid,
				   struct ieee80211_sta *sta,
				   struct mt76_tx_info *tx_info)
{
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct sk_buff *skb = tx_info->skb;
	int err, pad, pktid;

	/* reject frames too short to hold an ethernet header */
	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	if (sta) {
		struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;

		/* request a tx status report at most every HZ/4 per station */
		if (time_after(jiffies, msta->deflink.last_txs + HZ / 4)) {
			info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
			msta->deflink.last_txs = jiffies;
		}
	}

	pktid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
	mt7925_usb_sdio_write_txwi(dev, wcid, qid, sta, key, pktid, skb);

	mt792x_skb_add_usb_sdio_hdr(dev, skb, 0);
	/* pad to dword alignment; USB transfers get 4 extra pad bytes */
	pad = round_up(skb->len, 4) - skb->len;
	if (mt76_is_usb(mdev))
		pad += 4;

	err = mt76_skb_adjust_pad(skb, pad);
	if (err)
		/* Release pktid in case of error. */
		idr_remove(&wcid->pktid, pktid);

	return err;
}
EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_prepare_skb);
1446
mt7925_usb_sdio_tx_complete_skb(struct mt76_dev * mdev,struct mt76_queue_entry * e)1447 void mt7925_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
1448 struct mt76_queue_entry *e)
1449 {
1450 __le32 *txwi = (__le32 *)(e->skb->data + MT_SDIO_HDR_SIZE);
1451 unsigned int headroom = MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
1452 struct ieee80211_sta *sta;
1453 struct mt76_wcid *wcid;
1454 u16 idx;
1455
1456 idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
1457 wcid = __mt76_wcid_ptr(mdev, idx);
1458 sta = wcid_to_sta(wcid);
1459
1460 if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE)))
1461 mt7925_tx_check_aggr(sta, e->skb, wcid);
1462
1463 skb_pull(e->skb, headroom);
1464 mt76_tx_complete_skb(mdev, e->wcid, e->skb);
1465 }
1466 EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_complete_skb);
1467
/* Poll per-station WTBL counters under the device mutex.  Always
 * returns false, i.e. reports no further pending tx status data.
 */
bool mt7925_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update)
{
	struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);

	mt792x_mutex_acquire(dev);
	mt7925_mac_sta_poll(dev);
	mt792x_mutex_release(dev);

	return false;
}
EXPORT_SYMBOL_GPL(mt7925_usb_sdio_tx_status_data);
1479
1480 #if IS_ENABLED(CONFIG_IPV6)
mt7925_set_ipv6_ns_work(struct work_struct * work)1481 void mt7925_set_ipv6_ns_work(struct work_struct *work)
1482 {
1483 struct mt792x_dev *dev = container_of(work, struct mt792x_dev,
1484 ipv6_ns_work);
1485 struct sk_buff *skb;
1486 int ret = 0;
1487
1488 do {
1489 skb = skb_dequeue(&dev->ipv6_ns_list);
1490
1491 if (!skb)
1492 break;
1493
1494 mt792x_mutex_acquire(dev);
1495 ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
1496 MCU_UNI_CMD(OFFLOAD), true);
1497 mt792x_mutex_release(dev);
1498
1499 } while (!ret);
1500
1501 if (ret)
1502 skb_queue_purge(&dev->ipv6_ns_list);
1503 }
1504 #endif
1505