// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (C) 2022 MediaTek Inc.
 */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "coredump.h"
#include "mt7996.h"
#include "../dma.h"
#include "mac.h"
#include "mcu.h"
#if defined(__FreeBSD__)
#include <linux/cache.h>
#include <linux/delay.h>
#endif

#define to_rssi(field, rcpi)	((FIELD_GET(field, rcpi) - 220) / 2)

static const struct mt7996_dfs_radar_spec etsi_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[5] =  { 1, 0, 6, 32, 28, 0, 990, 5010, 17, 1, 1 },
		[6] =  { 1, 0, 9, 32, 28, 0, 615, 5010, 27, 1, 1 },
		[7] =  { 1, 0, 15, 32, 28, 0, 240, 445, 27, 1, 1 },
		[8] =  { 1, 0, 12, 32, 28, 0, 240, 510, 42, 1, 1 },
		[9] =  { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
		[10] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
		[11] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
		[12] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
	},
};

static const struct mt7996_dfs_radar_spec fcc_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 },
		[1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 },
		[2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 },
		[3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 },
		[4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 },
	},
};

static const struct mt7996_dfs_radar_spec jp_radar_specs = {
	.pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
	.radar_pattern = {
		[0] =  { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 },
		[1] =  { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 },
		[2] =  { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 },
		[3] =  { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 },
		[4] =  { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 },
		[13] = { 1, 0, 7, 32, 28, 0, 3836, 3856, 14, 1, 1 },
		[14] = { 1, 0, 6, 32, 28, 0, 615, 5010, 110, 1, 1 },
		[15] = { 1, 1, 0, 0, 0, 0, 15, 5010, 110, 0, 0, 12, 32, 28 },
	},
};

static struct mt76_wcid *mt7996_rx_get_wcid(struct mt7996_dev *dev,
					    u16 idx, u8 band_idx)
{
	struct mt7996_sta_link *msta_link;
	struct mt7996_sta *msta;
	struct mt7996_vif *mvif;
	struct mt76_wcid *wcid;
	int i;

	wcid = mt76_wcid_ptr(dev, idx);
	if (!wcid || !wcid->sta)
		return NULL;

	if (!mt7996_band_valid(dev, band_idx))
		return NULL;

	if (wcid->phy_idx == band_idx)
		return wcid;

	msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
	msta = msta_link->sta;
	if (!msta || !msta->vif)
		return NULL;

	mvif = msta->vif;
	for (i = 0; i < ARRAY_SIZE(mvif->mt76.link); i++) {
		struct mt76_vif_link *mlink;

		mlink = rcu_dereference(mvif->mt76.link[i]);
		if (!mlink)
			continue;

		if (mlink->band_idx != band_idx)
			continue;

		msta_link = rcu_dereference(msta->link[i]);
		break;
	}

	return &msta_link->wcid;
}

bool mt7996_mac_wtbl_update(struct mt7996_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
			 0, 5000);
}

u32 mt7996_mac_wtbl_lmac_addr(struct mt7996_dev *dev, u16 wcid, u8 dw)
{
	mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
		FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));

	return MT_WTBL_LMAC_OFFS(wcid, dw);
}

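/* Poll per-station hardware state: refresh the WTBL airtime counters of
 * every station queued on sta_poll_list, report the TX/RX airtime deltas
 * to mac80211 and sample the RSSI of response frames (CTS/BA/ACK) for the
 * ACK-signal average.
 */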
static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
{
	static const u8 ac_to_tid[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 4,
		[IEEE80211_AC_VO] = 6
	};
	struct mt7996_sta_link *msta_link;
	struct mt76_vif_link *mlink;
	struct ieee80211_sta *sta;
	struct mt7996_sta *msta;
	u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
	LIST_HEAD(sta_poll_list);
	struct mt76_wcid *wcid;
	int i;

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	rcu_read_lock();

	while (true) {
		bool clear = false;
		u32 addr, val;
		u16 idx;
		s8 rssi[4];

		spin_lock_bh(&dev->mt76.sta_poll_lock);
		if (list_empty(&sta_poll_list)) {
			spin_unlock_bh(&dev->mt76.sta_poll_lock);
			break;
		}
		msta_link = list_first_entry(&sta_poll_list,
					     struct mt7996_sta_link,
					     wcid.poll_list);
		msta = msta_link->sta;
		wcid = &msta_link->wcid;
		list_del_init(&wcid->poll_list);
		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		idx = wcid->idx;

		/* refresh peer's airtime reporting */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 20);

		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u32 tx_last = msta_link->airtime_ac[i];
			u32 rx_last = msta_link->airtime_ac[i + 4];

			msta_link->airtime_ac[i] = mt76_rr(dev, addr);
			msta_link->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);

			tx_time[i] = msta_link->airtime_ac[i] - tx_last;
			rx_time[i] = msta_link->airtime_ac[i + 4] - rx_last;

			if ((tx_last | rx_last) & BIT(30))
				clear = true;

			addr += 8;
		}

		if (clear) {
			mt7996_mac_wtbl_update(dev, idx,
					       MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
			memset(msta_link->airtime_ac, 0,
			       sizeof(msta_link->airtime_ac));
		}

		if (!wcid->sta)
			continue;

		sta = container_of((void *)msta, struct ieee80211_sta,
				   drv_priv);
		for (i = 0; i < IEEE80211_NUM_ACS; i++) {
			u8 q = mt76_connac_lmac_mapping(i);
			u32 tx_cur = tx_time[q];
			u32 rx_cur = rx_time[q];
			u8 tid = ac_to_tid[i];

			if (!tx_cur && !rx_cur)
				continue;

			ieee80211_sta_register_airtime(sta, tid, tx_cur, rx_cur);
		}

		/* get signal strength of resp frames (CTS/BA/ACK) */
		addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 34);
		val = mt76_rr(dev, addr);

		rssi[0] = to_rssi(GENMASK(7, 0), val);
		rssi[1] = to_rssi(GENMASK(15, 8), val);
		rssi[2] = to_rssi(GENMASK(23, 16), val);
		rssi[3] = to_rssi(GENMASK(31, 24), val);

		mlink = rcu_dereference(msta->vif->mt76.link[wcid->link_id]);
		if (mlink) {
			struct mt76_phy *mphy = mt76_vif_link_phy(mlink);

			if (mphy)
				msta_link->ack_signal =
					mt76_rx_signal(mphy->antenna_mask,
						       rssi);
		}

		ewma_avg_signal_add(&msta_link->avg_ack_signal,
				    -msta_link->ack_signal);
	}

	rcu_read_unlock();
}

/* The HW does not translate the mac header to 802.3 for mesh point */
static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ethhdr *eth_hdr = (struct ethhdr *)(skb->data + hdr_gap);
	struct mt7996_sta_link *msta_link = (void *)status->wcid;
	struct mt7996_sta *msta = msta_link->sta;
	struct ieee80211_bss_conf *link_conf;
	__le32 *rxd = (__le32 *)skb->data;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif;
	struct ieee80211_hdr hdr;
	u16 frame_control;

	if (le32_get_bits(rxd[3], MT_RXD3_NORMAL_ADDR_TYPE) !=
	    MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	if (!(le32_to_cpu(rxd[1]) & MT_RXD1_NORMAL_GROUP_4))
		return -EINVAL;

	if (!msta || !msta->vif)
		return -EINVAL;

	sta = wcid_to_sta(status->wcid);
	vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
	link_conf = rcu_dereference(vif->link_conf[msta_link->wcid.link_id]);
	if (!link_conf)
		return -EINVAL;

	/* store the info from RXD and ethhdr to avoid being overridden */
	frame_control = le32_get_bits(rxd[8], MT_RXD8_FRAME_CONTROL);
	hdr.frame_control = cpu_to_le16(frame_control);
	hdr.seq_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_SEQ_CTRL));
	hdr.duration_id = 0;

	ether_addr_copy(hdr.addr1, vif->addr);
	ether_addr_copy(hdr.addr2, sta->addr);
	switch (frame_control & (IEEE80211_FCTL_TODS |
				 IEEE80211_FCTL_FROMDS)) {
	case 0:
		ether_addr_copy(hdr.addr3, link_conf->bssid);
		break;
	case IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_source);
		break;
	case IEEE80211_FCTL_TODS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		break;
	case IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS:
		ether_addr_copy(hdr.addr3, eth_hdr->h_dest);
		ether_addr_copy(hdr.addr4, eth_hdr->h_source);
		break;
	default:
		return -EINVAL;
	}

	skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
	if (eth_hdr->h_proto == cpu_to_be16(ETH_P_AARP) ||
	    eth_hdr->h_proto == cpu_to_be16(ETH_P_IPX))
		ether_addr_copy(skb_push(skb, ETH_ALEN), bridge_tunnel_header);
	else if (be16_to_cpu(eth_hdr->h_proto) >= ETH_P_802_3_MIN)
		ether_addr_copy(skb_push(skb, ETH_ALEN), rfc1042_header);
	else
		skb_pull(skb, 2);

	if (ieee80211_has_order(hdr.frame_control))
		memcpy(skb_push(skb, IEEE80211_HT_CTL_LEN), &rxd[11],
		       IEEE80211_HT_CTL_LEN);
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		__le16 qos_ctrl;

		qos_ctrl = cpu_to_le16(le32_get_bits(rxd[10], MT_RXD10_QOS_CTL));
		memcpy(skb_push(skb, IEEE80211_QOS_CTL_LEN), &qos_ctrl,
		       IEEE80211_QOS_CTL_LEN);
	}

	if (ieee80211_has_a4(hdr.frame_control))
		memcpy(skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
	else
		memcpy(skb_push(skb, sizeof(hdr) - 6), &hdr, sizeof(hdr) - 6);

	return 0;
}

static int
mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
			struct mt76_rx_status *status,
			struct ieee80211_supported_band *sband,
			__le32 *rxv, u8 *mode)
{
	u32 v0, v2;
	u8 stbc, gi, bw, dcm, nss;
	int i, idx;
	bool cck = false;

	v0 = le32_to_cpu(rxv[0]);
	v2 = le32_to_cpu(rxv[2]);

	idx = FIELD_GET(MT_PRXV_TX_RATE, v0);
	i = idx;
	nss = FIELD_GET(MT_PRXV_NSTS, v0) + 1;

	stbc = FIELD_GET(MT_PRXV_HT_STBC, v2);
	gi = FIELD_GET(MT_PRXV_HT_SHORT_GI, v2);
	*mode = FIELD_GET(MT_PRXV_TX_MODE, v2);
	dcm = FIELD_GET(MT_PRXV_DCM, v2);
	bw = FIELD_GET(MT_PRXV_FRAME_MODE, v2);

	switch (*mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		i = mt76_get_rate(&dev->mt76, sband, i, cck);
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		status->encoding = RX_ENC_HT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 31)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_VHT:
		status->nss = nss;
		status->encoding = RX_ENC_VHT;
		if (gi)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (i > 11)
			return -EINVAL;
		break;
	case MT_PHY_TYPE_HE_MU:
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
		status->nss = nss;
		status->encoding = RX_ENC_HE;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_HE_GI_3_2)
			status->he_gi = gi;

		status->he_dcm = dcm;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		status->nss = nss;
		status->encoding = RX_ENC_EHT;
		i &= GENMASK(3, 0);

		if (gi <= NL80211_RATE_INFO_EHT_GI_3_2)
			status->eht.gi = gi;
		break;
	default:
		return -EINVAL;
	}
	status->rate_idx = i;

	switch (bw) {
	case IEEE80211_STA_RX_BW_20:
		break;
	case IEEE80211_STA_RX_BW_40:
		if (*mode & MT_PHY_TYPE_HE_EXT_SU &&
		    (idx & MT_PRXV_TX_ER_SU_106T)) {
			status->bw = RATE_INFO_BW_HE_RU;
			status->he_ru =
				NL80211_RATE_INFO_HE_RU_ALLOC_106;
		} else {
			status->bw = RATE_INFO_BW_40;
		}
		break;
	case IEEE80211_STA_RX_BW_80:
		status->bw = RATE_INFO_BW_80;
		break;
	case IEEE80211_STA_RX_BW_160:
		status->bw = RATE_INFO_BW_160;
		break;
	/* rxv reports bw 320-1 and 320-2 separately */
	case IEEE80211_STA_RX_BW_320:
	case IEEE80211_STA_RX_BW_320 + 1:
		status->bw = RATE_INFO_BW_320;
		break;
	default:
		return -EINVAL;
	}

	status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc;
	if (*mode < MT_PHY_TYPE_HE_SU && gi)
		status->enc_flags |= RX_ENC_FLAG_SHORT_GI;

	return 0;
}

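/* For frames received on a WED RX queue with a valid PPE entry, hand the
 * skb to the WED device so its hardware flow table (PPE) entry can be
 * checked and updated.
 */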
static void
mt7996_wed_check_ppe(struct mt7996_dev *dev, struct mt76_queue *q,
		     struct mt7996_sta *msta, struct sk_buff *skb,
		     u32 info)
{
	struct ieee80211_vif *vif;
	struct wireless_dev *wdev;

	if (!msta || !msta->vif)
		return;

	if (!mt76_queue_is_wed_rx(q))
		return;

	if (!(info & MT_DMA_INFO_PPE_VLD))
		return;

	vif = container_of((void *)msta->vif, struct ieee80211_vif,
			   drv_priv);
	wdev = ieee80211_vif_to_wdev(vif);
	skb->dev = wdev->netdev;

	mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
				 FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
				 FIELD_GET(MT_DMA_PPE_ENTRY, info));
}

static int
mt7996_mac_fill_rx(struct mt7996_dev *dev, enum mt76_rxq_id q,
		   struct sk_buff *skb, u32 *info)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7996_phy *phy = &dev->phy;
	struct ieee80211_supported_band *sband;
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *rxv = NULL;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	u32 rxd3 = le32_to_cpu(rxd[3]);
	u32 rxd4 = le32_to_cpu(rxd[4]);
	u32 csum_mask = MT_RXD3_NORMAL_IP_SUM | MT_RXD3_NORMAL_UDP_TCP_SUM;
	u32 csum_status = *(u32 *)skb->cb;
	u32 mesh_mask = MT_RXD0_MESH | MT_RXD0_MHCP;
	bool is_mesh = (rxd0 & mesh_mask) == mesh_mask;
	bool unicast, insert_ccmp_hdr = false;
	u8 remove_pad, amsdu_info, band_idx;
	u8 mode = 0, qos_ctl = 0;
	bool hdr_trans;
	u16 hdr_gap;
	u16 seq_ctrl = 0;
	__le16 fc = 0;
	int idx;
	u8 hw_aggr = false;
	struct mt7996_sta *msta = NULL;

	hw_aggr = status->aggr;
	memset(status, 0, sizeof(*status));

	band_idx = FIELD_GET(MT_RXD1_NORMAL_BAND_IDX, rxd1);
	mphy = dev->mt76.phys[band_idx];
	phy = mphy->priv;
	status->phy_idx = mphy->band_idx;

	if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
		return -EINVAL;

	if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
		return -EINVAL;

	hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
	if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
		return -EINVAL;

	/* ICV error or CCMP/BIP/WPI MIC error */
	if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
		status->flag |= RX_FLAG_ONLY_MONITOR;

	unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
	idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
	status->wcid = mt7996_rx_get_wcid(dev, idx, band_idx);

	if (status->wcid) {
		struct mt7996_sta_link *msta_link;

		msta_link = container_of(status->wcid, struct mt7996_sta_link,
					 wcid);
		msta = msta_link->sta;
		mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);
	}

	status->freq = mphy->chandef.chan->center_freq;
	status->band = mphy->chandef.chan->band;
	if (status->band == NL80211_BAND_5GHZ)
		sband = &mphy->sband_5g.sband;
	else if (status->band == NL80211_BAND_6GHZ)
		sband = &mphy->sband_6g.sband;
	else
		sband = &mphy->sband_2g.sband;

	if (!sband->channels)
		return -EINVAL;

	if ((rxd3 & csum_mask) == csum_mask &&
	    !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (rxd3 & MT_RXD3_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	rxd += 8;
	if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
		u32 v0 = le32_to_cpu(rxd[0]);
		u32 v2 = le32_to_cpu(rxd[2]);

		fc = cpu_to_le16(FIELD_GET(MT_RXD8_FRAME_CONTROL, v0));
		qos_ctl = FIELD_GET(MT_RXD10_QOS_CTL, v2);
		seq_ctrl = FIELD_GET(MT_RXD10_SEQ_CTRL, v2);

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			switch (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2)) {
			case MT_CIPHER_AES_CCMP:
			case MT_CIPHER_CCMP_CCX:
			case MT_CIPHER_CCMP_256:
				insert_ccmp_hdr =
					FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
				fallthrough;
			case MT_CIPHER_TKIP:
			case MT_CIPHER_TKIP_NO_MIC:
			case MT_CIPHER_GCMP:
			case MT_CIPHER_GCMP_256:
				status->iv[0] = data[5];
				status->iv[1] = data[4];
				status->iv[2] = data[3];
				status->iv[3] = data[2];
				status->iv[4] = data[1];
				status->iv[5] = data[0];
				break;
			default:
				break;
			}
		}
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
		status->timestamp = le32_to_cpu(rxd[0]);
		status->flag |= RX_FLAG_MACTIME_START;

		if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
			status->flag |= RX_FLAG_AMPDU_DETAILS;

			/* all subframes of an A-MPDU have the same timestamp */
			if (phy->rx_ampdu_ts != status->timestamp) {
				if (!++phy->ampdu_ref)
					phy->ampdu_ref++;
			}
			phy->rx_ampdu_ts = status->timestamp;

			status->ampdu_ref = phy->ampdu_ref;
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}

	/* RXD Group 3 - P-RXV */
	if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
		u32 v3;
		int ret;

		rxv = rxd;
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;

		v3 = le32_to_cpu(rxv[3]);

		status->chains = mphy->antenna_mask;
		status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v3);
		status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v3);
		status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v3);
		status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v3);

		/* RXD Group 5 - C-RXV */
		if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
			rxd += 24;
			if ((u8 *)rxd - skb->data >= skb->len)
				return -EINVAL;
		}

		ret = mt7996_mac_fill_rx_rate(dev, status, sband, rxv, &mode);
		if (ret < 0)
			return ret;
	}

	amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
	status->amsdu = !!amsdu_info;
	if (status->amsdu) {
		status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
		status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
	}

	/* IEEE 802.11 fragmentation can only be applied to unicast frames.
	 * Hence, drop fragments with multicast/broadcast RA.
	 * This check fixes vulnerabilities, like CVE-2020-26145.
	 */
	if ((ieee80211_has_morefrags(fc) || seq_ctrl & IEEE80211_SCTL_FRAG) &&
	    FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) != MT_RXD3_NORMAL_U2M)
		return -EINVAL;

	hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
	if (hdr_trans && ieee80211_has_morefrags(fc)) {
		if (mt7996_reverse_frag0_hdr_trans(skb, hdr_gap))
			return -EINVAL;
		hdr_trans = false;
	} else {
		int pad_start = 0;

		skb_pull(skb, hdr_gap);
		if (!hdr_trans && status->amsdu && !(ieee80211_has_a4(fc) && is_mesh)) {
			pad_start = ieee80211_get_hdrlen_from_skb(skb);
		} else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
			/* When header translation failure is indicated,
			 * the hardware will insert an extra 2-byte field
			 * containing the data length after the protocol
			 * type field. This happens either when the LLC-SNAP
			 * pattern did not match, or if a VLAN header was
			 * detected.
			 */
			pad_start = 12;
			if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
				pad_start += 4;
			else
				pad_start = 0;
		}

		if (pad_start) {
			memmove(skb->data + 2, skb->data, pad_start);
			skb_pull(skb, 2);
		}
	}

	if (!hdr_trans) {
		struct ieee80211_hdr *hdr;

		if (insert_ccmp_hdr) {
			u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

			mt76_insert_ccmp_hdr(skb, key_id);
		}

		hdr = mt76_skb_get_hdr(skb);
		fc = hdr->frame_control;
		if (ieee80211_is_data_qos(fc)) {
			u8 *qos = ieee80211_get_qos_ctl(hdr);

			seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
			qos_ctl = *qos;

			/* Mesh DA/SA/Length will be stripped after hardware
			 * de-amsdu, so the A-MSDU present bit needs to be
			 * cleared here to mark this as a normal mesh frame.
			 */
			if (ieee80211_has_a4(fc) && is_mesh && status->amsdu)
				*qos &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
		}
		skb_set_mac_header(skb, (unsigned char *)hdr - skb->data);
	} else {
		status->flag |= RX_FLAG_8023;
		mt7996_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
				     *info);
		mt76_npu_check_ppe(&dev->mt76, skb, *info);
	}

	if (rxv && !(status->flag & RX_FLAG_8023)) {
		switch (status->encoding) {
		case RX_ENC_EHT:
			mt76_connac3_mac_decode_eht_radiotap(skb, rxv, mode);
			break;
		case RX_ENC_HE:
			mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
			break;
		default:
			break;
		}
	}

	if (!status->wcid || !ieee80211_is_data_qos(fc) || hw_aggr)
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(fc);
	status->qos_ctl = qos_ctl;
	status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);

	return 0;
}

static void
mt7996_mac_write_txwi_8023(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid)
{
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	u8 fc_type, fc_stype;
	u16 ethertype;
	bool wmm = false;
	u32 val;

	if (wcid->sta) {
		struct ieee80211_sta *sta = wcid_to_sta(wcid);

		wmm = sta->wme;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	ethertype = get_unaligned_be16(&skb->data[12]);
	if (ethertype >= ETH_P_802_3_MIN)
		val |= MT_TXD1_ETH_802_3;

	txwi[1] |= cpu_to_le32(val);

	fc_type = IEEE80211_FTYPE_DATA >> 2;
	fc_stype = wmm ? IEEE80211_STYPE_QOS_DATA >> 4 : 0;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	txwi[2] |= cpu_to_le32(val);

	if (wcid->amsdu)
		txwi[3] |= cpu_to_le32(MT_TXD3_HW_AMSDU);
}

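/* Fill the TXD fields derived from the 802.11 header: frame type and
 * subtype, fragment state, sequence-number overrides and the various
 * management/MLO special cases.
 */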
static void
mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
			    struct sk_buff *skb,
			    struct ieee80211_key_conf *key,
			    struct mt76_wcid *wcid)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool multicast = is_multicast_ether_addr(hdr->addr1);
	u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	__le16 fc = hdr->frame_control, sc = hdr->seq_ctrl;
	u16 seqno = le16_to_cpu(sc);
	bool hw_bigtk = false;
	u8 fc_type, fc_stype;
	u32 val;

	if (ieee80211_is_action(fc) &&
	    mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	    mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
		if (is_mt7990(&dev->mt76))
			txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TID_ADDBA, tid));
		else
			txwi[7] |= cpu_to_le32(MT_TXD7_MAC_TXD);

		tid = MT_TX_ADDBA;
	} else if (ieee80211_is_mgmt(hdr->frame_control)) {
		tid = MT_TX_NORMAL;
	}

	val = FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO,
			 ieee80211_get_hdrlen_from_skb(skb) / 2) |
	      FIELD_PREP(MT_TXD1_TID, tid);

	if (!ieee80211_is_data(fc) || multicast ||
	    info->flags & IEEE80211_TX_CTL_USE_MINRATE)
		val |= MT_TXD1_FIXED_RATE;

	if (is_mt7990(&dev->mt76) && ieee80211_is_beacon(fc) &&
	    (wcid->hw_key_idx2 == 6 || wcid->hw_key_idx2 == 7))
		hw_bigtk = true;

	if ((key && multicast && ieee80211_is_robust_mgmt_frame(skb)) || hw_bigtk) {
		val |= MT_TXD1_BIP;
		txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
	}

	txwi[1] |= cpu_to_le32(val);

	fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
	fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype);

	if (ieee80211_has_morefrags(fc) && ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_FIRST);
	else if (ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_MID);
	else if (!ieee80211_has_morefrags(fc) && !ieee80211_is_first_frag(sc))
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_LAST);
	else
		val |= FIELD_PREP(MT_TXD2_FRAG, MT_TX_FRAG_NONE);

	txwi[2] |= cpu_to_le32(val);

	txwi[3] |= cpu_to_le32(FIELD_PREP(MT_TXD3_BCM, multicast));
	if (ieee80211_is_beacon(fc)) {
		txwi[3] &= ~cpu_to_le32(MT_TXD3_SW_POWER_MGMT);
		txwi[3] |= cpu_to_le32(MT_TXD3_REM_TX_COUNT);
	}

	if (multicast && ieee80211_vif_is_mld(info->control.vif)) {
		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
	}

	if (info->flags & IEEE80211_TX_CTL_INJECTED) {
		if (ieee80211_is_back_req(hdr->frame_control)) {
			struct ieee80211_bar *bar;

			bar = (struct ieee80211_bar *)skb->data;
			seqno = le16_to_cpu(bar->start_seq_num);
		}

		val = MT_TXD3_SN_VALID |
		      FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
		txwi[3] |= cpu_to_le32(val);
		txwi[3] &= ~cpu_to_le32(MT_TXD3_HW_AMSDU);
	}

	if (ieee80211_vif_is_mld(info->control.vif) &&
	    (multicast || unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))))
		txwi[5] |= cpu_to_le32(MT_TXD5_FL);

	if (ieee80211_is_nullfunc(fc) && ieee80211_has_a4(fc) &&
	    ieee80211_vif_is_mld(info->control.vif)) {
		txwi[5] |= cpu_to_le32(MT_TXD5_FL);
		txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
	}

	if (!wcid->sta && ieee80211_is_mgmt(fc))
		txwi[6] |= cpu_to_le32(MT_TXD6_DIS_MAT);
}

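/* Compose the common part of the TX descriptor (TXD): packet format,
 * LMAC queue index, wcid/omac owner, protection and status-reporting
 * fields, then delegate the 802.3- or 802.11-specific words to the
 * helpers above and apply the fixed-rate overrides.
 */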
void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
			   struct sk_buff *skb, struct mt76_wcid *wcid,
			   struct ieee80211_key_conf *key, int pid,
			   enum mt76_txq_id qid, u32 changed)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
	u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	struct mt76_vif_link *mlink = NULL;
	struct mt7996_vif *mvif;
	unsigned int link_id;
	u16 tx_count = 15;
	u32 val;
	bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
					 BSS_CHANGED_FILS_DISCOVERY));
	bool beacon = !!(changed & (BSS_CHANGED_BEACON |
				    BSS_CHANGED_BEACON_ENABLED)) && (!inband_disc);

	if (wcid != &dev->mt76.global_wcid)
		link_id = wcid->link_id;
	else
		link_id = u32_get_bits(info->control.flags,
				       IEEE80211_TX_CTRL_MLO_LINK);

	mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
	if (mvif) {
		if (wcid->offchannel)
			mlink = rcu_dereference(mvif->mt76.offchannel_link);
		if (!mlink)
			mlink = rcu_dereference(mvif->mt76.link[link_id]);
	}

	if (mlink) {
		omac_idx = mlink->omac_idx;
		wmm_idx = mlink->wmm_idx;
		band_idx = mlink->band_idx;
	}

	if (inband_disc) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_ALTX0;
	} else if (beacon) {
		p_fmt = MT_TX_TYPE_FW;
		q_idx = MT_LMAC_BCN0;
	} else if (qid >= MT_TXQ_PSD) {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = MT_LMAC_ALTX0;
	} else {
		p_fmt = MT_TX_TYPE_CT;
		q_idx = wmm_idx * MT7996_MAX_WMM_SETS +
			mt76_connac_lmac_mapping(skb_get_queue_mapping(skb));
	}

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
	txwi[0] = cpu_to_le32(val);

	val = FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
	      FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);

	if (band_idx)
		val |= FIELD_PREP(MT_TXD1_TGID, band_idx);

	txwi[1] = cpu_to_le32(val);
	txwi[2] = 0;

	val = MT_TXD3_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
	if (key)
		val |= MT_TXD3_PROTECT_FRAME;
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		val |= MT_TXD3_NO_ACK;

	txwi[3] = cpu_to_le32(val);
	txwi[4] = 0;

	val = FIELD_PREP(MT_TXD5_PID, pid);
	if (pid >= MT_PACKET_ID_FIRST)
		val |= MT_TXD5_TX_STATUS_HOST;
	txwi[5] = cpu_to_le32(val);

	val = MT_TXD6_DAS | MT_TXD6_VTA;
	if ((q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0) ||
	    skb->protocol == cpu_to_be16(ETH_P_PAE))
		val |= MT_TXD6_DIS_MAT;

	if (is_mt7996(&dev->mt76))
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
	else if (is_8023 || !ieee80211_is_mgmt(hdr->frame_control))
		val |= FIELD_PREP(MT_TXD6_MSDU_CNT_V2, 1);

	txwi[6] = cpu_to_le32(val);
	txwi[7] = 0;

	if (is_8023)
		mt7996_mac_write_txwi_8023(dev, txwi, skb, wcid);
	else
		mt7996_mac_write_txwi_80211(dev, txwi, skb, key, wcid);

	if (txwi[1] & cpu_to_le32(MT_TXD1_FIXED_RATE)) {
		bool mcast = ieee80211_is_data(hdr->frame_control) &&
			     is_multicast_ether_addr(hdr->addr1);
		u8 idx = MT7996_BASIC_RATES_TBL;

		if (mlink) {
			if (mcast && mlink->mcast_rates_idx)
				idx = mlink->mcast_rates_idx;
			else if (beacon && mlink->beacon_rates_idx)
				idx = mlink->beacon_rates_idx;
			else
				idx = mlink->basic_rates_idx;
		}

		val = FIELD_PREP(MT_TXD6_TX_RATE, idx) | MT_TXD6_FIXED_BW;
		if (mcast)
			val |= MT_TXD6_DIS_MAT;
		txwi[6] |= cpu_to_le32(val);
		txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
	}
}

static bool
mt7996_tx_use_mgmt(struct mt7996_dev *dev, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (ieee80211_is_mgmt(hdr->frame_control))
		return true;

	/* for SDO to bypass specific data frame */
	if (!mt7996_has_wa(dev)) {
		if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
			return true;

		if (ieee80211_has_a4(hdr->frame_control) &&
		    !ieee80211_is_data_present(hdr->frame_control))
			return true;
	}

	return false;
}

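/* mt76 TX path hook: resolve the per-link wcid, allocate a TX token for
 * the frame, fill the TXD (and, for MT7996/MT7992 AddBA requests, the MAC
 * TXP) and set up the buffer descriptors handed to the hardware.
 */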
int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  enum mt76_txq_id qid, struct mt76_wcid *wcid,
			  struct ieee80211_sta *sta,
			  struct mt76_tx_info *tx_info)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt7996_vif *mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
	struct mt7996_sta *msta = sta ? (struct mt7996_sta *)sta->drv_priv : NULL;
	struct mt76_vif_link *mlink = NULL;
	struct mt76_txwi_cache *t;
	int id, i, pid, nbuf = tx_info->nbuf - 1;
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	__le32 *ptr = (__le32 *)txwi_ptr;
	u8 *txwi = (u8 *)txwi_ptr;
	u8 link_id;

	if (unlikely(tx_info->skb->len <= ETH_HLEN))
		return -EINVAL;

	if (!wcid)
		wcid = &dev->mt76.global_wcid;

	if ((is_8023 || ieee80211_is_data_qos(hdr->frame_control)) && sta->mlo &&
	    likely(tx_info->skb->protocol != cpu_to_be16(ETH_P_PAE))) {
		u8 tid = tx_info->skb->priority & IEEE80211_QOS_CTL_TID_MASK;

		link_id = (tid % 2) ? msta->seclink_id : msta->deflink_id;
	} else {
		link_id = u32_get_bits(info->control.flags,
				       IEEE80211_TX_CTRL_MLO_LINK);
	}

	if (link_id != wcid->link_id && link_id != IEEE80211_LINK_UNSPECIFIED) {
		if (msta) {
			struct mt7996_sta_link *msta_link =
				rcu_dereference(msta->link[link_id]);

			if (msta_link)
				wcid = &msta_link->wcid;
		} else if (mvif) {
			mlink = rcu_dereference(mvif->mt76.link[link_id]);
			if (mlink && mlink->wcid)
				wcid = mlink->wcid;
		}
	}

	t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
	t->skb = tx_info->skb;

	id = mt76_token_consume(mdev, &t);
	if (id < 0)
		return id;

	/* Since the rules of HW MLD address translation are not fully
	 * compatible with 802.11 EAPOL frames, do the translation in
	 * software.
	 */
	if (tx_info->skb->protocol == cpu_to_be16(ETH_P_PAE) && sta->mlo) {
		struct ieee80211_hdr *hdr = (void *)tx_info->skb->data;
		struct ieee80211_bss_conf *link_conf;
		struct ieee80211_link_sta *link_sta;

		link_conf = rcu_dereference(vif->link_conf[wcid->link_id]);
		if (!link_conf)
			return -EINVAL;

		link_sta = rcu_dereference(sta->link[wcid->link_id]);
		if (!link_sta)
			return -EINVAL;

		dma_sync_single_for_cpu(mdev->dma_dev, tx_info->buf[1].addr,
					tx_info->buf[1].len, DMA_TO_DEVICE);

		memcpy(hdr->addr1, link_sta->addr, ETH_ALEN);
		memcpy(hdr->addr2, link_conf->addr, ETH_ALEN);
		if (ieee80211_has_a4(hdr->frame_control)) {
			memcpy(hdr->addr3, sta->addr, ETH_ALEN);
			memcpy(hdr->addr4, vif->addr, ETH_ALEN);
		} else if (ieee80211_has_tods(hdr->frame_control)) {
			memcpy(hdr->addr3, sta->addr, ETH_ALEN);
		} else if (ieee80211_has_fromds(hdr->frame_control)) {
			memcpy(hdr->addr3, vif->addr, ETH_ALEN);
		}

		dma_sync_single_for_device(mdev->dma_dev, tx_info->buf[1].addr,
					   tx_info->buf[1].len, DMA_TO_DEVICE);
	}

	pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
	memset(txwi_ptr, 0, MT_TXD_SIZE);
	/* Non-QoS data is transmitted with an 802.11 header, so the host
	 * needs to fill in the TXD.
	 */
	if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
		mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
				      pid, qid, 0);

	/* MT7996 and MT7992 require the driver to provide the MAC TXP for
	 * AddBA requests.
	 */
	if (le32_to_cpu(ptr[7]) & MT_TXD7_MAC_TXD) {
		u32 val;

		ptr = (__le32 *)(txwi + MT_TXD_SIZE);
		memset((void *)ptr, 0, sizeof(struct mt76_connac_fw_txp));

		val = FIELD_PREP(MT_TXP0_TOKEN_ID0, id) |
		      MT_TXP0_TOKEN_ID0_VALID_MASK;
		ptr[0] = cpu_to_le32(val);

		val = FIELD_PREP(MT_TXP1_TID_ADDBA,
				 tx_info->skb->priority &
				 IEEE80211_QOS_CTL_TID_MASK);
		ptr[1] = cpu_to_le32(val);
		ptr[2] = cpu_to_le32(tx_info->buf[1].addr & 0xFFFFFFFF);

		val = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[1].len) |
		      MT_TXP3_ML0_MASK;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		val |= FIELD_PREP(MT_TXP3_DMA_ADDR_H,
				  tx_info->buf[1].addr >> 32);
#endif
		ptr[3] = cpu_to_le32(val);
	} else {
		struct mt76_connac_txp_common *txp;

		txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
		for (i = 0; i < nbuf; i++) {
			u16 len;

			len = FIELD_PREP(MT_TXP_BUF_LEN, tx_info->buf[i + 1].len);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
			len |= FIELD_PREP(MT_TXP_DMA_ADDR_H,
					  tx_info->buf[i + 1].addr >> 32);
#endif

			txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
			txp->fw.len[i] = cpu_to_le16(len);
		}
		txp->fw.nbuf = nbuf;

		txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);

		if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
			txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);

		if (!key)
			txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);

		if (!is_8023 && mt7996_tx_use_mgmt(dev, tx_info->skb))
			txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);

		if (mvif) {
			if (wcid->offchannel)
				mlink = rcu_dereference(mvif->mt76.offchannel_link);
			if (!mlink)
				mlink = rcu_dereference(mvif->mt76.link[wcid->link_id]);

			txp->fw.bss_idx = mlink ? mlink->idx : mvif->deflink.mt76.idx;
		}

		txp->fw.token = cpu_to_le16(id);
		txp->fw.rept_wds_wcid = cpu_to_le16(sta ? wcid->idx : 0xfff);
	}

	tx_info->skb = NULL;

	/* pass partial skb header to fw */
	tx_info->buf[1].len = MT_CT_PARSE_LEN;
	tx_info->buf[1].skip_unmap = true;
	tx_info->nbuf = MT_CT_DMA_BUF_NUM;

	return 0;
}

u32 mt7996_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
{
#if defined(__linux__)
	struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
#elif defined(__FreeBSD__)
	struct mt76_connac_fw_txp *txp = (void *)((u8 *)ptr + MT_TXD_SIZE);
#endif
	__le32 *txwi = ptr;
	u32 val;

	memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));

	val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
	txwi[0] = cpu_to_le32(val);

	val = BIT(31) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
	txwi[1] = cpu_to_le32(val);

	txp->token = cpu_to_le16(token_id);
	txp->nbuf = 1;
	txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));

	return MT_TXD_SIZE + sizeof(*txp);
}

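/* Kick off a BlockAck session the first time a QoS data frame completes
 * on a TID that does not have an aggregation session yet.
 */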
static void
mt7996_tx_check_aggr(struct ieee80211_link_sta *link_sta,
		     struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
	u16 fc, tid;

	if (!(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he))
		return;

	tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	if (tid >= 6) /* skip VO queue */
		return;

	if (is_8023) {
		fc = IEEE80211_FTYPE_DATA |
		     (link_sta->sta->wme ? IEEE80211_STYPE_QOS_DATA
					 : IEEE80211_STYPE_DATA);
	} else {
		/* No need to get precise TID for Action/Management Frame,
		 * since it will not meet the following Frame Control
		 * condition anyway.
		 */

		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

		fc = le16_to_cpu(hdr->frame_control) &
		     (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
	}

	if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
		return;

	if (!test_and_set_bit(tid, &wcid->ampdu_state))
		ieee80211_start_tx_ba_session(link_sta->sta, tid, 0);
}

static void
mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
		 struct ieee80211_link_sta *link_sta,
		 struct mt76_wcid *wcid, struct list_head *free_list)
{
	struct mt76_dev *mdev = &dev->mt76;
	__le32 *txwi;
	u16 wcid_idx;

	mt76_connac_txp_skb_unmap(mdev, t);
	if (!t->skb)
		goto out;

	txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
	if (link_sta) {
		wcid_idx = wcid->idx;
		if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) {
			struct mt7996_sta *msta;

			/* AMPDU state is stored in the primary link */
			msta = (void *)link_sta->sta->drv_priv;
			mt7996_tx_check_aggr(link_sta, &msta->deflink.wcid,
					     t->skb);
		}
	} else {
		wcid_idx = le32_get_bits(txwi[9], MT_TXD9_WLAN_IDX);
	}

	__mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);

out:
	t->skb = NULL;
	mt76_put_txwi(mdev, t);
}

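/* Handle a TXRX_NOTIFY (tx-free) event: walk the wcid/msdu-id pairs in
 * the event, release the tokens and skbs of completed MSDUs and collect
 * per-wcid retry/failure counters.
 */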
static void
mt7996_mac_tx_free(struct mt7996_dev *dev, void *data, int len)
{
	__le32 *tx_free = (__le32 *)data, *cur_info;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *phy2 = mdev->phys[MT_BAND1];
	struct mt76_phy *phy3 = mdev->phys[MT_BAND2];
	struct ieee80211_link_sta *link_sta = NULL;
	struct mt76_txwi_cache *txwi;
	struct mt76_wcid *wcid = NULL;
	LIST_HEAD(free_list);
	struct sk_buff *skb, *tmp;
#if defined(__linux__)
	void *end = data + len;
#elif defined(__FreeBSD__)
	void *end = (u8 *)data + len;
#endif
	bool wake = false;
	u16 total, count = 0;
	u8 ver;

	/* clean DMA queues and unmap buffers first */
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
	mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
	if (phy2) {
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy2->q_tx[MT_TXQ_BE], false);
	}
	if (phy3) {
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_PSD], false);
		mt76_queue_tx_cleanup(dev, phy3->q_tx[MT_TXQ_BE], false);
	}

	ver = le32_get_bits(tx_free[1], MT_TXFREE1_VER);
	if (WARN_ON_ONCE(ver < 5))
		return;

	total = le32_get_bits(tx_free[0], MT_TXFREE0_MSDU_CNT);
	for (cur_info = &tx_free[2]; count < total; cur_info++) {
		u32 msdu, info;
		u8 i;

		if (WARN_ON_ONCE((void *)cur_info >= end))
			return;
		/* 1'b1: new wcid pair.
		 * 1'b0: msdu_id with the same 'wcid pair' as above.
		 */
		info = le32_to_cpu(*cur_info);
		if (info & MT_TXFREE_INFO_PAIR) {
			struct ieee80211_sta *sta;
			unsigned long valid_links;
			struct mt7996_sta *msta;
			unsigned int id;
			u16 idx;

			idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
			wcid = mt76_wcid_ptr(dev, idx);
			sta = wcid_to_sta(wcid);
			if (!sta) {
				link_sta = NULL;
				goto next;
			}

			link_sta = rcu_dereference(sta->link[wcid->link_id]);
			if (!link_sta)
				goto next;

			msta = (struct mt7996_sta *)sta->drv_priv;
			valid_links = sta->valid_links ?: BIT(0);

			/* For an MLD STA, add each link's wcid to the
			 * sta_poll_list
			 */
			for_each_set_bit(id, &valid_links,
					 IEEE80211_MLD_MAX_NUM_LINKS) {
				struct mt7996_sta_link *msta_link;

				msta_link = rcu_dereference(msta->link[id]);
				if (!msta_link)
					continue;

				mt76_wcid_add_poll(&dev->mt76,
						   &msta_link->wcid);
			}
next:
			/* ver 7 has a new DW with pair = 1, skip it */
			if (ver == 7 && ((void *)(cur_info + 1) < end) &&
			    (le32_to_cpu(*(cur_info + 1)) & MT_TXFREE_INFO_PAIR))
				cur_info++;
			continue;
		} else if (info & MT_TXFREE_INFO_HEADER) {
			u32 tx_retries = 0, tx_failed = 0;

			if (!wcid)
				continue;

			tx_retries =
				FIELD_GET(MT_TXFREE_INFO_COUNT, info) - 1;
			tx_failed = tx_retries +
				!!FIELD_GET(MT_TXFREE_INFO_STAT, info);

			wcid->stats.tx_retries += tx_retries;
			wcid->stats.tx_failed += tx_failed;
			continue;
		}

		for (i = 0; i < 2; i++) {
			msdu = (info >> (15 * i)) & MT_TXFREE_INFO_MSDU_ID;
			if (msdu == MT_TXFREE_INFO_MSDU_ID)
				continue;

			count++;
			txwi = mt76_token_release(mdev, msdu, &wake);
			if (!txwi)
				continue;

			mt7996_txwi_free(dev, txwi, link_sta, wcid,
					 &free_list);
		}
	}

	mt7996_mac_sta_poll(dev);

	if (wake)
		mt76_set_tx_blocked(&dev->mt76, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	list_for_each_entry_safe(skb, tmp, &free_list, list) {
		skb_list_del_init(skb);
		napi_consume_skb(skb, 1);
	}
}

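/* Parse a TXS (tx status) record: complete the matching status skb, if
 * any, and update the per-station rate info and TX statistics.
 */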
static bool
mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid,
		       int pid, __le32 *txs_data)
{
	struct mt76_sta_stats *stats = &wcid->stats;
	struct ieee80211_supported_band *sband;
	struct mt76_dev *mdev = &dev->mt76;
	struct mt76_phy *mphy;
	struct ieee80211_tx_info *info;
	struct sk_buff_head list;
	struct rate_info rate = {};
	struct sk_buff *skb = NULL;
	bool cck = false;
	u32 txrate, txs, mode, stbc;

	txs = le32_to_cpu(txs_data[0]);

	mt76_tx_status_lock(mdev, &list);

	/* only report MPDU TXS */
	if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) == 0) {
		skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
		if (skb) {
			info = IEEE80211_SKB_CB(skb);
			if (!(txs & MT_TXS0_ACK_ERROR_MASK))
				info->flags |= IEEE80211_TX_STAT_ACK;

			info->status.ampdu_len = 1;
			info->status.ampdu_ack_len =
				!!(info->flags & IEEE80211_TX_STAT_ACK);

			info->status.rates[0].idx = -1;
		}
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wcid->sta) {
		struct ieee80211_sta *sta;
		u8 tid;

		sta = wcid_to_sta(wcid);
		tid = FIELD_GET(MT_TXS0_TID, txs);
		ieee80211_refresh_tx_agg_session_timer(sta, tid);
	}

	txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);

	rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
	rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
	stbc = le32_get_bits(txs_data[3], MT_TXS3_RATE_STBC);

	if (stbc && rate.nss > 1)
		rate.nss >>= 1;

	if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
		stats->tx_nss[rate.nss - 1]++;
	if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
		stats->tx_mcs[rate.mcs]++;

	mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
	switch (mode) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		fallthrough;
	case MT_PHY_TYPE_OFDM:
		mphy = mt76_dev_phy(mdev, wcid->phy_idx);

		if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &mphy->sband_5g.sband;
		else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
			sband = &mphy->sband_6g.sband;
		else
			sband = &mphy->sband_2g.sband;

		rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
		rate.legacy = sband->bitrates[rate.mcs].bitrate;
		break;
	case MT_PHY_TYPE_HT:
	case MT_PHY_TYPE_HT_GF:
		if (rate.mcs > 31)
			goto out;

		rate.flags = RATE_INFO_FLAGS_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_VHT:
		if (rate.mcs > 9)
			goto out;

		rate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
			rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case MT_PHY_TYPE_HE_SU:
	case MT_PHY_TYPE_HE_EXT_SU:
	case MT_PHY_TYPE_HE_TB:
	case MT_PHY_TYPE_HE_MU:
		if (rate.mcs > 11)
			goto out;

		rate.he_gi = wcid->rate.he_gi;
		rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
		rate.flags = RATE_INFO_FLAGS_HE_MCS;
		break;
	case MT_PHY_TYPE_EHT_SU:
	case MT_PHY_TYPE_EHT_TRIG:
	case MT_PHY_TYPE_EHT_MU:
		if (rate.mcs > 13)
			goto out;

		rate.eht_gi = wcid->rate.eht_gi;
		rate.flags = RATE_INFO_FLAGS_EHT_MCS;
		break;
	default:
		goto out;
	}

	stats->tx_mode[mode]++;

	switch (FIELD_GET(MT_TXS0_BW, txs)) {
	case IEEE80211_STA_RX_BW_320:
		rate.bw = RATE_INFO_BW_320;
		stats->tx_bw[4]++;
		break;
	case IEEE80211_STA_RX_BW_160:
		rate.bw = RATE_INFO_BW_160;
		stats->tx_bw[3]++;
		break;
	case IEEE80211_STA_RX_BW_80:
		rate.bw = RATE_INFO_BW_80;
		stats->tx_bw[2]++;
		break;
	case IEEE80211_STA_RX_BW_40:
		rate.bw = RATE_INFO_BW_40;
		stats->tx_bw[1]++;
		break;
	default:
		rate.bw = RATE_INFO_BW_20;
		stats->tx_bw[0]++;
		break;
	}
	wcid->rate = rate;

out:
	if (skb)
		mt76_tx_status_skb_done(mdev, skb, &list);
	mt76_tx_status_unlock(mdev, &list);

	return !!skb;
}

static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
{
	struct mt7996_sta_link *msta_link;
	struct mt76_wcid *wcid;
	__le32 *txs_data = data;
	u16 wcidx;
	u8 pid;

	wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
	pid = le32_get_bits(txs_data[3], MT_TXS3_PID);

	if (pid < MT_PACKET_ID_NO_SKB)
		return;

	rcu_read_lock();

	wcid = mt76_wcid_ptr(dev, wcidx);
	if (!wcid)
		goto out;

	mt7996_mac_add_txs_skb(dev, wcid, pid, txs_data);

	if (!wcid->sta)
		goto out;

	msta_link = container_of(wcid, struct mt7996_sta_link, wcid);
	mt76_wcid_add_poll(&dev->mt76, &msta_link->wcid);

out:
	rcu_read_unlock();
}

bool mt7996_rx_check(struct mt76_dev *mdev, void *data, int len)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)data;
	__le32 *end = (__le32 *)&rxd[len / 4];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			return true;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		mt7996_mac_tx_free(dev, data, len);
		return false;
	case PKT_TYPE_TXS:
		for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
			mt7996_mac_add_txs(dev, rxd);
		return false;
	case PKT_TYPE_RX_FW_MONITOR:
#if defined(CONFIG_MT7996_DEBUGFS)
		mt7996_debugfs_rx_fw_monitor(dev, data, len);
#endif
		return false;
	default:
		return true;
	}
}

void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb, u32 *info)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
	if (type != PKT_TYPE_NORMAL) {
		u32 sw_type = le32_get_bits(rxd[0], MT_RXD0_SW_PKT_TYPE_MASK);

		if (unlikely((sw_type & MT_RXD0_SW_PKT_TYPE_MAP) ==
			     MT_RXD0_SW_PKT_TYPE_FRAME))
			type = PKT_TYPE_NORMAL;
	}

	switch (type) {
	case PKT_TYPE_TXRX_NOTIFY:
		if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2) &&
		    q == MT_RXQ_TXFREE_BAND2) {
			dev_kfree_skb(skb);
			break;
		}

		mt7996_mac_tx_free(dev, skb->data, skb->len);
		napi_consume_skb(skb, 1);
		break;
	case PKT_TYPE_RX_EVENT:
		mt7996_mcu_rx_event(dev, skb);
		break;
	case PKT_TYPE_TXS:
		for (rxd += MT_TXS_HDR_SIZE; rxd + MT_TXS_SIZE <= end; rxd += MT_TXS_SIZE)
			mt7996_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_FW_MONITOR:
#if defined(CONFIG_MT7996_DEBUGFS)
		mt7996_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
#endif
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_NORMAL:
		if (!mt7996_mac_fill_rx(dev, q, skb, info)) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}

static struct mt7996_msdu_page *
mt7996_msdu_page_get_from_cache(struct mt7996_dev *dev)
{
	struct mt7996_msdu_page *p = NULL;

	spin_lock(&dev->wed_rro.lock);

	if (!list_empty(&dev->wed_rro.page_cache)) {
		p = list_first_entry(&dev->wed_rro.page_cache,
				     struct mt7996_msdu_page, list);
		list_del(&p->list);
	}

	spin_unlock(&dev->wed_rro.lock);

	return p;
}

static struct mt7996_msdu_page *mt7996_msdu_page_get(struct mt7996_dev *dev)
{
	struct mt7996_msdu_page *p;

	p = mt7996_msdu_page_get_from_cache(dev);
	if (!p) {
		p = kzalloc(L1_CACHE_ALIGN(sizeof(*p)), GFP_ATOMIC);
		if (p)
			INIT_LIST_HEAD(&p->list);
	}

	return p;
}

static void mt7996_msdu_page_put_to_cache(struct mt7996_dev *dev,
					  struct mt7996_msdu_page *p)
{
	if (p->buf) {
		mt76_put_page_pool_buf(p->buf, false);
		p->buf = NULL;
	}

	spin_lock(&dev->wed_rro.lock);
	list_add(&p->list, &dev->wed_rro.page_cache);
	spin_unlock(&dev->wed_rro.lock);
}

static void mt7996_msdu_page_free_cache(struct mt7996_dev *dev)
{
	while (true) {
		struct mt7996_msdu_page *p;

		p = mt7996_msdu_page_get_from_cache(dev);
		if (!p)
			break;

		if (p->buf)
			mt76_put_page_pool_buf(p->buf, false);

		kfree(p);
	}
}

static u32 mt7996_msdu_page_hash_from_addr(dma_addr_t dma_addr)
{
	u32 val = 0;
	int i = 0;

	while (dma_addr) {
		val += (u32)((dma_addr & 0xff) + i) % MT7996_RRO_MSDU_PG_HASH_SIZE;
		dma_addr >>= 8;
		i += 13;
	}

	return val % MT7996_RRO_MSDU_PG_HASH_SIZE;
}

static struct mt7996_msdu_page *
mt7996_rro_msdu_page_get(struct mt7996_dev *dev, dma_addr_t dma_addr)
{
	u32 hash = mt7996_msdu_page_hash_from_addr(dma_addr);
	struct mt7996_msdu_page *p, *tmp, *addr = NULL;

	spin_lock(&dev->wed_rro.lock);

	list_for_each_entry_safe(p, tmp, &dev->wed_rro.page_map[hash],
				 list) {
		if (p->dma_addr == dma_addr) {
			list_del(&p->list);
			addr = p;
			break;
		}
	}

	spin_unlock(&dev->wed_rro.lock);

	return addr;
}

static void mt7996_rx_token_put(struct mt7996_dev *dev)
{
	int i;

	for (i = 0; i < dev->mt76.rx_token_size; i++) {
		struct mt76_txwi_cache *t;

		t = mt76_rx_token_release(&dev->mt76, i);
		if (!t || !t->ptr)
			continue;

		mt76_put_page_pool_buf(t->ptr, false);
		t->dma_addr = 0;
		t->ptr = NULL;

		mt76_put_rxwi(&dev->mt76, t);
	}
}

void mt7996_rro_msdu_page_map_free(struct mt7996_dev *dev)
{
	struct mt7996_msdu_page *p, *tmp;
	int i;

	local_bh_disable();

	for (i = 0; i < ARRAY_SIZE(dev->wed_rro.page_map); i++) {
		list_for_each_entry_safe(p, tmp, &dev->wed_rro.page_map[i],
					 list) {
			list_del_init(&p->list);
			if (p->buf)
				mt76_put_page_pool_buf(p->buf, false);
			kfree(p);
		}
	}
	mt7996_msdu_page_free_cache(dev);

	local_bh_enable();

	mt7996_rx_token_put(dev);
}

int mt7996_rro_msdu_page_add(struct mt76_dev *mdev, struct mt76_queue *q,
			     dma_addr_t dma_addr, void *data)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct mt7996_msdu_page_info *pinfo = data;
	struct mt7996_msdu_page *p;
	u32 hash;

	pinfo->data |= cpu_to_le32(FIELD_PREP(MSDU_PAGE_INFO_OWNER_MASK, 1));
	p = mt7996_msdu_page_get(dev);
	if (!p)
		return -ENOMEM;

	p->buf = data;
	p->dma_addr = dma_addr;
	p->q = q;

	hash = mt7996_msdu_page_hash_from_addr(dma_addr);

	spin_lock(&dev->wed_rro.lock);
	list_add_tail(&p->list, &dev->wed_rro.page_map[hash]);
	spin_unlock(&dev->wed_rro.lock);

	return 0;
}

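/* Look up the RRO address element for a session/sequence-number pair.
 * Session id MT7996_RRO_MAX_SESSION uses the dedicated session buffer;
 * all other sessions are spread across the addr_elem arrays.
 */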
static struct mt7996_wed_rro_addr *
mt7996_rro_addr_elem_get(struct mt7996_dev *dev, u16 session_id, u16 seq_num)
{
	u32 idx = 0;
#if defined(__linux__)
	void *addr;
#elif defined(__FreeBSD__)
	u8 *addr;
#endif

	if (session_id == MT7996_RRO_MAX_SESSION) {
		addr = dev->wed_rro.session.ptr;
	} else {
		idx = session_id / MT7996_RRO_BA_BITMAP_SESSION_SIZE;
		addr = dev->wed_rro.addr_elem[idx].ptr;

		idx = session_id % MT7996_RRO_BA_BITMAP_SESSION_SIZE;
		idx = idx * MT7996_RRO_WINDOW_MAX_LEN;
	}
	idx += seq_num % MT7996_RRO_WINDOW_MAX_LEN;

	return (void *)(addr + idx * sizeof(struct mt7996_wed_rro_addr));
}

#define MT7996_RRO_SN_MASK	GENMASK(11, 0)

void mt7996_rro_rx_process(struct mt76_dev *mdev, void *data)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct mt76_wed_rro_ind *cmd = (struct mt76_wed_rro_ind *)data;
	u32 cmd_data0 = le32_to_cpu(cmd->data0);
	u32 cmd_data1 = le32_to_cpu(cmd->data1);
	u8 ind_reason = FIELD_GET(RRO_IND_DATA0_IND_REASON_MASK, cmd_data0);
	u16 start_seq = FIELD_GET(RRO_IND_DATA0_START_SEQ_MASK, cmd_data0);
	u16 seq_id = FIELD_GET(RRO_IND_DATA0_SEQ_ID_MASK, cmd_data0);
	u16 ind_count = FIELD_GET(RRO_IND_DATA1_IND_COUNT_MASK, cmd_data1);
	struct mt7996_msdu_page_info *pinfo = NULL;
	struct mt7996_msdu_page *p = NULL;
	int i, seq_num = 0;

	for (i = 0; i < ind_count; i++) {
		struct mt7996_wed_rro_addr *e;
		struct mt76_rx_status *status;
		struct mt7996_rro_hif *rxd;
		int j, len, qid, data_len;
		struct mt76_txwi_cache *t;
		dma_addr_t dma_addr = 0;
		u16 rx_token_id, count;
		struct mt76_queue *q;
		struct sk_buff *skb;
		u32 info = 0, data;
		u8 signature;
		void *buf;
		bool ls;

		seq_num = FIELD_GET(MT7996_RRO_SN_MASK, start_seq + i);
		e = mt7996_rro_addr_elem_get(dev, seq_id, seq_num);
		data = le32_to_cpu(e->data);
		signature = FIELD_GET(WED_RRO_ADDR_SIGNATURE_MASK, data);
		if (signature != (seq_num / MT7996_RRO_WINDOW_MAX_LEN)) {
			u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK,
					     0xff);

			e->data |= cpu_to_le32(val);
			goto update_ack_seq_num;
		}

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		dma_addr = FIELD_GET(WED_RRO_ADDR_HEAD_HIGH_MASK, data);
		dma_addr <<= 32;
#endif
		dma_addr |= le32_to_cpu(e->head_low);

		count = FIELD_GET(WED_RRO_ADDR_COUNT_MASK, data);
		for (j = 0; j < count; j++) {
			if (!p) {
				p = mt7996_rro_msdu_page_get(dev, dma_addr);
				if (!p)
					continue;

				dma_sync_single_for_cpu(mdev->dma_dev, p->dma_addr,
							SKB_WITH_OVERHEAD(p->q->buf_size),
							page_pool_get_dma_dir(p->q->page_pool));
				pinfo = (struct mt7996_msdu_page_info *)p->buf;
			}

			rxd = &pinfo->rxd[j % MT7996_MAX_HIF_RXD_IN_PG];
			len = FIELD_GET(RRO_HIF_DATA1_SDL_MASK,
					le32_to_cpu(rxd->data1));

			rx_token_id = FIELD_GET(RRO_HIF_DATA4_RX_TOKEN_ID_MASK,
						le32_to_cpu(rxd->data4));
			t = mt76_rx_token_release(mdev, rx_token_id);
			if (!t)
				goto next_page;

			qid = t->qid;
			buf = t->ptr;
			q = &mdev->q_rx[qid];
			dma_sync_single_for_cpu(mdev->dma_dev, t->dma_addr,
						SKB_WITH_OVERHEAD(q->buf_size),
						page_pool_get_dma_dir(q->page_pool));

			t->dma_addr = 0;
			t->ptr = NULL;
			mt76_put_rxwi(mdev, t);
			if (!buf)
				goto next_page;

			if (q->rx_head)
				data_len = q->buf_size;
			else
				data_len = SKB_WITH_OVERHEAD(q->buf_size);

			if (data_len < len + q->buf_offset) {
				dev_kfree_skb(q->rx_head);
				mt76_put_page_pool_buf(buf, false);
				q->rx_head = NULL;
				goto next_page;
			}

			ls = FIELD_GET(RRO_HIF_DATA1_LS_MASK,
				       le32_to_cpu(rxd->data1));

void mt7996_rro_rx_process(struct mt76_dev *mdev, void *data)
{
	struct mt7996_dev *dev = container_of(mdev, struct mt7996_dev, mt76);
	struct mt76_wed_rro_ind *cmd = (struct mt76_wed_rro_ind *)data;
	u32 cmd_data0 = le32_to_cpu(cmd->data0);
	u32 cmd_data1 = le32_to_cpu(cmd->data1);
	u8 ind_reason = FIELD_GET(RRO_IND_DATA0_IND_REASON_MASK, cmd_data0);
	u16 start_seq = FIELD_GET(RRO_IND_DATA0_START_SEQ_MASK, cmd_data0);
	u16 seq_id = FIELD_GET(RRO_IND_DATA0_SEQ_ID_MASK, cmd_data0);
	u16 ind_count = FIELD_GET(RRO_IND_DATA1_IND_COUNT_MASK, cmd_data1);
	struct mt7996_msdu_page_info *pinfo = NULL;
	struct mt7996_msdu_page *p = NULL;
	int i, seq_num = 0;

	for (i = 0; i < ind_count; i++) {
		struct mt7996_wed_rro_addr *e;
		struct mt76_rx_status *status;
		struct mt7996_rro_hif *rxd;
		int j, len, qid, data_len;
		struct mt76_txwi_cache *t;
		dma_addr_t dma_addr = 0;
		u16 rx_token_id, count;
		struct mt76_queue *q;
		struct sk_buff *skb;
		u32 info = 0, data;
		u8 signature;
		void *buf;
		bool ls;

		seq_num = FIELD_GET(MT7996_RRO_SN_MASK, start_seq + i);
		e = mt7996_rro_addr_elem_get(dev, seq_id, seq_num);
		data = le32_to_cpu(e->data);
		signature = FIELD_GET(WED_RRO_ADDR_SIGNATURE_MASK, data);
		if (signature != (seq_num / MT7996_RRO_WINDOW_MAX_LEN)) {
			u32 val = FIELD_PREP(WED_RRO_ADDR_SIGNATURE_MASK,
					     0xff);

			e->data |= cpu_to_le32(val);
			goto update_ack_seq_num;
		}

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		dma_addr = FIELD_GET(WED_RRO_ADDR_HEAD_HIGH_MASK, data);
		dma_addr <<= 32;
#endif
		dma_addr |= le32_to_cpu(e->head_low);

		count = FIELD_GET(WED_RRO_ADDR_COUNT_MASK, data);
		for (j = 0; j < count; j++) {
			if (!p) {
				p = mt7996_rro_msdu_page_get(dev, dma_addr);
				if (!p)
					continue;

				dma_sync_single_for_cpu(mdev->dma_dev, p->dma_addr,
							SKB_WITH_OVERHEAD(p->q->buf_size),
							page_pool_get_dma_dir(p->q->page_pool));
				pinfo = (struct mt7996_msdu_page_info *)p->buf;
			}

			rxd = &pinfo->rxd[j % MT7996_MAX_HIF_RXD_IN_PG];
			len = FIELD_GET(RRO_HIF_DATA1_SDL_MASK,
					le32_to_cpu(rxd->data1));

			rx_token_id = FIELD_GET(RRO_HIF_DATA4_RX_TOKEN_ID_MASK,
						le32_to_cpu(rxd->data4));
			t = mt76_rx_token_release(mdev, rx_token_id);
			if (!t)
				goto next_page;

			qid = t->qid;
			buf = t->ptr;
			q = &mdev->q_rx[qid];
			dma_sync_single_for_cpu(mdev->dma_dev, t->dma_addr,
						SKB_WITH_OVERHEAD(q->buf_size),
						page_pool_get_dma_dir(q->page_pool));

			t->dma_addr = 0;
			t->ptr = NULL;
			mt76_put_rxwi(mdev, t);
			if (!buf)
				goto next_page;

			if (q->rx_head)
				data_len = q->buf_size;
			else
				data_len = SKB_WITH_OVERHEAD(q->buf_size);

			if (data_len < len + q->buf_offset) {
				dev_kfree_skb(q->rx_head);
				mt76_put_page_pool_buf(buf, false);
				q->rx_head = NULL;
				goto next_page;
			}

			ls = FIELD_GET(RRO_HIF_DATA1_LS_MASK,
				       le32_to_cpu(rxd->data1));
			if (q->rx_head) {
				/* TODO: Take into account non-linear skb. */
				mt76_put_page_pool_buf(buf, false);
				if (ls) {
					dev_kfree_skb(q->rx_head);
					q->rx_head = NULL;
				}
				goto next_page;
			}

			if (ls && !mt7996_rx_check(mdev, buf, len))
				goto next_page;

			skb = build_skb(buf, q->buf_size);
			if (!skb)
				goto next_page;

			skb_reserve(skb, q->buf_offset);
			skb_mark_for_recycle(skb);
			__skb_put(skb, len);

			if (ind_reason == 1 || ind_reason == 2) {
				dev_kfree_skb(skb);
				goto next_page;
			}

			if (!ls) {
				q->rx_head = skb;
				goto next_page;
			}

			status = (struct mt76_rx_status *)skb->cb;
			if (seq_id != MT7996_RRO_MAX_SESSION)
				status->aggr = true;

			mt7996_queue_rx_skb(mdev, qid, skb, &info);
next_page:
			if ((j + 1) % MT7996_MAX_HIF_RXD_IN_PG == 0) {
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
				dma_addr =
					FIELD_GET(MSDU_PAGE_INFO_PG_HIGH_MASK,
						  le32_to_cpu(pinfo->data));
				dma_addr <<= 32;
				dma_addr |= le32_to_cpu(pinfo->pg_low);
#else
				dma_addr = le32_to_cpu(pinfo->pg_low);
#endif
				mt7996_msdu_page_put_to_cache(dev, p);
				p = NULL;
			}
		}

update_ack_seq_num:
		if ((i + 1) % 4 == 0)
			mt76_wr(dev, MT_RRO_ACK_SN_CTRL,
				FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK,
					   seq_id) |
				FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK,
					   seq_num));
		if (p) {
			mt7996_msdu_page_put_to_cache(dev, p);
			p = NULL;
		}
	}

	/* Update ack_seq_num for remaining addr_elem */
	if (i % 4)
		mt76_wr(dev, MT_RRO_ACK_SN_CTRL,
			FIELD_PREP(MT_RRO_ACK_SN_CTRL_SESSION_MASK, seq_id) |
			FIELD_PREP(MT_RRO_ACK_SN_CTRL_SN_MASK, seq_num));
}

void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u32 reg = MT_WF_PHYRX_BAND_RX_CTRL1(phy->mt76->band_idx);

	mt76_clear(dev, reg, MT_WF_PHYRX_BAND_RX_CTRL1_STSCNT_EN);
	mt76_set(dev, reg, BIT(11) | BIT(9));
}

void mt7996_mac_reset_counters(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	int i;

	for (i = 0; i < 16; i++)
		mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));

	phy->mt76->survey_time = ktime_get_boottime();

	memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));

	/* reset airtime counters */
	mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band_idx),
		 MT_WF_RMAC_MIB_RXTIME_CLR);

	mt7996_mcu_get_chan_mib_info(phy, true);
}
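
/*
 * Program the CCK/OFDM ACK timeout registers from the coverage class.
 * Each coverage-class step accounts for 3 us of extra air-propagation
 * time, added on top of the base PLCP/CCA timeout values; the driver
 * applies the largest coverage class configured on any active phy.
 */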

void mt7996_mac_set_coverage_class(struct mt7996_phy *phy)
{
	s16 coverage_class = phy->coverage_class;
	struct mt7996_dev *dev = phy->dev;
	struct mt7996_phy *phy2 = mt7996_phy2(dev);
	struct mt7996_phy *phy3 = mt7996_phy3(dev);
	u32 reg_offset;
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
	u8 band_idx = phy->mt76->band_idx;
	int offset;

	if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
		return;

	if (phy2)
		coverage_class = max_t(s16, dev->phy.coverage_class,
				       phy2->coverage_class);

	if (phy3)
		coverage_class = max_t(s16, coverage_class,
				       phy3->coverage_class);

	offset = 3 * coverage_class;
	reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
		     FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);

	mt76_wr(dev, MT_TMAC_CDTR(band_idx), cck + reg_offset);
	mt76_wr(dev, MT_TMAC_ODTR(band_idx), ofdm + reg_offset);
}

void mt7996_mac_enable_nf(struct mt7996_dev *dev, u8 band)
{
	mt76_set(dev, MT_WF_PHYRX_CSD_BAND_RXTD12(band),
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR_ONLY |
		 MT_WF_PHYRX_CSD_BAND_RXTD12_IRPI_SW_CLR);

	mt76_set(dev, MT_WF_PHYRX_BAND_RX_CTRL1(band),
		 FIELD_PREP(MT_WF_PHYRX_BAND_RX_CTRL1_IPI_EN, 0x5));
}

static u8
mt7996_phy_get_nf(struct mt7996_phy *phy, u8 band_idx)
{
	static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
	struct mt7996_dev *dev = phy->dev;
	u32 val, sum = 0, n = 0;
	int ant, i;

	for (ant = 0; ant < hweight8(phy->mt76->antenna_mask); ant++) {
		u32 reg = MT_WF_PHYRX_CSD_IRPI(band_idx, ant);

		for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
			val = mt76_rr(dev, reg);
			sum += val * nf_power[i];
			n += val;
		}
	}

	return n ? sum / n : 0;
}

void mt7996_update_channel(struct mt76_phy *mphy)
{
	struct mt7996_phy *phy = mphy->priv;
	struct mt76_channel_state *state = mphy->chan_state;
	int nf;

	mt7996_mcu_get_chan_mib_info(phy, false);

	nf = mt7996_phy_get_nf(phy, mphy->band_idx);
	if (!phy->noise)
		phy->noise = nf << 4;
	else if (nf)
		phy->noise += nf - (phy->noise >> 4);

	state->noise = -(phy->noise >> 4);
}

static bool
mt7996_wait_reset_state(struct mt7996_dev *dev, u32 state)
{
	bool ret;

	ret = wait_event_timeout(dev->reset_wait,
				 (READ_ONCE(dev->recovery.state) & state),
				 MT7996_RESET_TIMEOUT);

	WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
	return ret;
}

static void
mt7996_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	struct ieee80211_bss_conf *link_conf;
	struct mt7996_phy *phy = priv;
	struct mt7996_dev *dev = phy->dev;
	unsigned int link_id;

	switch (vif->type) {
	case NL80211_IFTYPE_MESH_POINT:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
		break;
	default:
		return;
	}

	for_each_vif_active_link(vif, link_conf, link_id) {
		struct mt7996_vif_link *link;

		link = mt7996_vif_link(dev, vif, link_id);
		if (!link || link->phy != phy)
			continue;

		mt7996_mcu_add_beacon(dev->mt76.hw, vif, link_conf,
				      link_conf->enable_beacon);
	}
}

void mt7996_mac_update_beacons(struct mt7996_phy *phy)
{
	ieee80211_iterate_active_interfaces(phy->mt76->hw,
					    IEEE80211_IFACE_ITER_RESUME_ALL,
					    mt7996_update_vif_beacon, phy);
}

static void
mt7996_update_beacons(struct mt7996_dev *dev)
{
	struct mt76_phy *phy2, *phy3;

	mt7996_mac_update_beacons(&dev->phy);

	phy2 = dev->mt76.phys[MT_BAND1];
	if (phy2)
		mt7996_mac_update_beacons(phy2->priv);

	phy3 = dev->mt76.phys[MT_BAND2];
	if (phy3)
		mt7996_mac_update_beacons(phy3->priv);
}

void mt7996_tx_token_put(struct mt7996_dev *dev)
{
	struct mt76_txwi_cache *txwi;
	int id;

	spin_lock_bh(&dev->mt76.token_lock);
	idr_for_each_entry(&dev->mt76.token, txwi, id) {
		mt7996_txwi_free(dev, txwi, NULL, NULL, NULL);
		dev->mt76.token_count--;
	}
	spin_unlock_bh(&dev->mt76.token_lock);
	idr_destroy(&dev->mt76.token);
}
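
/*
 * Full MAC restart used by the chip-level recovery path: quiesce
 * interrupts and all tx/rx NAPI contexts, reset the DMA engines and
 * the tx token table, reload the firmware, and replay the runtime
 * configuration (EEPROM, MAC/txpower/TXBF init) before restarting
 * every running phy.
 */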

static int
mt7996_mac_restart(struct mt7996_dev *dev)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct mt7996_phy *phy;
	int i, ret;

	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}

	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
	}

	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	mt7996_for_each_phy(dev, phy)
		set_bit(MT76_RESET, &phy->mt76->state);
	wake_up(&dev->mt76.mcu.wait);

	/* lock/unlock all queues to ensure that no tx is pending */
	mt7996_for_each_phy(dev, phy)
		mt76_txq_schedule_all(phy->mt76);

	/* disable all tx/rx napi */
	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc)
			napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	/* token reinit */
	mt7996_tx_token_put(dev);
	idr_init(&dev->mt76.token);

	mt7996_dma_reset(dev, true);

	mt76_for_each_q_rx(mdev, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&mdev->q_rx[i]))
			continue;

		if (mdev->q_rx[i].ndesc) {
			napi_enable(&dev->mt76.napi[i]);
			local_bh_disable();
			napi_schedule(&dev->mt76.napi[i]);
			local_bh_enable();
		}
	}
	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);

	mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
	mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
	if (dev->hif2) {
		mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
		mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
	}
	if (dev_is_pci(mdev->dev)) {
		mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
		if (dev->hif2)
			mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
	}

	/* load firmware */
	ret = mt7996_mcu_init_firmware(dev);
	if (ret)
		goto out;

	if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
	    mt7996_has_hwrro(dev)) {
		u32 wed_irq_mask = dev->mt76.mmio.irqmask |
				   MT_INT_TX_DONE_BAND2;

		mt7996_rro_hw_init(dev);
		mt76_for_each_q_rx(&dev->mt76, i) {
			if (mt76_queue_is_wed_rro_ind(&dev->mt76.q_rx[i]) ||
			    mt76_queue_is_wed_rro_msdu_pg(&dev->mt76.q_rx[i]))
				mt76_queue_rx_reset(dev, i);
		}

		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
		mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
					    false);
		mt7996_irq_enable(dev, wed_irq_mask);
		mt7996_irq_disable(dev, 0);
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
		mt76_wr(dev, MT_INT_PCIE1_MASK_CSR,
			MT_INT_TX_RX_DONE_EXT);
		mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
				     MT_INT_TX_RX_DONE_EXT);
	}

	/* set the necessary init items */
	ret = mt7996_mcu_set_eeprom(dev);
	if (ret)
		goto out;

	mt7996_mac_init(dev);
	mt7996_for_each_phy(dev, phy)
		mt7996_init_txpower(phy);
	ret = mt7996_txbf_init(dev);
	if (ret)
		goto out;

	mt7996_for_each_phy(dev, phy) {
		if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
			continue;

		ret = mt7996_run(phy);
		if (ret)
			goto out;
	}

out:
	/* reset done */
	mt7996_for_each_phy(dev, phy)
		clear_bit(MT76_RESET, &phy->mt76->state);

	napi_enable(&dev->mt76.tx_napi);
	local_bh_disable();
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	mt76_worker_enable(&dev->mt76.tx_worker);
	return ret;
}
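
/*
 * Drop all per-link station state during a full reset. The default
 * link lives in struct mt7996_sta itself (msta->deflink), so it is
 * only deinitialized and detached; every other link is freed via RCU.
 */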

static void
mt7996_mac_reset_sta_iter(void *data, struct ieee80211_sta *sta)
{
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct mt7996_dev *dev = data;
	int i;

	for (i = 0; i < ARRAY_SIZE(msta->link); i++) {
		struct mt7996_sta_link *msta_link = NULL;

		msta_link = rcu_replace_pointer(msta->link[i], msta_link,
						lockdep_is_held(&dev->mt76.mutex));
		if (!msta_link)
			continue;

		mt7996_mac_sta_deinit_link(dev, msta_link);

		if (msta->deflink_id == i) {
			msta->deflink_id = IEEE80211_LINK_UNSPECIFIED;
			continue;
		}

		kfree_rcu(msta_link, rcu_head);
	}
}

static void
mt7996_mac_reset_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct mt76_vif_link *mlink = (struct mt76_vif_link *)vif->drv_priv;
	struct mt76_vif_data *mvif = mlink->mvif;
	struct mt7996_dev *dev = data;
	int i;

	rcu_read_lock();
	for (i = 0; i < ARRAY_SIZE(mvif->link); i++) {
		mlink = mt76_dereference(mvif->link[i], &dev->mt76);
		if (!mlink || mlink == (struct mt76_vif_link *)vif->drv_priv)
			continue;

		rcu_assign_pointer(mvif->link[i], NULL);
		kfree_rcu(mlink, rcu_head);
	}
	rcu_read_unlock();
}

static void
mt7996_mac_full_reset(struct mt7996_dev *dev)
{
	struct ieee80211_hw *hw = mt76_hw(dev);
	struct mt7996_phy *phy;
	LIST_HEAD(list);
	int i;

	dev->recovery.hw_full_reset = true;

	wake_up(&dev->mt76.mcu.wait);
	ieee80211_stop_queues(hw);

	cancel_work_sync(&dev->wed_rro.work);
	mt7996_for_each_phy(dev, phy)
		cancel_delayed_work_sync(&phy->mt76->mac_work);

	mt76_abort_scan(&dev->mt76);

	mutex_lock(&dev->mt76.mutex);
	for (i = 0; i < 10; i++) {
		if (!mt7996_mac_restart(dev))
			break;
	}

	if (i == 10)
		dev_err(dev->mt76.dev, "chip full reset failed\n");

	mt7996_for_each_phy(dev, phy)
		phy->omac_mask = 0;

	ieee80211_iterate_stations_atomic(hw, mt7996_mac_reset_sta_iter, dev);
	ieee80211_iterate_active_interfaces_atomic(hw,
						   IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER,
						   mt7996_mac_reset_vif_iter, dev);
	mt76_reset_device(&dev->mt76);

	INIT_LIST_HEAD(&dev->sta_rc_list);
	INIT_LIST_HEAD(&dev->twt_list);

	spin_lock_bh(&dev->wed_rro.lock);
	list_splice_init(&dev->wed_rro.poll_list, &list);
	spin_unlock_bh(&dev->wed_rro.lock);

	while (!list_empty(&list)) {
		struct mt7996_wed_rro_session_id *e;

		e = list_first_entry(&list, struct mt7996_wed_rro_session_id,
				     list);
		list_del_init(&e->list);
		kfree(e);
	}

	i = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7996_WTBL_STA);
	dev->mt76.global_wcid.idx = i;
	dev->recovery.hw_full_reset = false;

	mutex_unlock(&dev->mt76.mutex);

	ieee80211_restart_hw(mt76_hw(dev));
}
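
/*
 * Recovery worker. A firmware watchdog event (dev->recovery.restart)
 * escalates to a full chip reset; otherwise perform the lighter L1 SER
 * handshake with the MCU (DMA_STOPPED -> RESET_DONE -> DMA_INIT ->
 * RECOVERY_DONE -> NORMAL_STATE), re-arming queues, NAPI and WED on
 * the way out.
 */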

void mt7996_mac_reset_work(struct work_struct *work)
{
	struct ieee80211_hw *hw;
	struct mt7996_dev *dev;
	struct mt7996_phy *phy;
	int i;

	dev = container_of(work, struct mt7996_dev, reset_work);
	hw = mt76_hw(dev);

	/* chip full reset */
	if (dev->recovery.restart) {
		/* disable WA/WM WDT */
		mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
			   MT_MCU_CMD_WDT_MASK);

		if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
			dev->recovery.wa_reset_count++;
		else
			dev->recovery.wm_reset_count++;

		mt7996_mac_full_reset(dev);

		/* enable mcu irq */
		mt7996_irq_enable(dev, MT_INT_MCU_CMD);
		mt7996_irq_disable(dev, 0);

		/* enable WA/WM WDT */
		mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);

		dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
		dev->recovery.restart = false;
		return;
	}

	if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
		return;

	dev_info(dev->mt76.dev, "\n%s L1 SER recovery start.",
		 wiphy_name(hw->wiphy));

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2))
		mtk_wed_device_stop(&dev->mt76.mmio.wed_hif2);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed))
		mtk_wed_device_stop(&dev->mt76.mmio.wed);

	ieee80211_stop_queues(mt76_hw(dev));

	set_bit(MT76_RESET, &dev->mphy.state);
	set_bit(MT76_MCU_RESET, &dev->mphy.state);
	mt76_abort_scan(&dev->mt76);
	wake_up(&dev->mt76.mcu.wait);

	cancel_work_sync(&dev->wed_rro.work);
	mt7996_for_each_phy(dev, phy) {
		mt76_abort_roc(phy->mt76);
		set_bit(MT76_RESET, &phy->mt76->state);
		cancel_delayed_work_sync(&phy->mt76->mac_work);
	}

	mt76_worker_disable(&dev->mt76.tx_worker);
	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_disable(&dev->mt76.napi[i]);
	}
	napi_disable(&dev->mt76.tx_napi);

	mutex_lock(&dev->mt76.mutex);

	mt7996_npu_hw_stop(dev);

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);

	if (mt7996_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
		mt7996_dma_reset(dev, false);

		mt7996_tx_token_put(dev);
		idr_init(&dev->mt76.token);

		mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
		mt7996_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
	}

	mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
	mt7996_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);

	/* enable DMA Rx/Tx and interrupt */
	mt7996_dma_start(dev, false, false);

	if (!is_mt7996(&dev->mt76) && dev->mt76.hwrro_mode == MT76_HWRRO_V3)
		mt76_wr(dev, MT_RRO_3_0_EMU_CONF, MT_RRO_3_0_EMU_CONF_EN_MASK);

	if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
		u32 wed_irq_mask = MT_INT_TX_DONE_BAND2 |
				   dev->mt76.mmio.irqmask;

		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
		mtk_wed_device_start_hw_rro(&dev->mt76.mmio.wed, wed_irq_mask,
					    true);
		mt7996_irq_enable(dev, wed_irq_mask);
		mt7996_irq_disable(dev, 0);
	}

	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
		mt76_wr(dev, MT_INT_PCIE1_MASK_CSR, MT_INT_TX_RX_DONE_EXT);
		mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
				     MT_INT_TX_RX_DONE_EXT);
	}

	clear_bit(MT76_MCU_RESET, &dev->mphy.state);
	mt7996_for_each_phy(dev, phy)
		clear_bit(MT76_RESET, &phy->mt76->state);

	mt76_for_each_q_rx(&dev->mt76, i) {
		if (mtk_wed_device_active(&dev->mt76.mmio.wed) &&
		    mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]))
			continue;

		napi_enable(&dev->mt76.napi[i]);
		local_bh_disable();
		napi_schedule(&dev->mt76.napi[i]);
		local_bh_enable();
	}

	tasklet_schedule(&dev->mt76.irq_tasklet);

	mt76_worker_enable(&dev->mt76.tx_worker);

	napi_enable(&dev->mt76.tx_napi);
	local_bh_disable();
	napi_schedule(&dev->mt76.tx_napi);
	local_bh_enable();

	ieee80211_wake_queues(hw);
	mt7996_update_beacons(dev);

	mutex_unlock(&dev->mt76.mutex);

	mt7996_npu_hw_init(dev);

	mt7996_for_each_phy(dev, phy)
		ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
					     MT7996_WATCHDOG_TIME);
	dev_info(dev->mt76.dev, "\n%s L1 SER recovery completed.",
		 wiphy_name(dev->mt76.hw->wiphy));
}
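
/*
 * The coredump buffer below is filled as a sequence of
 * (struct mt7996_mem_hdr, raw region contents) records, one per
 * firmware memory region; a zero-length region terminates the dump
 * while keeping its header in place.
 */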

/* firmware coredump */
void mt7996_mac_dump_work(struct work_struct *work)
{
	const struct mt7996_mem_region *mem_region;
	struct mt7996_crash_data *crash_data;
	struct mt7996_dev *dev;
	struct mt7996_mem_hdr *hdr;
	size_t buf_len;
	int i;
	u32 num;
	u8 *buf;

	dev = container_of(work, struct mt7996_dev, dump_work);

	mutex_lock(&dev->dump_mutex);

	crash_data = mt7996_coredump_new(dev);
	if (!crash_data) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_coredump;
	}

	mem_region = mt7996_coredump_get_mem_layout(dev, &num);
	if (!mem_region || !crash_data->memdump_buf_len) {
		mutex_unlock(&dev->dump_mutex);
		goto skip_memdump;
	}

	buf = crash_data->memdump_buf;
	buf_len = crash_data->memdump_buf_len;

	/* dumping memory content... */
	memset(buf, 0, buf_len);
	for (i = 0; i < num; i++) {
		if (mem_region->len > buf_len) {
			dev_warn(dev->mt76.dev, "%s len %zu is too large\n",
				 mem_region->name, mem_region->len);
			break;
		}

		/* reserve space for the header */
		hdr = (void *)buf;
		buf += sizeof(*hdr);
		buf_len -= sizeof(*hdr);

		mt7996_memcpy_fromio(dev, buf, mem_region->start,
				     mem_region->len);

		hdr->start = mem_region->start;
		hdr->len = mem_region->len;

		if (!mem_region->len)
			/* note: the header remains, just with zero length */
			break;

		buf += mem_region->len;
		buf_len -= mem_region->len;

		mem_region++;
	}

	mutex_unlock(&dev->dump_mutex);

skip_memdump:
	mt7996_coredump_submit(dev);
skip_coredump:
	queue_work(dev->mt76.wq, &dev->reset_work);
}

void mt7996_reset(struct mt7996_dev *dev)
{
	if (!dev->recovery.hw_init_done)
		return;

	if (dev->recovery.hw_full_reset)
		return;

	/* wm/wa exception: do full recovery */
	if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
		dev->recovery.restart = true;
		dev_info(dev->mt76.dev,
			 "%s indicated firmware crash, attempting recovery\n",
			 wiphy_name(dev->mt76.hw->wiphy));

		mt7996_irq_disable(dev, MT_INT_MCU_CMD);
		queue_work(dev->mt76.wq, &dev->dump_work);
		return;
	}

	queue_work(dev->mt76.wq, &dev->reset_work);
	wake_up(&dev->reset_wait);
}
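
/*
 * Fold the per-band hardware MIB counters into the cumulative software
 * copy in phy->mib; tx_bf_rx_fb_all_cnt additionally aggregates the
 * per-PHY-mode (HT/VHT/HE/EHT) beamforming feedback counters.
 */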

void mt7996_mac_update_stats(struct mt7996_phy *phy)
{
	struct mt76_mib_stats *mib = &phy->mib;
	struct mt7996_dev *dev = phy->dev;
	u8 band_idx = phy->mt76->band_idx;
	u32 cnt;
	int i;

	cnt = mt76_rr(dev, MT_MIB_RSCR1(band_idx));
	mib->fcs_err_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR33(band_idx));
	mib->rx_fifo_full_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR31(band_idx));
	mib->rx_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR6(band_idx));
	mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);

	cnt = mt76_rr(dev, MT_MIB_RVSR0(band_idx));
	mib->rx_vector_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR35(band_idx));
	mib->rx_delimiter_fail_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR36(band_idx));
	mib->rx_len_mismatch_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR0(band_idx));
	mib->tx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR2(band_idx));
	mib->tx_stop_q_empty_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR3(band_idx));
	mib->tx_mpdu_attempts_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR4(band_idx));
	mib->tx_mpdu_success_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR27(band_idx));
	mib->rx_ampdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR28(band_idx));
	mib->rx_ampdu_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR29(band_idx));
	mib->rx_ampdu_valid_subframe_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RSCR30(band_idx));
	mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_SDR27(band_idx));
	mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT, cnt);

	cnt = mt76_rr(dev, MT_MIB_SDR28(band_idx));
	mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT, cnt);

	cnt = mt76_rr(dev, MT_UMIB_RPDCR(band_idx));
	mib->rx_pfdrop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_RVSR1(band_idx));
	mib->rx_vec_queue_overflow_drop_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR1(band_idx));
	mib->rx_ba_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR0(band_idx));
	mib->tx_bf_ebf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR1(band_idx));
	mib->tx_bf_ibf_ppdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR2(band_idx));
	mib->tx_mu_bf_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR5(band_idx));
	mib->tx_mu_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR6(band_idx));
	mib->tx_mu_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_TSCR7(band_idx));
	mib->tx_su_acked_mpdu_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR3(band_idx));
	mib->tx_bf_rx_fb_ht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR4(band_idx));
	mib->tx_bf_rx_fb_vht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR5(band_idx));
	mib->tx_bf_rx_fb_he_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR6(band_idx));
	mib->tx_bf_rx_fb_eht_cnt += cnt;
	mib->tx_bf_rx_fb_all_cnt += cnt;

	cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(band_idx));
	mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
	mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
	mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);

	cnt = mt76_rr(dev, MT_MIB_BSCR7(band_idx));
	mib->tx_bf_fb_trig_cnt += cnt;

	cnt = mt76_rr(dev, MT_MIB_BSCR17(band_idx));
	mib->tx_bf_fb_cpl_cnt += cnt;

	for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
		cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
		mib->tx_amsdu[i] += cnt;
		mib->tx_amsdu_cnt += cnt;
	}

	/* rts count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR5(band_idx));
	mib->rts_cnt += cnt;

	/* rts retry count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR6(band_idx));
	mib->rts_retries_cnt += cnt;

	/* ba miss count */
	cnt = mt76_rr(dev, MT_MIB_BTSCR0(band_idx));
	mib->ba_miss_cnt += cnt;

	/* ack fail count */
	cnt = mt76_rr(dev, MT_MIB_BFTFCR(band_idx));
	mib->ack_fail_cnt += cnt;

	for (i = 0; i < 16; i++) {
		cnt = mt76_rr(dev, MT_TX_AGG_CNT(band_idx, i));
		phy->mt76->aggr_stats[i] += cnt;
	}
}
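
/*
 * Apply rate-control updates queued on dev->sta_rc_list: rate/NSS/
 * bandwidth changes trigger a full rate-ctrl update, while SMPS
 * changes only patch the MMPS field. Runs in workqueue context since
 * the MCU calls may sleep.
 */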

void mt7996_mac_sta_rc_work(struct work_struct *work)
{
	struct mt7996_dev *dev = container_of(work, struct mt7996_dev, rc_work);
	struct mt7996_sta_link *msta_link;
	struct ieee80211_vif *vif;
	struct mt7996_vif *mvif;
	LIST_HEAD(list);
	u32 changed;

	mutex_lock(&dev->mt76.mutex);

	spin_lock_bh(&dev->mt76.sta_poll_lock);
	list_splice_init(&dev->sta_rc_list, &list);

	while (!list_empty(&list)) {
		msta_link = list_first_entry(&list, struct mt7996_sta_link,
					     rc_list);
		list_del_init(&msta_link->rc_list);

		changed = msta_link->changed;
		msta_link->changed = 0;
		mvif = msta_link->sta->vif;
		vif = container_of((void *)mvif, struct ieee80211_vif,
				   drv_priv);

		spin_unlock_bh(&dev->mt76.sta_poll_lock);

		if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
			       IEEE80211_RC_NSS_CHANGED |
			       IEEE80211_RC_BW_CHANGED))
			mt7996_mcu_add_rate_ctrl(dev, msta_link->sta, vif,
						 msta_link->wcid.link_id,
						 true);

		if (changed & IEEE80211_RC_SMPS_CHANGED)
			mt7996_mcu_set_fixed_field(dev, msta_link->sta, NULL,
						   msta_link->wcid.link_id,
						   RATE_PARAM_MMPS_UPDATE);

		spin_lock_bh(&dev->mt76.sta_poll_lock);
	}

	spin_unlock_bh(&dev->mt76.sta_poll_lock);

	mutex_unlock(&dev->mt76.mutex);
}

void mt7996_mac_work(struct work_struct *work)
{
	struct mt7996_phy *phy;
	struct mt76_phy *mphy;

	mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
					       mac_work.work);
	phy = mphy->priv;

	mutex_lock(&mphy->dev->mutex);

	mt76_update_survey(mphy);
	if (++mphy->mac_work_count == 5) {
		mphy->mac_work_count = 0;

		mt7996_mac_update_stats(phy);

		mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_RATE);
		if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_ADM_STAT);
			mt7996_mcu_get_all_sta_info(phy, UNI_ALL_STA_TXRX_MSDU_COUNT);
		}
	}

	mutex_unlock(&mphy->dev->mutex);

	mt76_tx_status_check(mphy->dev, false);

	ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
				     MT7996_WATCHDOG_TIME);
}
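
/*
 * DFS handling: mt7996_dfs_init_radar_detector() below drives the RDD
 * (radar detection) state machine across DISABLED -> CAC -> ACTIVE
 * based on the current channel and regulatory region, loading the
 * region-specific radar patterns before detection starts.
 */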

static void mt7996_dfs_stop_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	int rdd_idx = mt7996_get_rdd_idx(phy, false);

	if (rdd_idx < 0)
		return;

	mt7996_mcu_rdd_cmd(dev, RDD_STOP, rdd_idx, 0);
}

static int mt7996_dfs_start_rdd(struct mt7996_dev *dev, int rdd_idx)
{
	int err, region;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		region = 1;
		break;
	}

	err = mt7996_mcu_rdd_cmd(dev, RDD_START, rdd_idx, region);
	if (err < 0)
		return err;

	return mt7996_mcu_rdd_cmd(dev, RDD_DET_MODE, rdd_idx, 1);
}

static int mt7996_dfs_start_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	int err, rdd_idx;

	rdd_idx = mt7996_get_rdd_idx(phy, false);
	if (rdd_idx < 0)
		return -EINVAL;

	/* start CAC */
	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_START, rdd_idx, 0);
	if (err < 0)
		return err;

	err = mt7996_dfs_start_rdd(dev, rdd_idx);

	return err;
}

static int
mt7996_dfs_init_radar_specs(struct mt7996_phy *phy)
{
	const struct mt7996_dfs_radar_spec *radar_specs;
	struct mt7996_dev *dev = phy->dev;
	int err, i;

	switch (dev->mt76.region) {
	case NL80211_DFS_FCC:
		radar_specs = &fcc_radar_specs;
		err = mt7996_mcu_set_fcc5_lpn(dev, 8);
		if (err < 0)
			return err;
		break;
	case NL80211_DFS_ETSI:
		radar_specs = &etsi_radar_specs;
		break;
	case NL80211_DFS_JP:
		radar_specs = &jp_radar_specs;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
		err = mt7996_mcu_set_radar_th(dev, i,
					      &radar_specs->radar_pattern[i]);
		if (err < 0)
			return err;
	}

	return mt7996_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
}

int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy)
{
	struct mt7996_dev *dev = phy->dev;
	enum mt76_dfs_state dfs_state, prev_state;
	int err, rdd_idx = mt7996_get_rdd_idx(phy, false);

	prev_state = phy->mt76->dfs_state;
	dfs_state = mt76_phy_dfs_state(phy->mt76);

	if (prev_state == dfs_state || rdd_idx < 0)
		return 0;

	if (prev_state == MT_DFS_STATE_UNKNOWN)
		mt7996_dfs_stop_radar_detector(phy);

	if (dfs_state == MT_DFS_STATE_DISABLED)
		goto stop;

	if (prev_state <= MT_DFS_STATE_DISABLED) {
		err = mt7996_dfs_init_radar_specs(phy);
		if (err < 0)
			return err;

		err = mt7996_dfs_start_radar_detector(phy);
		if (err < 0)
			return err;

		phy->mt76->dfs_state = MT_DFS_STATE_CAC;
	}

	if (dfs_state == MT_DFS_STATE_CAC)
		return 0;

	err = mt7996_mcu_rdd_cmd(dev, RDD_CAC_END, rdd_idx, 0);
	if (err < 0) {
		phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
		return err;
	}

	phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
	return 0;

stop:
	err = mt7996_mcu_rdd_cmd(dev, RDD_NORMAL_START, rdd_idx, 0);
	if (err < 0)
		return err;

	mt7996_dfs_stop_radar_detector(phy);
	phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;

	return 0;
}

static int
mt7996_mac_twt_duration_align(int duration)
{
	return duration << 8;
}

static u64
mt7996_mac_twt_sched_list_add(struct mt7996_dev *dev,
			      struct mt7996_twt_flow *flow)
{
	struct mt7996_twt_flow *iter, *iter_next;
	u32 duration = flow->duration << 8;
	u64 start_tsf;

	iter = list_first_entry_or_null(&dev->twt_list,
					struct mt7996_twt_flow, list);
	if (!iter || !iter->sched || iter->start_tsf > duration) {
		/* add flow as first entry in the list */
		list_add(&flow->list, &dev->twt_list);
		return 0;
	}

	list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
		start_tsf = iter->start_tsf +
			    mt7996_mac_twt_duration_align(iter->duration);
		if (list_is_last(&iter->list, &dev->twt_list))
			break;

		if (!iter_next->sched ||
		    iter_next->start_tsf > start_tsf + duration) {
			list_add(&flow->list, &iter->list);
			goto out;
		}
	}

	/* add flow as last entry in the list */
	list_add_tail(&flow->list, &dev->twt_list);
out:
	return start_tsf;
}
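
/*
 * Validate an individual TWT setup request. Durations are expressed in
 * units of 256 us and the wake interval is mantissa * 2^exp us, so a
 * request is only acceptable when
 *
 *	(mantissa << exp) >= (min_twt_dur << 8)
 *
 * i.e. the service period fits inside the wake interval.
 */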

static int mt7996_mac_check_twt_req(struct ieee80211_twt_setup *twt)
{
	struct ieee80211_twt_params *twt_agrt;
	u64 interval, duration;
	u16 mantissa;
	u8 exp;

	/* only individual agreement supported */
	if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
		return -EOPNOTSUPP;

	/* only 256us unit supported */
	if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
		return -EOPNOTSUPP;

	twt_agrt = (struct ieee80211_twt_params *)twt->params;

	/* explicit agreement not supported */
	if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
		return -EOPNOTSUPP;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
			le16_to_cpu(twt_agrt->req_type));
	mantissa = le16_to_cpu(twt_agrt->mantissa);
	duration = twt_agrt->min_twt_dur << 8;

	interval = (u64)mantissa << exp;
	if (interval < duration)
		return -EOPNOTSUPP;

	return 0;
}

static bool
mt7996_mac_twt_param_equal(struct mt7996_sta_link *msta_link,
			   struct ieee80211_twt_params *twt_agrt)
{
	u16 type = le16_to_cpu(twt_agrt->req_type);
	u8 exp;
	int i;

	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
	for (i = 0; i < MT7996_MAX_STA_TWT_AGRT; i++) {
		struct mt7996_twt_flow *f;

		if (!(msta_link->twt.flowid_mask & BIT(i)))
			continue;

		f = &msta_link->twt.flow[i];
		if (f->duration == twt_agrt->min_twt_dur &&
		    f->mantissa == twt_agrt->mantissa &&
		    f->exp == exp &&
		    f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
		    f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
		    f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
			return true;
	}

	return false;
}
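
/*
 * Negotiate a TWT agreement: reject requests that fail validation or
 * exceed the per-device/per-station agreement limits, dictate a larger
 * minimum duration when the request is below MT7996_MIN_TWT_DUR, and
 * otherwise pick a free flow/table id, schedule the flow on the TSF
 * timeline and push it to the MCU before accepting.
 */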

void mt7996_mac_add_twt_setup(struct ieee80211_hw *hw,
			      struct ieee80211_sta *sta,
			      struct ieee80211_twt_setup *twt)
{
	enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
	struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
	struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
	struct mt7996_sta_link *msta_link = &msta->deflink;
	u16 req_type = le16_to_cpu(twt_agrt->req_type);
	enum ieee80211_twt_setup_cmd sta_setup_cmd;
	struct mt7996_dev *dev = mt7996_hw_dev(hw);
	struct mt7996_twt_flow *flow;
	u8 flowid, table_id, exp;

	if (mt7996_mac_check_twt_req(twt))
		goto out;

	mutex_lock(&dev->mt76.mutex);

	if (dev->twt.n_agrt == MT7996_MAX_TWT_AGRT)
		goto unlock;

	if (hweight8(msta_link->twt.flowid_mask) ==
	    ARRAY_SIZE(msta_link->twt.flow))
		goto unlock;

	if (twt_agrt->min_twt_dur < MT7996_MIN_TWT_DUR) {
		setup_cmd = TWT_SETUP_CMD_DICTATE;
		twt_agrt->min_twt_dur = MT7996_MIN_TWT_DUR;
		goto unlock;
	}

	if (mt7996_mac_twt_param_equal(msta_link, twt_agrt))
		goto unlock;

	flowid = ffs(~msta_link->twt.flowid_mask) - 1;
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
	twt_agrt->req_type |= le16_encode_bits(flowid,
					       IEEE80211_TWT_REQTYPE_FLOWID);

	table_id = ffs(~dev->twt.table_mask) - 1;
	exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
	sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);

	flow = &msta_link->twt.flow[flowid];
	memset(flow, 0, sizeof(*flow));
	INIT_LIST_HEAD(&flow->list);
	flow->wcid = msta_link->wcid.idx;
	flow->table_id = table_id;
	flow->id = flowid;
	flow->duration = twt_agrt->min_twt_dur;
	flow->mantissa = twt_agrt->mantissa;
	flow->exp = exp;
	flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
	flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
	flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);

	if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
	    sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
		u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
		u64 flow_tsf, curr_tsf;
		u32 rem;

		flow->sched = true;
		flow->start_tsf = mt7996_mac_twt_sched_list_add(dev, flow);
		curr_tsf = __mt7996_get_tsf(hw, &msta->vif->deflink);
		div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
		flow_tsf = curr_tsf + interval - rem;
		twt_agrt->twt = cpu_to_le64(flow_tsf);
	} else {
		list_add_tail(&flow->list, &dev->twt_list);
	}
	flow->tsf = le64_to_cpu(twt_agrt->twt);

	if (mt7996_mcu_twt_agrt_update(dev, &msta->vif->deflink, flow,
				       MCU_TWT_AGRT_ADD))
		goto unlock;

	setup_cmd = TWT_SETUP_CMD_ACCEPT;
	dev->twt.table_mask |= BIT(table_id);
	msta_link->twt.flowid_mask |= BIT(flowid);
	dev->twt.n_agrt++;

unlock:
	mutex_unlock(&dev->mt76.mutex);
out:
	twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt_agrt->req_type |=
		le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
	twt->control = twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED;
}

void mt7996_mac_twt_teardown_flow(struct mt7996_dev *dev,
				  struct mt7996_vif_link *link,
				  struct mt7996_sta_link *msta_link,
				  u8 flowid)
{
	struct mt7996_twt_flow *flow;

	lockdep_assert_held(&dev->mt76.mutex);

	if (flowid >= ARRAY_SIZE(msta_link->twt.flow))
		return;

	if (!(msta_link->twt.flowid_mask & BIT(flowid)))
		return;

	flow = &msta_link->twt.flow[flowid];
	if (mt7996_mcu_twt_agrt_update(dev, link, flow, MCU_TWT_AGRT_DELETE))
		return;

	list_del_init(&flow->list);
	msta_link->twt.flowid_mask &= ~BIT(flowid);
	dev->twt.table_mask &= ~BIT(flow->table_id);
	dev->twt.n_agrt--;
}