// SPDX-License-Identifier: BSD-3-Clause-Clear
/* Copyright (C) 2020 MediaTek Inc. */

#include <linux/fs.h>
#include "mt7915.h"
#include "mcu.h"
#include "mac.h"
#include "eeprom.h"

/*
 * Resolve the chip-specific firmware image name at runtime.  The optional
 * varargs are pasted onto the macro name for the MT7986 case only, which is
 * used below to select the a-die specific image variant.
 */
#define fw_name(_dev, name, ...)	({			\
	char *_fw;						\
	switch (mt76_chip(&(_dev)->mt76)) {			\
	case 0x7915:						\
		_fw = MT7915_##name;				\
		break;						\
	case 0x7981:						\
		_fw = MT7981_##name;				\
		break;						\
	case 0x7986:						\
		_fw = MT7986_##name##__VA_ARGS__;		\
		break;						\
	default:						\
		_fw = MT7916_##name;				\
		break;						\
	}							\
	_fw;							\
})

/*
 * NOTE(review): the adie check uses `dev`, not the `_dev` macro argument, so
 * this macro silently relies on the caller having a local named `dev` in
 * scope -- confirm all call sites before renaming anything.
 */
#define fw_name_var(_dev, name)		(mt7915_check_adie(dev, false) ?	\
					 fw_name(_dev, name) :			\
					 fw_name(_dev, name, _MT7975))

#define MCU_PATCH_ADDRESS		0x200000

/* Extract a single HE PHY/MAC capability field from one capability octet */
#define HE_PHY(p, c)			u8_get_bits(c, IEEE80211_HE_PHY_##p)
#define HE_MAC(m, c)			u8_get_bits(c, IEEE80211_HE_MAC_##m)

static bool sr_scene_detect = true;
module_param(sr_scene_detect, bool, 0644);
MODULE_PARM_DESC(sr_scene_detect, "Enable firmware scene detection algorithm");

/*
 * Return the zero-based index of the highest spatial stream that is marked
 * supported in a VHT/HE-style rx MCS map (two bits per stream).
 */
static u8
mt7915_mcu_get_sta_nss(u16 mcs_map)
{
	u8 nss;

	for (nss = 8; nss > 0; nss--) {
		u8 nss_mcs = (mcs_map >> (2 * (nss - 1))) & 3;

		if (nss_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED)
			break;
	}

	return nss - 1;
}

/*
 * Build the HE max-MCS map advertised to the firmware for @sta: intersect the
 * station's per-stream HE MCS support with the user-configured bitrate mask
 * and re-encode the result back into the 2-bit-per-stream map format.
 */
static void
mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
			  u16 mcs_map)
{
	struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
	struct mt7915_dev *dev = msta->vif->phy->dev;
	enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
	const u16 *mask = msta->vif->bitrate_mask.control[band].he_mcs;
	int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;

	for (nss = 0; nss < max_nss; nss++) {
		int mcs;

		switch ((mcs_map >> (2 * nss)) & 0x3) {
		case IEEE80211_HE_MCS_SUPPORT_0_11:
			mcs = GENMASK(11, 0);
			break;
		case IEEE80211_HE_MCS_SUPPORT_0_9:
			mcs = GENMASK(9, 0);
			break;
		case IEEE80211_HE_MCS_SUPPORT_0_7:
			mcs = GENMASK(7, 0);
			break;
		default:
			mcs = 0;
		}

		/* highest MCS surviving the user mask, or -1 if none left */
		mcs = mcs ? fls(mcs & mask[nss]) - 1 : -1;

		switch (mcs) {
		case 0 ... 7:
			mcs = IEEE80211_HE_MCS_SUPPORT_0_7;
			break;
		case 8 ... 9:
			mcs = IEEE80211_HE_MCS_SUPPORT_0_9;
			break;
		case 10 ... 11:
			mcs = IEEE80211_HE_MCS_SUPPORT_0_11;
			break;
		default:
			mcs = IEEE80211_HE_MCS_NOT_SUPPORTED;
			break;
		}
		mcs_map &= ~(0x3 << (nss * 2));
		mcs_map |= mcs << (nss * 2);

		/* only support 2ss on 160MHz for mt7915 */
		if (is_mt7915(&dev->mt76) && nss > 1 &&
		    sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
			break;
	}

	*he_mcs = cpu_to_le16(mcs_map);
}

/*
 * Fill the per-stream VHT MCS masks for the firmware: expand the station's
 * 2-bit support level into a full MCS bitmap and apply the user rate mask.
 */
static void
mt7915_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
			   const u16 *mask)
{
	struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
	struct mt7915_dev *dev = msta->vif->phy->dev;
	u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);
	int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
	u16 mcs;

	for (nss = 0; nss < max_nss; nss++, mcs_map >>= 2) {
		switch (mcs_map & 0x3) {
		case IEEE80211_VHT_MCS_SUPPORT_0_9:
			mcs = GENMASK(9, 0);
			break;
		case IEEE80211_VHT_MCS_SUPPORT_0_8:
			mcs = GENMASK(8, 0);
			break;
		case IEEE80211_VHT_MCS_SUPPORT_0_7:
			mcs = GENMASK(7, 0);
			break;
		default:
			mcs = 0;
		}

		vht_mcs[nss] = cpu_to_le16(mcs & mask[nss]);

		/* only support 2ss on 160MHz for mt7915 */
		if (is_mt7915(&dev->mt76) && nss > 1 &&
		    sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
			break;
	}
}

/* Intersect the station's HT rx MCS bitmap with the user-configured mask */
static void
mt7915_mcu_set_sta_ht_mcs(struct ieee80211_sta *sta, u8 *ht_mcs,
			  const u8 *mask)
{
	int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;

	for (nss = 0; nss < max_nss; nss++)
		ht_mcs[nss] = sta->deflink.ht_cap.mcs.rx_mask[nss] & mask[nss];
}

/*
 * Decode an MCU response for a pending command.  On timeout (!skb) trigger a
 * full chip restart.  Per-command quirks: PATCH_SEM_CONTROL returns a single
 * status byte located 4 bytes before the end of the rxd header, THERMAL_CTRL
 * returns a __le32 located 4 bytes after it; all other commands only need the
 * rxd header stripped.
 */
static int
mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
			  struct sk_buff *skb, int seq)
{
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	struct mt76_connac2_mcu_rxd *rxd;
	int ret = 0;

	if (!skb) {
		dev_err(mdev->dev, "Message %08x (seq %d) timeout\n",
			cmd, seq);

		if (!test_and_set_bit(MT76_MCU_RESET, &dev->mphy.state)) {
			dev->recovery.restart = true;
			wake_up(&dev->mt76.mcu.wait);
			queue_work(dev->mt76.wq, &dev->reset_work);
			wake_up(&dev->reset_wait);
		}

		return -ETIMEDOUT;
	}

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
	/* WA tx-stat events are matched by eid/ext_eid, not by sequence */
	if (seq != rxd->seq &&
	    !(rxd->eid == MCU_CMD_EXT_CID &&
	      rxd->ext_eid == MCU_EXT_EVENT_WA_TX_STAT))
		return -EAGAIN;

	if (cmd == MCU_CMD(PATCH_SEM_CONTROL)) {
		skb_pull(skb, sizeof(*rxd) - 4);
		ret = *skb->data;
	} else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) {
		skb_pull(skb, sizeof(*rxd) + 4);
		ret = le32_to_cpu(*(__le32 *)skb->data);
	} else {
		skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
	}

	return ret;
}

/*
 * Choose the MCU response timeout for @cmd: 5s default, 2s for frequent
 * housekeeping commands, 10s for the slow EFUSE buffer-mode command.
 */
static void
mt7915_mcu_set_timeout(struct mt76_dev *mdev, int cmd)
{
	mdev->mcu.timeout = 5 * HZ;

	if ((cmd & __MCU_CMD_FIELD_ID) != MCU_CMD_EXT_CID)
		return;

	switch (FIELD_GET(__MCU_CMD_FIELD_EXT_ID, cmd)) {
	case MCU_EXT_CMD_THERMAL_CTRL:
	case MCU_EXT_CMD_GET_MIB_INFO:
	case MCU_EXT_CMD_PHY_STAT_INFO:
	case MCU_EXT_CMD_STA_REC_UPDATE:
	case MCU_EXT_CMD_BSS_INFO_UPDATE:
		mdev->mcu.timeout = 2 * HZ;
		return;
	case MCU_EXT_CMD_EFUSE_BUFFER_MODE:
		mdev->mcu.timeout = 10 * HZ;
		return;
	default:
		break;
	}
}

/*
 * Queue an MCU message on the appropriate MCU queue: firmware download goes
 * to FWDL, everything else to WA once the MCU is running, otherwise to WM.
 */
static int
mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
			int cmd, int *wait_seq)
{
	struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
	enum mt76_mcuq_id qid;

	if (cmd == MCU_CMD(FW_SCATTER))
		qid = MT_MCUQ_FWDL;
	else if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
		qid = MT_MCUQ_WA;
	else
		qid = MT_MCUQ_WM;

	mt7915_mcu_set_timeout(mdev, cmd);

	return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0);
}

/* Send a generic three-argument command to the WA firmware (no response) */
int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3)
{
	struct {
		__le32 args[3];
	} req = {
		.args = {
			cpu_to_le32(a1),
			cpu_to_le32(a2),
			cpu_to_le32(a3),
		},
	};

	return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), false);
}

/* Interface iterator: complete a pending CSA on non-station interfaces */
static void
mt7915_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	if (!vif->bss_conf.csa_active || vif->type == NL80211_IFTYPE_STATION)
		return;

	ieee80211_csa_finish(vif, 0);
}

/*
 * Firmware notification that a channel-switch countdown completed; finalize
 * the CSA on every active interface of the phy the event belongs to.
 */
static void
mt7915_mcu_rx_csa_notify(struct mt7915_dev *dev, struct sk_buff *skb)
{
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7915_mcu_csa_notify *c;

	c = (struct mt7915_mcu_csa_notify *)skb->data;

	if (c->band_idx > MT_BAND1)
		return;

	/* redirect to the secondary phy unless band 1 is the main phy */
	if ((c->band_idx && !dev->phy.mt76->band_idx) &&
	    dev->mt76.phys[MT_BAND1])
		mphy = dev->mt76.phys[MT_BAND1];

	ieee80211_iterate_active_interfaces_atomic(mphy->hw,
			IEEE80211_IFACE_ITER_RESUME_ALL,
			mt7915_mcu_csa_finish, mphy->hw);
}

/*
 * Firmware thermal-protection notification: record the new duty cycle so it
 * can be reported through the thermal/debugfs interfaces.
 */
static void
mt7915_mcu_rx_thermal_notify(struct mt7915_dev *dev, struct sk_buff *skb)
{
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7915_mcu_thermal_notify *t;
	struct mt7915_phy *phy;

	t = (struct mt7915_mcu_thermal_notify *)skb->data;
	if (t->ctrl.ctrl_id != THERMAL_PROTECT_ENABLE)
		return;

	if (t->ctrl.band_idx > MT_BAND1)
		return;

	if ((t->ctrl.band_idx && !dev->phy.mt76->band_idx) &&
	    dev->mt76.phys[MT_BAND1])
		mphy = dev->mt76.phys[MT_BAND1];

	phy = mphy->priv;
	phy->throttle_state = t->ctrl.duty.duty_cycle;
}

/*
 * Firmware RDD (radar detection) report: map the RDD index onto the phy it
 * monitors and forward the event to mac80211/cfg80211.  Background RDD
 * reports go through the dedicated background-radar API instead.
 */
static void
mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
{
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7915_mcu_rdd_report *r;
	u32 sku;

	r = (struct mt7915_mcu_rdd_report *)skb->data;

	switch (r->rdd_idx) {
	case MT_RDD_IDX_BAND0:
		break;
	case MT_RDD_IDX_BAND1:
		sku = mt7915_check_adie(dev, true);
		/* the main phy is bound to band 1 for this sku */
		if (is_mt7986(&dev->mt76) &&
		    (sku == MT7975_ONE_ADIE || sku == MT7976_ONE_ADIE))
			break;
		mphy = dev->mt76.phys[MT_BAND1];
		break;
	case MT_RDD_IDX_BACKGROUND:
		if (!dev->rdd2_phy)
			return;
		mphy = dev->rdd2_phy->mt76;
		break;
	default:
		dev_err(dev->mt76.dev, "Unknown RDD idx %d\n", r->rdd_idx);
		return;
	}

	if (!mphy)
		return;

	if (r->rdd_idx == MT_RDD_IDX_BACKGROUND)
		cfg80211_background_radar_event(mphy->hw->wiphy,
						&dev->rdd2_chandef,
						GFP_ATOMIC);
	else
		ieee80211_radar_detected(mphy->hw, NULL);
	dev->hw_pattern++;
}

/*
 * Print a firmware log message carried in an event skb.  s2d_index 0 is the
 * WM core (optionally consumed by the debugfs log ring), 2 is the WA core.
 */
static void
mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb)
{
	struct mt76_connac2_mcu_rxd *rxd;
	int len = skb->len - sizeof(*rxd);
	const char *data, *type;

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
	data = (char *)&rxd[1];

	switch (rxd->s2d_index) {
	case 0:
		if (mt7915_debugfs_rx_log(dev, data, len))
			return;

		type = "WM";
		break;
	case 2:
		type = "WA";
		break;
	default:
		type = "unknown";
		break;
	}

	wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type, len, data);
}

/* Interface iterator: complete a pending BSS color change on non-STA vifs */
static void
mt7915_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
	if (!vif->bss_conf.color_change_active ||
	    vif->type == NL80211_IFTYPE_STATION)
		return;

	ieee80211_color_change_finish(vif, 0);
}

/*
 * Firmware BSS color change (BCC) countdown-complete notification; finalize
 * the color change on the phy that raised it.
 */
static void
mt7915_mcu_rx_bcc_notify(struct mt7915_dev *dev, struct sk_buff *skb)
{
	struct mt76_phy *mphy = &dev->mt76.phy;
	struct mt7915_mcu_bcc_notify *b;

	b = (struct mt7915_mcu_bcc_notify *)skb->data;

	if (b->band_idx > MT_BAND1)
		return;

	if ((b->band_idx && !dev->phy.mt76->band_idx) &&
	    dev->mt76.phys[MT_BAND1])
		mphy = dev->mt76.phys[MT_BAND1];

	ieee80211_iterate_active_interfaces_atomic(mphy->hw,
			IEEE80211_IFACE_ITER_RESUME_ALL,
			mt7915_mcu_cca_finish, mphy->hw);
}

/* Dispatch an extended (MCU_EVENT_EXT) unsolicited event by ext_eid */
static void
mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
{
	struct mt76_connac2_mcu_rxd *rxd;

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
	switch (rxd->ext_eid) {
	case MCU_EXT_EVENT_THERMAL_PROTECT:
		mt7915_mcu_rx_thermal_notify(dev, skb);
		break;
	case MCU_EXT_EVENT_RDD_REPORT:
		mt7915_mcu_rx_radar_detected(dev, skb);
		break;
	case MCU_EXT_EVENT_CSA_NOTIFY:
		mt7915_mcu_rx_csa_notify(dev, skb);
		break;
	case MCU_EXT_EVENT_FW_LOG_2_HOST:
		mt7915_mcu_rx_log_message(dev, skb);
		break;
	case MCU_EXT_EVENT_BCC_NOTIFY:
		mt7915_mcu_rx_bcc_notify(dev, skb);
		break;
	default:
		break;
	}
}

/* Handle (and always consume) an event not tied to a pending command */
static void
mt7915_mcu_rx_unsolicited_event(struct mt7915_dev *dev, struct sk_buff *skb)
{
	struct mt76_connac2_mcu_rxd *rxd;

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
	switch (rxd->eid) {
	case MCU_EVENT_EXT:
		mt7915_mcu_rx_ext_event(dev, skb);
		break;
	default:
		break;
	}
	dev_kfree_skb(skb);
}

/*
 * Entry point for all MCU rx events: route unsolicited notifications to the
 * handlers above, everything else (command responses, including seq-less WA
 * tx-stat replies) to the generic mt76 response machinery.
 */
void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb)
{
	struct mt76_connac2_mcu_rxd *rxd;

	rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
	if ((rxd->ext_eid == MCU_EXT_EVENT_THERMAL_PROTECT ||
	     rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST ||
	     rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
	     rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC ||
	     rxd->ext_eid == MCU_EXT_EVENT_BCC_NOTIFY ||
	     !rxd->seq) &&
	    !(rxd->eid == MCU_CMD_EXT_CID &&
	      rxd->ext_eid == MCU_EXT_EVENT_WA_TX_STAT))
		mt7915_mcu_rx_unsolicited_event(dev, skb);
	else
		mt76_mcu_rx_event(&dev->mt76, skb);
}

/*
 * Append a zeroed sub-TLV to @skb and bump the enclosing TLV's sub-TLV count
 * and running length.  Returns a pointer to the new sub-TLV so the caller
 * can fill in its payload.
 */
static struct tlv *
mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len,
			     __le16 *sub_ntlv, __le16 *len)
{
	struct tlv *ptlv, tlv = {
		.tag = cpu_to_le16(sub_tag),
		.len = cpu_to_le16(sub_len),
	};

	ptlv = skb_put_zero(skb, sub_len);
	memcpy(ptlv, &tlv, sizeof(tlv));

	le16_add_cpu(sub_ntlv, 1);
	le16_add_cpu(len, sub_len);

	return ptlv;
}

/** bss info **/
struct mt7915_he_obss_narrow_bw_ru_data {
	bool tolerated;
};

/*
 * BSS iterator: clear ->tolerated if this BSS does not advertise the OBSS
 * narrow-bandwidth-RU tolerance bit in its extended capabilities.
 */
static void mt7915_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
						   struct cfg80211_bss *bss,
						   void *_data)
{
	struct mt7915_he_obss_narrow_bw_ru_data *data = _data;
	const struct element *elem;

	rcu_read_lock();
	elem = ieee80211_bss_get_elem(bss, WLAN_EID_EXT_CAPABILITY);

	if (!elem || elem->datalen <= 10 ||
	    !(elem->data[10] &
	      WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT))
		data->tolerated = false;

	rcu_read_unlock();
}

/*
 * On radar channels, decide whether 26-tone RU transmissions must be blocked
 * because at least one neighbouring AP cannot tolerate them.
 */
static bool mt7915_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
					      struct ieee80211_vif *vif)
{
	struct mt7915_he_obss_narrow_bw_ru_data iter_data = {
		.tolerated = true,
	};

	if (!(vif->bss_conf.chanreq.oper.chan->flags & IEEE80211_CHAN_RADAR))
		return false;

	cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chanreq.oper,
			  mt7915_check_he_obss_narrow_bw_ru_iter,
			  &iter_data);

	/*
	 * If there is at least one AP on radar channel that cannot
	 * tolerate 26-tone RU UL OFDMA transmissions using HE TB PPDU.
	 */
	return !iter_data.tolerated;
}

/* BSS_INFO_RF_CH TLV: primary/center channel, bandwidth and HE RU26 policy */
static void
mt7915_mcu_bss_rfch_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
			struct mt7915_phy *phy)
{
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	struct bss_info_rf_ch *ch;
	struct tlv *tlv;
	int freq1 = chandef->center_freq1;

	tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_RF_CH, sizeof(*ch));

	ch = (struct bss_info_rf_ch *)tlv;
	ch->pri_ch = chandef->chan->hw_value;
	ch->center_ch0 = ieee80211_frequency_to_channel(freq1);
	ch->bw = mt76_connac_chan_bw(chandef);

	if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
		int freq2 = chandef->center_freq2;

		ch->center_ch1 = ieee80211_frequency_to_channel(freq2);
	}

	if (vif->bss_conf.he_support && vif->type == NL80211_IFTYPE_STATION) {
		struct mt76_phy *mphy = phy->mt76;

		ch->he_ru26_block =
			mt7915_check_he_obss_narrow_bw_ru(mphy->hw, vif);
		ch->he_all_disable = false;
	} else {
		ch->he_all_disable = true;
	}
}

/*
 * BSS_INFO_RA TLV: rate-adaptation parameters for the firmware.  The numeric
 * values (algo, thresholds, intervals) are vendor-tuned firmware defaults.
 */
static void
mt7915_mcu_bss_ra_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
		      struct mt7915_phy *phy)
{
	int max_nss = hweight8(phy->mt76->antenna_mask);
	struct bss_info_ra *ra;
	struct tlv *tlv;

	tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_RA, sizeof(*ra));

	ra = (struct bss_info_ra *)tlv;
	ra->op_mode = vif->type == NL80211_IFTYPE_AP;
	ra->adhoc_en = vif->type == NL80211_IFTYPE_ADHOC;
	ra->short_preamble = true;
	ra->tx_streams = max_nss;
	ra->rx_streams = max_nss;
	ra->algo = 4;
	ra->train_up_rule = 2;
	ra->train_up_high_thres = 110;
	ra->train_up_rule_rssi = -70;
	ra->low_traffic_thres = 2;
	ra->phy_cap = cpu_to_le32(0xfdf);
	ra->interval = cpu_to_le32(500);
	ra->fast_interval = cpu_to_le32(100);
}

/* BSS_INFO_HE_BASIC TLV: PE duration, HE RTS threshold and tx MCS maps */
static void
mt7915_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
		      struct mt7915_phy *phy)
{
#define DEFAULT_HE_PE_DURATION		4
#define DEFAULT_HE_DURATION_RTS_THRES	1023
	const struct ieee80211_sta_he_cap *cap;
	struct bss_info_he *he;
	struct tlv *tlv;

	cap = mt76_connac_get_he_phy_cap(phy->mt76, vif);

	tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_HE_BASIC, sizeof(*he));

	he = (struct bss_info_he *)tlv;
	he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext;
	if (!he->he_pe_duration)
		he->he_pe_duration = DEFAULT_HE_PE_DURATION;

	he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th);
	if (!he->he_rts_thres)
		he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES);

	he->max_nss_mcs[CMD_HE_MCS_BW80] = cap->he_mcs_nss_supp.tx_mcs_80;
	he->max_nss_mcs[CMD_HE_MCS_BW160] = cap->he_mcs_nss_supp.tx_mcs_160;
	he->max_nss_mcs[CMD_HE_MCS_BW8080] = cap->he_mcs_nss_supp.tx_mcs_80p80;
}

/* BSS_INFO_HW_AMSDU TLV: enable HW A-MSDU with fixed compare bitmaps */
static void
mt7915_mcu_bss_hw_amsdu_tlv(struct sk_buff *skb)
{
#define TXD_CMP_MAP1		GENMASK(15, 0)
#define TXD_CMP_MAP2		(GENMASK(31, 0) & ~BIT(23))
	struct bss_info_hw_amsdu *amsdu;
	struct tlv *tlv;

	tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_HW_AMSDU, sizeof(*amsdu));

	amsdu = (struct bss_info_hw_amsdu *)tlv;
	amsdu->cmp_bitmap_0 = cpu_to_le32(TXD_CMP_MAP1);
	amsdu->cmp_bitmap_1 = cpu_to_le32(TXD_CMP_MAP2);
	amsdu->trig_thres = cpu_to_le16(2);
	amsdu->enable = true;
}

/*
 * BSS_INFO_BMC_RATE TLV: broadcast/multicast rate selection.  2 GHz uses
 * short preamble; 5/6 GHz uses fixed bc/mc transmission rate indices.
 */
static void
mt7915_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt7915_phy *phy)
{
	struct bss_info_bmc_rate *bmc;
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	enum nl80211_band band = chandef->chan->band;
	struct tlv *tlv;

	tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BMC_RATE, sizeof(*bmc));

	bmc = (struct bss_info_bmc_rate *)tlv;
	if (band == NL80211_BAND_2GHZ) {
		bmc->short_preamble = true;
	} else {
		bmc->bc_trans = cpu_to_le16(0x2000);
		bmc->mc_trans = cpu_to_le16(0x2080);
	}
}

/*
 * Program one repeater MUAR (multiple-address) entry for @vif.  Each vif
 * consumes two entries: own address (index*2) and BSSID (index*2 + 1).
 */
static int
mt7915_mcu_muar_config(struct mt7915_phy *phy, struct ieee80211_vif *vif,
		       bool bssid, bool enable)
{
	struct mt7915_dev *dev = phy->dev;
	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
	u32 idx = mvif->mt76.omac_idx - REPEATER_BSSID_START;
	u32 mask = phy->omac_mask >> 32 & ~BIT(idx);
	const u8 *addr = vif->addr;
	struct {
		u8 mode;
		u8 force_clear;
		u8 clear_bitmap[8];
		u8 entry_count;
		u8 write;
		u8 band;

		u8 index;
		u8 bssid;
		u8 addr[ETH_ALEN];
	} __packed req = {
		/* keep MUAR mode on while any other repeater entry is live */
		.mode = !!mask || enable,
		.entry_count = 1,
		.write = 1,
		.band = phy->mt76->band_idx,
		.index = idx * 2 + bssid,
	};

	if (bssid)
		addr = vif->bss_conf.bssid;

	if (enable)
		ether_addr_copy(req.addr, addr);

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MUAR_UPDATE), &req,
				 sizeof(req), true);
}

/*
 * Create or tear down the firmware BSS context for @vif.  Builds the
 * BSS_INFO_UPDATE command from the TLV helpers above; the OMAC TLV must be
 * first in the message.  Monitor interfaces only get the basic TLVs.
 */
int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
			    struct ieee80211_vif *vif, int enable)
{
	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
	struct mt7915_dev *dev = phy->dev;
	struct sk_buff *skb;

	if (mvif->mt76.omac_idx >= REPEATER_BSSID_START) {
		mt7915_mcu_muar_config(phy, vif, false, enable);
		mt7915_mcu_muar_config(phy, vif, true, enable);
	}

	skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, NULL,
					      MT7915_BSS_UPDATE_MAX_SIZE);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* bss_omac must be first */
	if (enable)
		mt76_connac_mcu_bss_omac_tlv(skb, vif);

	mt76_connac_mcu_bss_basic_tlv(skb, vif, NULL, phy->mt76,
				      mvif->sta.wcid.idx, enable);

	if (vif->type == NL80211_IFTYPE_MONITOR)
		goto out;

	if (enable) {
		mt7915_mcu_bss_rfch_tlv(skb, vif, phy);
		mt7915_mcu_bss_bmc_tlv(skb, phy);
		mt7915_mcu_bss_ra_tlv(skb, vif, phy);
		mt7915_mcu_bss_hw_amsdu_tlv(skb);

		if (vif->bss_conf.he_support)
			mt7915_mcu_bss_he_tlv(skb, vif, phy);

		if (mvif->mt76.omac_idx >= EXT_BSSID_START &&
		    mvif->mt76.omac_idx < REPEATER_BSSID_START)
			mt76_connac_mcu_bss_ext_tlv(skb, &mvif->mt76);
	}
out:
	return mt76_mcu_skb_send_msg(&dev->mt76, skb,
				     MCU_EXT_CMD(BSS_INFO_UPDATE), true);
}

/** starec & wtbl **/
/*
 * Set up a tx block-ack session.  The tx worker is paused so the A-MSDU flag
 * cannot flip under a concurrent transmission while the firmware is updated.
 */
int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
			 struct ieee80211_ampdu_params *params,
			 bool enable)
{
	struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv;
	struct mt7915_vif *mvif = msta->vif;
	int ret;

	mt76_worker_disable(&dev->mt76.tx_worker);
	if (enable && !params->amsdu)
		msta->wcid.amsdu = false;
	ret = mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
				     MCU_EXT_CMD(STA_REC_UPDATE),
				     enable, true);
	mt76_worker_enable(&dev->mt76.tx_worker);

	return ret;
}

/* Set up or tear down an rx block-ack session */
int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
			 struct ieee80211_ampdu_params *params,
			 bool enable)
{
	struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv;
	struct mt7915_vif *mvif = msta->vif;

	return mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
				      MCU_EXT_CMD(STA_REC_UPDATE),
				      enable, false);
}

/*
 * STA_REC_HE TLV: translate the station's HE MAC/PHY capability element into
 * the firmware's capability bitmap, MCS maps and DCM/PPE parameters.
 */
static void
mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
		      struct ieee80211_vif *vif)
{
	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
	struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem;
	struct ieee80211_he_mcs_nss_supp mcs_map;
	struct sta_rec_he *he;
	struct tlv *tlv;
	u32 cap = 0;

	if (!sta->deflink.he_cap.has_he)
		return;

	tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he));

	he = (struct sta_rec_he *)tlv;

	if (elem->mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE)
		cap |= STA_REC_HE_CAP_HTC;

	if (elem->mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
		cap |= STA_REC_HE_CAP_BSR;

	if (elem->mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
		cap |= STA_REC_HE_CAP_OM;

	if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU)
		cap |= STA_REC_HE_CAP_AMSDU_IN_AMPDU;

	if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
		cap |= STA_REC_HE_CAP_BQR;

	if (elem->phy_cap_info[0] &
	    (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G |
	     IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G))
		cap |= STA_REC_HE_CAP_BW20_RU242_SUPPORT;

	/* LDPC additionally gated by the per-vif capability override */
	if (mvif->cap.he_ldpc &&
	    (elem->phy_cap_info[1] &
	     IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
		cap |= STA_REC_HE_CAP_LDPC;

	if (elem->phy_cap_info[1] &
	    IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US)
		cap |= STA_REC_HE_CAP_SU_PPDU_1LTF_8US_GI;

	if (elem->phy_cap_info[2] &
	    IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US)
		cap |= STA_REC_HE_CAP_NDP_4LTF_3DOT2MS_GI;

	if (elem->phy_cap_info[2] &
	    IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ)
		cap |= STA_REC_HE_CAP_LE_EQ_80M_TX_STBC;

	if (elem->phy_cap_info[2] &
	    IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
		cap |= STA_REC_HE_CAP_LE_EQ_80M_RX_STBC;

	if (elem->phy_cap_info[6] &
	    IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB)
		cap |= STA_REC_HE_CAP_TRIG_CQI_FK;

	if (elem->phy_cap_info[6] &
	    IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE)
		cap |= STA_REC_HE_CAP_PARTIAL_BW_EXT_RANGE;

	if (elem->phy_cap_info[7] &
	    IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI)
		cap |= STA_REC_HE_CAP_SU_MU_PPDU_4LTF_8US_GI;

	if (elem->phy_cap_info[7] &
	    IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ)
		cap |= STA_REC_HE_CAP_GT_80M_TX_STBC;

	if (elem->phy_cap_info[7] &
	    IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ)
		cap |= STA_REC_HE_CAP_GT_80M_RX_STBC;

	if (elem->phy_cap_info[8] &
	    IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI)
		cap |= STA_REC_HE_CAP_ER_SU_PPDU_4LTF_8US_GI;

	if (elem->phy_cap_info[8] &
	    IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI)
		cap |= STA_REC_HE_CAP_ER_SU_PPDU_1LTF_8US_GI;

	if (elem->phy_cap_info[9] &
	    IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU)
		cap |= STA_REC_HE_CAP_TX_1024QAM_UNDER_RU242;

	if (elem->phy_cap_info[9] &
	    IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU)
		cap |= STA_REC_HE_CAP_RX_1024QAM_UNDER_RU242;

	he->he_cap = cpu_to_le32(cap);

	mcs_map = sta->deflink.he_cap.he_mcs_nss_supp;
	switch (sta->deflink.bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		if (elem->phy_cap_info[0] &
		    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
			mt7915_mcu_set_sta_he_mcs(sta,
					&he->max_nss_mcs[CMD_HE_MCS_BW8080],
					le16_to_cpu(mcs_map.rx_mcs_80p80));

		mt7915_mcu_set_sta_he_mcs(sta,
					  &he->max_nss_mcs[CMD_HE_MCS_BW160],
					  le16_to_cpu(mcs_map.rx_mcs_160));
		fallthrough;
	default:
		mt7915_mcu_set_sta_he_mcs(sta,
					  &he->max_nss_mcs[CMD_HE_MCS_BW80],
					  le16_to_cpu(mcs_map.rx_mcs_80));
		break;
	}

	he->t_frame_dur =
		HE_MAC(CAP1_TF_MAC_PAD_DUR_MASK, elem->mac_cap_info[1]);
	he->max_ampdu_exp =
		HE_MAC(CAP3_MAX_AMPDU_LEN_EXP_MASK, elem->mac_cap_info[3]);

	he->bw_set =
		HE_PHY(CAP0_CHANNEL_WIDTH_SET_MASK, elem->phy_cap_info[0]);
	he->device_class =
		HE_PHY(CAP1_DEVICE_CLASS_A, elem->phy_cap_info[1]);
	he->punc_pream_rx =
		HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]);

	he->dcm_tx_mode =
		HE_PHY(CAP3_DCM_MAX_CONST_TX_MASK, elem->phy_cap_info[3]);
	he->dcm_tx_max_nss =
		HE_PHY(CAP3_DCM_MAX_TX_NSS_2, elem->phy_cap_info[3]);
	he->dcm_rx_mode =
		HE_PHY(CAP3_DCM_MAX_CONST_RX_MASK, elem->phy_cap_info[3]);
	he->dcm_rx_max_nss =
		HE_PHY(CAP3_DCM_MAX_RX_NSS_2, elem->phy_cap_info[3]);
	/*
	 * NOTE(review): this second store overwrites the CAP3 DCM rx NSS
	 * value assigned just above with the CAP8 DCM max-RU field; it looks
	 * like it was meant for a separate struct member (dcm_max_ru?) --
	 * verify against struct sta_rec_he before changing.
	 */
	he->dcm_rx_max_nss =
		HE_PHY(CAP8_DCM_MAX_RU_MASK, elem->phy_cap_info[8]);

	he->pkt_ext = 2;
}

/*
 * STA_REC_MURU TLV: DL/UL MU-MIMO and OFDMA capabilities derived from the
 * per-vif configuration and the station's VHT/HE capability elements.
 */
static void
mt7915_mcu_sta_muru_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
			struct ieee80211_sta *sta, struct ieee80211_vif *vif)
{
	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
	struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem;
	struct sta_rec_muru *muru;
	struct tlv *tlv;

	if (vif->type != NL80211_IFTYPE_STATION &&
	    vif->type != NL80211_IFTYPE_AP)
		return;

	tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru));

	muru = (struct sta_rec_muru *)tlv;

	muru->cfg.mimo_dl_en = mvif->cap.he_mu_ebfer ||
			       mvif->cap.vht_mu_ebfer ||
			       mvif->cap.vht_mu_ebfee;
	/* UL MU-MIMO is not supported on the original mt7915 */
	if (!is_mt7915(&dev->mt76))
		muru->cfg.mimo_ul_en = true;
	muru->cfg.ofdma_dl_en = true;

	if (sta->deflink.vht_cap.vht_supported)
		muru->mimo_dl.vht_mu_bfee =
			!!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);

	if (!sta->deflink.he_cap.has_he)
		return;

	muru->mimo_dl.partial_bw_dl_mimo =
		HE_PHY(CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO, elem->phy_cap_info[6]);

	muru->mimo_ul.full_ul_mimo =
		HE_PHY(CAP2_UL_MU_FULL_MU_MIMO, elem->phy_cap_info[2]);
	muru->mimo_ul.partial_ul_mimo =
		HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]);

	muru->ofdma_dl.punc_pream_rx =
		HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]);
	muru->ofdma_dl.he_20m_in_40m_2g =
		HE_PHY(CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G, elem->phy_cap_info[8]);
	muru->ofdma_dl.he_20m_in_160m =
		HE_PHY(CAP8_20MHZ_IN_160MHZ_HE_PPDU, elem->phy_cap_info[8]);
	muru->ofdma_dl.he_80m_in_160m =
		HE_PHY(CAP8_80MHZ_IN_160MHZ_HE_PPDU, elem->phy_cap_info[8]);

	muru->ofdma_ul.t_frame_dur =
		HE_MAC(CAP1_TF_MAC_PAD_DUR_MASK, elem->mac_cap_info[1]);
	muru->ofdma_ul.mu_cascading =
		HE_MAC(CAP2_MU_CASCADING, elem->mac_cap_info[2]);
	muru->ofdma_ul.uo_ra =
		HE_MAC(CAP3_OFDMA_RA, elem->mac_cap_info[3]);
	muru->ofdma_ul.rx_ctrl_frame_to_mbss =
		HE_MAC(CAP3_RX_CTRL_FRAME_TO_MULTIBSS, elem->mac_cap_info[3]);
}

/* STA_REC_HT TLV: forward the station's raw HT capability field */
static void
mt7915_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
{
	struct sta_rec_ht *ht;
	struct tlv *tlv;

	if (!sta->deflink.ht_cap.ht_supported)
		return;

	tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));

	ht = (struct sta_rec_ht *)tlv;
	ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap);
}

/* STA_REC_VHT TLV: forward the station's VHT capability and MCS maps */
static void
mt7915_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
{
	struct sta_rec_vht *vht;
	struct tlv *tlv;

	if (!sta->deflink.vht_cap.vht_supported)
		return;

	tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));

	vht = (struct sta_rec_vht *)tlv;
	vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap);
	vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
	vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map;
}

/*
 * STA_REC_HW_AMSDU TLV: enable hardware A-MSDU aggregation and map the
 * station's advertised maximum A-MSDU length onto the closest MPDU size the
 * hardware supports (11454 only on non-mt7915 chips).
 */
static void
mt7915_mcu_sta_amsdu_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
			 struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
	struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
	struct sta_rec_amsdu *amsdu;
	struct tlv *tlv;

	if (vif->type != NL80211_IFTYPE_STATION &&
	    vif->type != NL80211_IFTYPE_AP)
		return;

	if (!sta->deflink.agg.max_amsdu_len)
		return;

	tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
	amsdu = (struct sta_rec_amsdu *)tlv;
	amsdu->max_amsdu_num = 8;
	amsdu->amsdu_en = true;
	msta->wcid.amsdu = true;

	switch (sta->deflink.agg.max_amsdu_len) {
	case IEEE80211_MAX_MPDU_LEN_VHT_11454:
		if (!is_mt7915(&dev->mt76)) {
			amsdu->max_mpdu_size =
				IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
			return;
		}
		fallthrough;
	case IEEE80211_MAX_MPDU_LEN_HT_7935:
	case IEEE80211_MAX_MPDU_LEN_VHT_7991:
		amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
		return;
	default:
		amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
		return;
	}
}

/*
 * STA_REC_WTBL TLV: allocate the nested WTBL request and fill the generic,
 * header-translation and (for real stations) HT WTBL sub-TLVs.  Falls back
 * to the per-vif broadcast station entry when @sta is NULL.
 */
static int
mt7915_mcu_sta_wtbl_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
			struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
	struct mt7915_sta *msta;
	struct wtbl_req_hdr *wtbl_hdr;
	struct mt76_wcid *wcid;
	struct tlv *tlv;

	msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
	wcid = sta ? &msta->wcid : NULL;

	tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
	wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
						  WTBL_RESET_AND_SET, tlv,
						  &skb);
	if (IS_ERR(wtbl_hdr))
		return PTR_ERR(wtbl_hdr);

	mt76_connac_mcu_wtbl_generic_tlv(&dev->mt76, skb, vif, sta, tlv,
					 wtbl_hdr);
	mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, tlv, wtbl_hdr);
	if (sta)
		mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, skb, sta, tlv,
					    wtbl_hdr, mvif->cap.ht_ldpc,
					    mvif->cap.vht_ldpc);

	return 0;
}

/*
 * Whether explicit beamforming can be used with @sta in the given role.
 * @bfee: true when we act as beamformee (so the peer must be a beamformer,
 * which is why the vif/peer capability checks are deliberately crossed).
 */
static inline bool
mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, bool bfee)
{
	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
	int sts = hweight16(phy->mt76->chainmask);

	if (vif->type != NL80211_IFTYPE_STATION &&
	    vif->type != NL80211_IFTYPE_AP)
		return false;

	if (!bfee && sts < 2)
		return false;

	if (sta->deflink.he_cap.has_he) {
		struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem;

		if (bfee)
			return mvif->cap.he_su_ebfee &&
			       HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]);
		else
			return mvif->cap.he_su_ebfer &&
			       HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]);
	}

	if (sta->deflink.vht_cap.vht_supported) {
		u32 cap = sta->deflink.vht_cap.cap;

		if (bfee)
			return mvif->cap.vht_su_ebfee &&
			       (cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
		else
			return mvif->cap.vht_su_ebfer &&
			       (cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
	}

	return false;
}

/* Fixed sounding-frame rates used for explicit beamforming */
static void
mt7915_mcu_sta_sounding_rate(struct sta_rec_bf *bf)
{
	bf->sounding_phy = MT_PHY_TYPE_OFDM;
	bf->ndp_rate = 0;				/* mcs0 */
	bf->ndpa_rate = MT7915_CFEND_RATE_DEFAULT;	/* ofdm 24m */
	bf->rept_poll_rate = MT7915_CFEND_RATE_DEFAULT;	/* ofdm 24m */
}

/*
 * Beamformer parameters for an HT peer: derive the column count from the
 * peer's tx stream count (or highest rx MCS set as fallback).
 */
static void
mt7915_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7915_phy *phy,
		       struct sta_rec_bf *bf)
{
	struct ieee80211_mcs_info *mcs = &sta->deflink.ht_cap.mcs;
	u8 n = 0;

	bf->tx_mode = MT_PHY_TYPE_HT;

	if ((mcs->tx_params & IEEE80211_HT_MCS_TX_RX_DIFF) &&
	    (mcs->tx_params & IEEE80211_HT_MCS_TX_DEFINED))
		n = FIELD_GET(IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK,
			      mcs->tx_params);
	else if (mcs->rx_mask[3])
		n = 3;
	else if (mcs->rx_mask[2])
		n = 2;
	else if (mcs->rx_mask[1])
		n = 1;

	bf->nrow = hweight8(phy->mt76->chainmask) - 1;
	bf->ncol = min_t(u8, bf->nrow, n);
	bf->ibf_ncol = n;
}

/*
 * Beamformer parameters for a VHT peer, for explicit (sounding-based) or
 * implicit beamforming.  Rows are limited to 1 on 160 MHz.
 */
static void
mt7915_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7915_phy *phy,
			struct sta_rec_bf *bf, bool explicit)
{
	struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap;
	struct ieee80211_sta_vht_cap *vc = &phy->mt76->sband_5g.sband.vht_cap;
	u16 mcs_map = le16_to_cpu(pc->vht_mcs.rx_mcs_map);
	u8 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
	u8 tx_ant = hweight8(phy->mt76->chainmask) - 1;

	bf->tx_mode = MT_PHY_TYPE_VHT;

	if (explicit) {
		u8 sts, snd_dim;

		mt7915_mcu_sta_sounding_rate(bf);

		sts = FIELD_GET(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK,
				pc->cap);
		snd_dim = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
				    vc->cap);
		bf->nrow = min_t(u8, min_t(u8, snd_dim, sts), tx_ant);
		bf->ncol = min_t(u8, nss_mcs, bf->nrow);
		bf->ibf_ncol = bf->ncol;

		if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
			bf->nrow = 1;
	} else {
		bf->nrow = tx_ant;
		bf->ncol = min_t(u8, nss_mcs, bf->nrow);
		bf->ibf_ncol = nss_mcs;

		if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
			bf->ibf_nrow = 1;
	}
}

/*
 * Beamformer parameters for an HE peer: sounding dimensions come from our
 * own HE capability, STS/NSS limits from the peer's.  160 MHz / 80+80
 * handling continues past this chunk (function truncated in this view).
 */
static void
mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
		       struct mt7915_phy *phy, struct sta_rec_bf *bf)
{
	struct ieee80211_sta_he_cap *pc = &sta->deflink.he_cap;
	struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
	const struct ieee80211_sta_he_cap *vc =
		mt76_connac_get_he_phy_cap(phy->mt76, vif);
	const struct ieee80211_he_cap_elem *ve = &vc->he_cap_elem;
	u16 mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80);
	u8 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
	u8 snd_dim, sts;

	bf->tx_mode = MT_PHY_TYPE_HE_SU;

	mt7915_mcu_sta_sounding_rate(bf);

	bf->trigger_su = HE_PHY(CAP6_TRIG_SU_BEAMFORMING_FB,
				pe->phy_cap_info[6]);
	bf->trigger_mu = HE_PHY(CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB,
				pe->phy_cap_info[6]);
	snd_dim = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
			 ve->phy_cap_info[5]);
	sts = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK,
		     pe->phy_cap_info[4]);
	bf->nrow = min_t(u8, snd_dim, sts);
	bf->ncol = min_t(u8, nss_mcs, bf->nrow);
	bf->ibf_ncol = bf->ncol;

	if (sta->deflink.bandwidth != IEEE80211_STA_RX_BW_160)
		return;

	/* go over for 160MHz and 80p80 */
	if (pe->phy_cap_info[0] &
	    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) {
		mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_160);
		nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);

		bf->ncol_gt_bw80 = nss_mcs;
	}

	if (pe->phy_cap_info[0] &
	    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
		mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80p80);
		nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);

		if (bf->ncol_gt_bw80)
			bf->ncol_gt_bw80 = min_t(u8, bf->ncol_gt_bw80, nss_mcs);
		else
			bf->ncol_gt_bw80 = nss_mcs;
	}

	snd_dim = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
			 ve->phy_cap_info[5]);
	sts = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK,
		     pe->phy_cap_info[4]);

bf->nrow_gt_bw80 = min_t(int, snd_dim, sts); 1237 } 1238 1239 static void 1240 mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb, 1241 struct ieee80211_vif *vif, struct ieee80211_sta *sta) 1242 { 1243 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1244 struct mt7915_phy *phy = mvif->phy; 1245 int tx_ant = hweight8(phy->mt76->chainmask) - 1; 1246 struct sta_rec_bf *bf; 1247 struct tlv *tlv; 1248 static const u8 matrix[4][4] = { 1249 {0, 0, 0, 0}, 1250 {1, 1, 0, 0}, /* 2x1, 2x2, 2x3, 2x4 */ 1251 {2, 4, 4, 0}, /* 3x1, 3x2, 3x3, 3x4 */ 1252 {3, 5, 6, 0} /* 4x1, 4x2, 4x3, 4x4 */ 1253 }; 1254 bool ebf; 1255 1256 if (!(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)) 1257 return; 1258 1259 ebf = mt7915_is_ebf_supported(phy, vif, sta, false); 1260 if (!ebf && !dev->ibf) 1261 return; 1262 1263 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf)); 1264 bf = (struct sta_rec_bf *)tlv; 1265 1266 /* he: eBF only, in accordance with spec 1267 * vht: support eBF and iBF 1268 * ht: iBF only, since mac80211 lacks of eBF support 1269 */ 1270 if (sta->deflink.he_cap.has_he && ebf) 1271 mt7915_mcu_sta_bfer_he(sta, vif, phy, bf); 1272 else if (sta->deflink.vht_cap.vht_supported) 1273 mt7915_mcu_sta_bfer_vht(sta, phy, bf, ebf); 1274 else if (sta->deflink.ht_cap.ht_supported) 1275 mt7915_mcu_sta_bfer_ht(sta, phy, bf); 1276 else 1277 return; 1278 1279 bf->bf_cap = ebf ? 
ebf : dev->ibf << 1; 1280 bf->bw = sta->deflink.bandwidth; 1281 bf->ibf_dbw = sta->deflink.bandwidth; 1282 bf->ibf_nrow = tx_ant; 1283 1284 if (!ebf && sta->deflink.bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol) 1285 bf->ibf_timeout = 0x48; 1286 else 1287 bf->ibf_timeout = 0x18; 1288 1289 if (ebf && bf->nrow != tx_ant) 1290 bf->mem_20m = matrix[tx_ant][bf->ncol]; 1291 else 1292 bf->mem_20m = matrix[bf->nrow][bf->ncol]; 1293 1294 switch (sta->deflink.bandwidth) { 1295 case IEEE80211_STA_RX_BW_160: 1296 case IEEE80211_STA_RX_BW_80: 1297 bf->mem_total = bf->mem_20m * 2; 1298 break; 1299 case IEEE80211_STA_RX_BW_40: 1300 bf->mem_total = bf->mem_20m; 1301 break; 1302 case IEEE80211_STA_RX_BW_20: 1303 default: 1304 break; 1305 } 1306 } 1307 1308 static void 1309 mt7915_mcu_sta_bfee_tlv(struct mt7915_dev *dev, struct sk_buff *skb, 1310 struct ieee80211_vif *vif, struct ieee80211_sta *sta) 1311 { 1312 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1313 struct mt7915_phy *phy = mvif->phy; 1314 int tx_ant = hweight8(phy->mt76->chainmask) - 1; 1315 struct sta_rec_bfee *bfee; 1316 struct tlv *tlv; 1317 u8 nrow = 0; 1318 1319 if (!(sta->deflink.vht_cap.vht_supported || sta->deflink.he_cap.has_he)) 1320 return; 1321 1322 if (!mt7915_is_ebf_supported(phy, vif, sta, true)) 1323 return; 1324 1325 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee)); 1326 bfee = (struct sta_rec_bfee *)tlv; 1327 1328 if (sta->deflink.he_cap.has_he) { 1329 struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem; 1330 1331 nrow = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK, 1332 pe->phy_cap_info[5]); 1333 } else if (sta->deflink.vht_cap.vht_supported) { 1334 struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap; 1335 1336 nrow = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK, 1337 pc->cap); 1338 } 1339 1340 /* reply with identity matrix to avoid 2x2 BF negative gain */ 1341 bfee->fb_identity_matrix = (nrow == 1 && tx_ant == 2); 1342 } 1343 1344 
static enum mcu_mmps_mode 1345 mt7915_mcu_get_mmps_mode(enum ieee80211_smps_mode smps) 1346 { 1347 switch (smps) { 1348 case IEEE80211_SMPS_OFF: 1349 return MCU_MMPS_DISABLE; 1350 case IEEE80211_SMPS_STATIC: 1351 return MCU_MMPS_STATIC; 1352 case IEEE80211_SMPS_DYNAMIC: 1353 return MCU_MMPS_DYNAMIC; 1354 default: 1355 return MCU_MMPS_DISABLE; 1356 } 1357 } 1358 1359 int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev, 1360 struct ieee80211_vif *vif, 1361 struct ieee80211_sta *sta, 1362 void *data, u32 field) 1363 { 1364 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1365 struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 1366 struct sta_phy *phy = data; 1367 struct sta_rec_ra_fixed *ra; 1368 struct sk_buff *skb; 1369 struct tlv *tlv; 1370 1371 skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, 1372 &msta->wcid); 1373 if (IS_ERR(skb)) 1374 return PTR_ERR(skb); 1375 1376 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra)); 1377 ra = (struct sta_rec_ra_fixed *)tlv; 1378 1379 switch (field) { 1380 case RATE_PARAM_AUTO: 1381 break; 1382 case RATE_PARAM_FIXED: 1383 case RATE_PARAM_FIXED_MCS: 1384 case RATE_PARAM_FIXED_GI: 1385 case RATE_PARAM_FIXED_HE_LTF: 1386 if (phy) 1387 ra->phy = *phy; 1388 break; 1389 case RATE_PARAM_MMPS_UPDATE: 1390 ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->deflink.smps_mode); 1391 break; 1392 case RATE_PARAM_SPE_UPDATE: 1393 ra->spe_idx = *(u8 *)data; 1394 break; 1395 default: 1396 break; 1397 } 1398 ra->field = cpu_to_le32(field); 1399 1400 return mt76_mcu_skb_send_msg(&dev->mt76, skb, 1401 MCU_EXT_CMD(STA_REC_UPDATE), true); 1402 } 1403 1404 int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif, 1405 struct ieee80211_sta *sta) 1406 { 1407 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1408 struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 1409 struct wtbl_req_hdr *wtbl_hdr; 1410 struct tlv *sta_wtbl; 1411 struct sk_buff *skb; 1412 int 
ret; 1413 1414 skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, 1415 &msta->wcid); 1416 if (IS_ERR(skb)) 1417 return PTR_ERR(skb); 1418 1419 sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL, 1420 sizeof(struct tlv)); 1421 wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid, 1422 WTBL_SET, sta_wtbl, &skb); 1423 if (IS_ERR(wtbl_hdr)) 1424 return PTR_ERR(wtbl_hdr); 1425 1426 mt76_connac_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr); 1427 1428 ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, 1429 MCU_EXT_CMD(STA_REC_UPDATE), true); 1430 if (ret) 1431 return ret; 1432 1433 return mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, NULL, 1434 RATE_PARAM_MMPS_UPDATE); 1435 } 1436 1437 static int 1438 mt7915_mcu_set_spe_idx(struct mt7915_dev *dev, struct ieee80211_vif *vif, 1439 struct ieee80211_sta *sta) 1440 { 1441 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1442 struct mt76_phy *mphy = mvif->phy->mt76; 1443 u8 spe_idx = mt76_connac_spe_idx(mphy->antenna_mask); 1444 1445 return mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &spe_idx, 1446 RATE_PARAM_SPE_UPDATE); 1447 } 1448 1449 static int 1450 mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev, 1451 struct ieee80211_vif *vif, 1452 struct ieee80211_sta *sta) 1453 { 1454 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1455 struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef; 1456 struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask; 1457 enum nl80211_band band = chandef->chan->band; 1458 struct sta_phy phy = {}; 1459 int ret, nrates = 0; 1460 1461 #define __sta_phy_bitrate_mask_check(_mcs, _gi, _ht, _he) \ 1462 do { \ 1463 u8 i, gi = mask->control[band]._gi; \ 1464 gi = (_he) ? 
gi : gi == NL80211_TXRATE_FORCE_SGI; \ 1465 for (i = 0; i <= sta->deflink.bandwidth; i++) { \ 1466 phy.sgi |= gi << (i << (_he)); \ 1467 phy.he_ltf |= mask->control[band].he_ltf << (i << (_he));\ 1468 } \ 1469 for (i = 0; i < ARRAY_SIZE(mask->control[band]._mcs); i++) { \ 1470 if (!mask->control[band]._mcs[i]) \ 1471 continue; \ 1472 nrates += hweight16(mask->control[band]._mcs[i]); \ 1473 phy.mcs = ffs(mask->control[band]._mcs[i]) - 1; \ 1474 if (_ht) \ 1475 phy.mcs += 8 * i; \ 1476 } \ 1477 } while (0) 1478 1479 if (sta->deflink.he_cap.has_he) { 1480 __sta_phy_bitrate_mask_check(he_mcs, he_gi, 0, 1); 1481 } else if (sta->deflink.vht_cap.vht_supported) { 1482 __sta_phy_bitrate_mask_check(vht_mcs, gi, 0, 0); 1483 } else if (sta->deflink.ht_cap.ht_supported) { 1484 __sta_phy_bitrate_mask_check(ht_mcs, gi, 1, 0); 1485 } else { 1486 nrates = hweight32(mask->control[band].legacy); 1487 phy.mcs = ffs(mask->control[band].legacy) - 1; 1488 } 1489 #undef __sta_phy_bitrate_mask_check 1490 1491 /* fall back to auto rate control */ 1492 if (mask->control[band].gi == NL80211_TXRATE_DEFAULT_GI && 1493 mask->control[band].he_gi == GENMASK(7, 0) && 1494 mask->control[band].he_ltf == GENMASK(7, 0) && 1495 nrates != 1) 1496 return 0; 1497 1498 /* fixed single rate */ 1499 if (nrates == 1) { 1500 ret = mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &phy, 1501 RATE_PARAM_FIXED_MCS); 1502 if (ret) 1503 return ret; 1504 } 1505 1506 /* fixed GI */ 1507 if (mask->control[band].gi != NL80211_TXRATE_DEFAULT_GI || 1508 mask->control[band].he_gi != GENMASK(7, 0)) { 1509 struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 1510 u32 addr; 1511 1512 /* firmware updates only TXCMD but doesn't take WTBL into 1513 * account, so driver should update here to reflect the 1514 * actual txrate hardware sends out. 
1515 */ 1516 addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 7); 1517 if (sta->deflink.he_cap.has_he) 1518 mt76_rmw_field(dev, addr, GENMASK(31, 24), phy.sgi); 1519 else 1520 mt76_rmw_field(dev, addr, GENMASK(15, 12), phy.sgi); 1521 1522 ret = mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &phy, 1523 RATE_PARAM_FIXED_GI); 1524 if (ret) 1525 return ret; 1526 } 1527 1528 /* fixed HE_LTF */ 1529 if (mask->control[band].he_ltf != GENMASK(7, 0)) { 1530 ret = mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &phy, 1531 RATE_PARAM_FIXED_HE_LTF); 1532 if (ret) 1533 return ret; 1534 } 1535 1536 return mt7915_mcu_set_spe_idx(dev, vif, sta); 1537 } 1538 1539 static void 1540 mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev, 1541 struct ieee80211_vif *vif, struct ieee80211_sta *sta) 1542 { 1543 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1544 struct mt76_phy *mphy = mvif->phy->mt76; 1545 struct cfg80211_chan_def *chandef = &mphy->chandef; 1546 struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask; 1547 enum nl80211_band band = chandef->chan->band; 1548 struct sta_rec_ra *ra; 1549 struct tlv *tlv; 1550 u32 supp_rate = sta->deflink.supp_rates[band]; 1551 u32 cap = sta->wme ? 
STA_CAP_WMM : 0; 1552 1553 tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra)); 1554 ra = (struct sta_rec_ra *)tlv; 1555 1556 ra->valid = true; 1557 ra->auto_rate = true; 1558 ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, &sta->deflink); 1559 ra->channel = chandef->chan->hw_value; 1560 ra->bw = sta->deflink.bandwidth; 1561 ra->phy.bw = sta->deflink.bandwidth; 1562 ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->deflink.smps_mode); 1563 1564 if (supp_rate) { 1565 supp_rate &= mask->control[band].legacy; 1566 ra->rate_len = hweight32(supp_rate); 1567 1568 if (band == NL80211_BAND_2GHZ) { 1569 ra->supp_mode = MODE_CCK; 1570 ra->supp_cck_rate = supp_rate & GENMASK(3, 0); 1571 1572 if (ra->rate_len > 4) { 1573 ra->supp_mode |= MODE_OFDM; 1574 ra->supp_ofdm_rate = supp_rate >> 4; 1575 } 1576 } else { 1577 ra->supp_mode = MODE_OFDM; 1578 ra->supp_ofdm_rate = supp_rate; 1579 } 1580 } 1581 1582 if (sta->deflink.ht_cap.ht_supported) { 1583 ra->supp_mode |= MODE_HT; 1584 ra->af = sta->deflink.ht_cap.ampdu_factor; 1585 ra->ht_gf = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD); 1586 1587 cap |= STA_CAP_HT; 1588 if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) 1589 cap |= STA_CAP_SGI_20; 1590 if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) 1591 cap |= STA_CAP_SGI_40; 1592 if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_TX_STBC) 1593 cap |= STA_CAP_TX_STBC; 1594 if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC) 1595 cap |= STA_CAP_RX_STBC; 1596 if (mvif->cap.ht_ldpc && 1597 (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)) 1598 cap |= STA_CAP_LDPC; 1599 1600 mt7915_mcu_set_sta_ht_mcs(sta, ra->ht_mcs, 1601 mask->control[band].ht_mcs); 1602 ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs; 1603 } 1604 1605 if (sta->deflink.vht_cap.vht_supported) { 1606 u8 af; 1607 1608 ra->supp_mode |= MODE_VHT; 1609 af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK, 1610 sta->deflink.vht_cap.cap); 1611 ra->af = max_t(u8, ra->af, af); 1612 
1613 cap |= STA_CAP_VHT; 1614 if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80) 1615 cap |= STA_CAP_VHT_SGI_80; 1616 if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160) 1617 cap |= STA_CAP_VHT_SGI_160; 1618 if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC) 1619 cap |= STA_CAP_VHT_TX_STBC; 1620 if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1) 1621 cap |= STA_CAP_VHT_RX_STBC; 1622 if (mvif->cap.vht_ldpc && 1623 (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)) 1624 cap |= STA_CAP_VHT_LDPC; 1625 1626 mt7915_mcu_set_sta_vht_mcs(sta, ra->supp_vht_mcs, 1627 mask->control[band].vht_mcs); 1628 } 1629 1630 if (sta->deflink.he_cap.has_he) { 1631 ra->supp_mode |= MODE_HE; 1632 cap |= STA_CAP_HE; 1633 1634 if (sta->deflink.he_6ghz_capa.capa) 1635 ra->af = le16_get_bits(sta->deflink.he_6ghz_capa.capa, 1636 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); 1637 } 1638 1639 ra->sta_cap = cpu_to_le32(cap); 1640 } 1641 1642 int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif, 1643 struct ieee80211_sta *sta, bool changed) 1644 { 1645 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1646 struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 1647 struct sk_buff *skb; 1648 int ret; 1649 1650 skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, 1651 &msta->wcid); 1652 if (IS_ERR(skb)) 1653 return PTR_ERR(skb); 1654 1655 /* firmware rc algorithm refers to sta_rec_he for HE control. 1656 * once dev->rc_work changes the settings driver should also 1657 * update sta_rec_he here. 1658 */ 1659 if (changed) 1660 mt7915_mcu_sta_he_tlv(skb, sta, vif); 1661 1662 /* sta_rec_ra accommodates BW, NSS and only MCS range format 1663 * i.e 0-{7,8,9} for VHT. 
1664 */ 1665 mt7915_mcu_sta_rate_ctrl_tlv(skb, dev, vif, sta); 1666 1667 ret = mt76_mcu_skb_send_msg(&dev->mt76, skb, 1668 MCU_EXT_CMD(STA_REC_UPDATE), true); 1669 if (ret) 1670 return ret; 1671 1672 /* sta_rec_ra_fixed accommodates single rate, (HE)GI and HE_LTE, 1673 * and updates as peer fixed rate parameters, which overrides 1674 * sta_rec_ra and firmware rate control algorithm. 1675 */ 1676 return mt7915_mcu_add_rate_ctrl_fixed(dev, vif, sta); 1677 } 1678 1679 static int 1680 mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif, 1681 struct ieee80211_sta *sta) 1682 { 1683 #define MT_STA_BSS_GROUP 1 1684 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1685 struct mt7915_sta *msta; 1686 struct { 1687 __le32 action; 1688 u8 wlan_idx_lo; 1689 u8 status; 1690 u8 wlan_idx_hi; 1691 u8 rsv0[5]; 1692 __le32 val; 1693 u8 rsv1[8]; 1694 } __packed req = { 1695 .action = cpu_to_le32(MT_STA_BSS_GROUP), 1696 .val = cpu_to_le32(mvif->mt76.idx % 16), 1697 }; 1698 1699 msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta; 1700 req.wlan_idx_lo = to_wcid_lo(msta->wcid.idx); 1701 req.wlan_idx_hi = to_wcid_hi(msta->wcid.idx); 1702 1703 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_DRR_CTRL), &req, 1704 sizeof(req), true); 1705 } 1706 1707 int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, 1708 struct ieee80211_sta *sta, int conn_state, bool newly) 1709 { 1710 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1711 struct ieee80211_link_sta *link_sta; 1712 struct mt7915_sta *msta; 1713 struct sk_buff *skb; 1714 int ret; 1715 1716 msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta; 1717 link_sta = sta ? 
&sta->deflink : NULL; 1718 1719 skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, 1720 &msta->wcid); 1721 if (IS_ERR(skb)) 1722 return PTR_ERR(skb); 1723 1724 /* starec basic */ 1725 mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, &vif->bss_conf, link_sta, 1726 conn_state, newly); 1727 /* tag order is in accordance with firmware dependency. */ 1728 if (sta && conn_state != CONN_STATE_DISCONNECT) { 1729 /* starec bfer */ 1730 mt7915_mcu_sta_bfer_tlv(dev, skb, vif, sta); 1731 /* starec ht */ 1732 mt7915_mcu_sta_ht_tlv(skb, sta); 1733 /* starec vht */ 1734 mt7915_mcu_sta_vht_tlv(skb, sta); 1735 /* starec uapsd */ 1736 mt76_connac_mcu_sta_uapsd(skb, vif, sta); 1737 } 1738 1739 if (newly || conn_state != CONN_STATE_DISCONNECT) { 1740 ret = mt7915_mcu_sta_wtbl_tlv(dev, skb, vif, sta); 1741 if (ret) { 1742 dev_kfree_skb(skb); 1743 return ret; 1744 } 1745 } 1746 1747 if (conn_state == CONN_STATE_DISCONNECT) 1748 goto out; 1749 1750 if (sta) { 1751 /* starec amsdu */ 1752 mt7915_mcu_sta_amsdu_tlv(dev, skb, vif, sta); 1753 /* starec he */ 1754 mt7915_mcu_sta_he_tlv(skb, sta, vif); 1755 /* starec muru */ 1756 mt7915_mcu_sta_muru_tlv(dev, skb, sta, vif); 1757 /* starec bfee */ 1758 mt7915_mcu_sta_bfee_tlv(dev, skb, vif, sta); 1759 } 1760 1761 ret = mt7915_mcu_add_group(dev, vif, sta); 1762 if (ret) { 1763 dev_kfree_skb(skb); 1764 return ret; 1765 } 1766 out: 1767 ret = mt76_connac_mcu_sta_wed_update(&dev->mt76, skb); 1768 if (ret) { 1769 dev_kfree_skb(skb); 1770 return ret; 1771 } 1772 1773 return mt76_mcu_skb_send_msg(&dev->mt76, skb, 1774 MCU_EXT_CMD(STA_REC_UPDATE), true); 1775 } 1776 1777 int mt7915_mcu_wed_enable_rx_stats(struct mt7915_dev *dev) 1778 { 1779 #ifdef CONFIG_NET_MEDIATEK_SOC_WED 1780 struct mtk_wed_device *wed = &dev->mt76.mmio.wed; 1781 struct { 1782 __le32 args[2]; 1783 } req = { 1784 .args[0] = cpu_to_le32(1), 1785 .args[1] = cpu_to_le32(6), 1786 }; 1787 1788 return mtk_wed_device_update_msg(wed, MTK_WED_WO_CMD_RXCNT_CTRL, 1789 &req, 
sizeof(req)); 1790 #else 1791 return 0; 1792 #endif 1793 } 1794 1795 int mt7915_mcu_add_dev_info(struct mt7915_phy *phy, 1796 struct ieee80211_vif *vif, bool enable) 1797 { 1798 struct mt7915_dev *dev = phy->dev; 1799 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1800 struct { 1801 struct req_hdr { 1802 u8 omac_idx; 1803 u8 band_idx; 1804 __le16 tlv_num; 1805 u8 is_tlv_append; 1806 u8 rsv[3]; 1807 } __packed hdr; 1808 struct req_tlv { 1809 __le16 tag; 1810 __le16 len; 1811 u8 active; 1812 u8 band_idx; 1813 u8 omac_addr[ETH_ALEN]; 1814 } __packed tlv; 1815 } data = { 1816 .hdr = { 1817 .omac_idx = mvif->mt76.omac_idx, 1818 .band_idx = mvif->mt76.band_idx, 1819 .tlv_num = cpu_to_le16(1), 1820 .is_tlv_append = 1, 1821 }, 1822 .tlv = { 1823 .tag = cpu_to_le16(DEV_INFO_ACTIVE), 1824 .len = cpu_to_le16(sizeof(struct req_tlv)), 1825 .active = enable, 1826 .band_idx = mvif->mt76.band_idx, 1827 }, 1828 }; 1829 1830 if (mvif->mt76.omac_idx >= REPEATER_BSSID_START) 1831 return mt7915_mcu_muar_config(phy, vif, false, enable); 1832 1833 memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN); 1834 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(DEV_INFO_UPDATE), 1835 &data, sizeof(data), true); 1836 } 1837 1838 static void 1839 mt7915_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb, 1840 struct sk_buff *skb, struct bss_info_bcn *bcn, 1841 struct ieee80211_mutable_offsets *offs) 1842 { 1843 struct bss_info_bcn_cntdwn *info; 1844 struct tlv *tlv; 1845 int sub_tag; 1846 1847 if (!offs->cntdwn_counter_offs[0]) 1848 return; 1849 1850 sub_tag = vif->bss_conf.csa_active ? 
BSS_INFO_BCN_CSA : BSS_INFO_BCN_BCC; 1851 tlv = mt7915_mcu_add_nested_subtlv(rskb, sub_tag, sizeof(*info), 1852 &bcn->sub_ntlv, &bcn->len); 1853 info = (struct bss_info_bcn_cntdwn *)tlv; 1854 info->cnt = skb->data[offs->cntdwn_counter_offs[0]]; 1855 } 1856 1857 static void 1858 mt7915_mcu_beacon_mbss(struct sk_buff *rskb, struct sk_buff *skb, 1859 struct ieee80211_vif *vif, struct bss_info_bcn *bcn, 1860 struct ieee80211_mutable_offsets *offs) 1861 { 1862 struct bss_info_bcn_mbss *mbss; 1863 const struct element *elem; 1864 struct tlv *tlv; 1865 1866 if (!vif->bss_conf.bssid_indicator) 1867 return; 1868 1869 tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_MBSSID, 1870 sizeof(*mbss), &bcn->sub_ntlv, 1871 &bcn->len); 1872 1873 mbss = (struct bss_info_bcn_mbss *)tlv; 1874 mbss->offset[0] = cpu_to_le16(offs->tim_offset); 1875 mbss->bitmap = cpu_to_le32(1); 1876 1877 for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, 1878 &skb->data[offs->mbssid_off], 1879 skb->len - offs->mbssid_off) { 1880 const struct element *sub_elem; 1881 1882 if (elem->datalen < 2) 1883 continue; 1884 1885 for_each_element(sub_elem, elem->data + 1, elem->datalen - 1) { 1886 const struct ieee80211_bssid_index *idx; 1887 const u8 *idx_ie; 1888 1889 if (sub_elem->id || sub_elem->datalen < 4) 1890 continue; /* not a valid BSS profile */ 1891 1892 /* Find WLAN_EID_MULTI_BSSID_IDX 1893 * in the merged nontransmitted profile 1894 */ 1895 idx_ie = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX, 1896 sub_elem->data, 1897 sub_elem->datalen); 1898 if (!idx_ie || idx_ie[1] < sizeof(*idx)) 1899 continue; 1900 1901 idx = (void *)(idx_ie + 2); 1902 if (!idx->bssid_index || idx->bssid_index > 31) 1903 continue; 1904 1905 mbss->offset[idx->bssid_index] = 1906 cpu_to_le16(idx_ie - skb->data); 1907 mbss->bitmap |= cpu_to_le32(BIT(idx->bssid_index)); 1908 } 1909 } 1910 } 1911 1912 static void 1913 mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif, 1914 struct sk_buff *rskb, struct sk_buff 
*skb, 1915 struct bss_info_bcn *bcn, 1916 struct ieee80211_mutable_offsets *offs) 1917 { 1918 struct mt76_wcid *wcid = &dev->mt76.global_wcid; 1919 struct bss_info_bcn_cont *cont; 1920 struct tlv *tlv; 1921 u8 *buf; 1922 int len = sizeof(*cont) + MT_TXD_SIZE + skb->len; 1923 1924 len = (len & 0x3) ? ((len | 0x3) + 1) : len; 1925 tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_CONTENT, 1926 len, &bcn->sub_ntlv, &bcn->len); 1927 1928 cont = (struct bss_info_bcn_cont *)tlv; 1929 cont->pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len); 1930 cont->tim_ofs = cpu_to_le16(offs->tim_offset); 1931 1932 if (offs->cntdwn_counter_offs[0]) { 1933 u16 offset = offs->cntdwn_counter_offs[0]; 1934 1935 if (vif->bss_conf.csa_active) 1936 cont->csa_ofs = cpu_to_le16(offset - 4); 1937 if (vif->bss_conf.color_change_active) 1938 cont->bcc_ofs = cpu_to_le16(offset - 3); 1939 } 1940 1941 buf = (u8 *)tlv + sizeof(*cont); 1942 mt7915_mac_write_txwi(&dev->mt76, (__le32 *)buf, skb, wcid, 0, NULL, 1943 0, BSS_CHANGED_BEACON); 1944 memcpy(buf + MT_TXD_SIZE, skb->data, skb->len); 1945 } 1946 1947 int 1948 mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif, 1949 u32 changed) 1950 { 1951 #define OFFLOAD_TX_MODE_SU BIT(0) 1952 #define OFFLOAD_TX_MODE_MU BIT(1) 1953 struct ieee80211_hw *hw = mt76_hw(dev); 1954 struct mt7915_phy *phy = mt7915_hw_phy(hw); 1955 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1956 struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef; 1957 enum nl80211_band band = chandef->chan->band; 1958 struct mt76_wcid *wcid = &dev->mt76.global_wcid; 1959 struct bss_info_bcn *bcn; 1960 struct bss_info_inband_discovery *discov; 1961 struct ieee80211_tx_info *info; 1962 struct sk_buff *rskb, *skb = NULL; 1963 struct tlv *tlv, *sub_tlv; 1964 bool ext_phy = phy != &dev->phy; 1965 u8 *buf, interval; 1966 int len; 1967 1968 if (vif->bss_conf.nontransmitted) 1969 return 0; 1970 1971 rskb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, 
&mvif->mt76, NULL, 1972 MT7915_MAX_BSS_OFFLOAD_SIZE); 1973 if (IS_ERR(rskb)) 1974 return PTR_ERR(rskb); 1975 1976 tlv = mt76_connac_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn)); 1977 bcn = (struct bss_info_bcn *)tlv; 1978 bcn->enable = true; 1979 1980 if (changed & BSS_CHANGED_FILS_DISCOVERY) { 1981 interval = vif->bss_conf.fils_discovery.max_interval; 1982 skb = ieee80211_get_fils_discovery_tmpl(hw, vif, 0); 1983 } else if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP && 1984 vif->bss_conf.unsol_bcast_probe_resp_interval) { 1985 interval = vif->bss_conf.unsol_bcast_probe_resp_interval; 1986 skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif, 0); 1987 } 1988 1989 if (!skb) { 1990 dev_kfree_skb(rskb); 1991 return -EINVAL; 1992 } 1993 1994 info = IEEE80211_SKB_CB(skb); 1995 info->control.vif = vif; 1996 info->band = band; 1997 info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, ext_phy); 1998 1999 len = sizeof(*discov) + MT_TXD_SIZE + skb->len; 2000 len = (len & 0x3) ? ((len | 0x3) + 1) : len; 2001 2002 if (skb->len > MT7915_MAX_BEACON_SIZE) { 2003 dev_err(dev->mt76.dev, "inband discovery size limit exceed\n"); 2004 dev_kfree_skb(rskb); 2005 dev_kfree_skb(skb); 2006 return -EINVAL; 2007 } 2008 2009 sub_tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_DISCOV, 2010 len, &bcn->sub_ntlv, &bcn->len); 2011 discov = (struct bss_info_inband_discovery *)sub_tlv; 2012 discov->tx_mode = OFFLOAD_TX_MODE_SU; 2013 /* 0: UNSOL PROBE RESP, 1: FILS DISCOV */ 2014 discov->tx_type = !!(changed & BSS_CHANGED_FILS_DISCOVERY); 2015 discov->tx_interval = interval; 2016 discov->prob_rsp_len = cpu_to_le16(MT_TXD_SIZE + skb->len); 2017 discov->enable = !!interval; 2018 2019 buf = (u8 *)sub_tlv + sizeof(*discov); 2020 2021 mt7915_mac_write_txwi(&dev->mt76, (__le32 *)buf, skb, wcid, 0, NULL, 2022 0, changed); 2023 memcpy(buf + MT_TXD_SIZE, skb->data, skb->len); 2024 2025 dev_kfree_skb(skb); 2026 2027 return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb, 2028 
MCU_EXT_CMD(BSS_INFO_UPDATE), true); 2029 } 2030 2031 int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 2032 int en, u32 changed) 2033 { 2034 struct mt7915_dev *dev = mt7915_hw_dev(hw); 2035 struct mt7915_phy *phy = mt7915_hw_phy(hw); 2036 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 2037 struct ieee80211_mutable_offsets offs; 2038 struct ieee80211_tx_info *info; 2039 struct sk_buff *skb, *rskb; 2040 struct tlv *tlv; 2041 struct bss_info_bcn *bcn; 2042 int len = MT7915_MAX_BSS_OFFLOAD_SIZE; 2043 bool ext_phy = phy != &dev->phy; 2044 2045 if (vif->bss_conf.nontransmitted) 2046 return 0; 2047 2048 rskb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, 2049 NULL, len); 2050 if (IS_ERR(rskb)) 2051 return PTR_ERR(rskb); 2052 2053 tlv = mt76_connac_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn)); 2054 bcn = (struct bss_info_bcn *)tlv; 2055 bcn->enable = en; 2056 2057 if (!en) 2058 goto out; 2059 2060 skb = ieee80211_beacon_get_template(hw, vif, &offs, 0); 2061 if (!skb) { 2062 dev_kfree_skb(rskb); 2063 return -EINVAL; 2064 } 2065 2066 if (skb->len > MT7915_MAX_BEACON_SIZE) { 2067 dev_err(dev->mt76.dev, "Bcn size limit exceed\n"); 2068 dev_kfree_skb(rskb); 2069 dev_kfree_skb(skb); 2070 return -EINVAL; 2071 } 2072 2073 info = IEEE80211_SKB_CB(skb); 2074 info->hw_queue = FIELD_PREP(MT_TX_HW_QUEUE_PHY, ext_phy); 2075 2076 mt7915_mcu_beacon_cntdwn(vif, rskb, skb, bcn, &offs); 2077 mt7915_mcu_beacon_mbss(rskb, skb, vif, bcn, &offs); 2078 mt7915_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs); 2079 dev_kfree_skb(skb); 2080 2081 out: 2082 return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb, 2083 MCU_EXT_CMD(BSS_INFO_UPDATE), true); 2084 } 2085 2086 static int mt7915_driver_own(struct mt7915_dev *dev, u8 band) 2087 { 2088 mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(band), MT_TOP_LPCR_HOST_DRV_OWN); 2089 if (!mt76_poll_msec(dev, MT_TOP_LPCR_HOST_BAND(band), 2090 MT_TOP_LPCR_HOST_FW_OWN_STAT, 0, 500)) { 2091 dev_err(dev->mt76.dev, 
"Timeout for driver own\n"); 2092 return -EIO; 2093 } 2094 2095 /* clear irq when the driver own success */ 2096 mt76_wr(dev, MT_TOP_LPCR_HOST_BAND_IRQ_STAT(band), 2097 MT_TOP_LPCR_HOST_BAND_STAT); 2098 2099 return 0; 2100 } 2101 2102 static int 2103 mt7915_firmware_state(struct mt7915_dev *dev, bool wa) 2104 { 2105 u32 state = FIELD_PREP(MT_TOP_MISC_FW_STATE, 2106 wa ? FW_STATE_RDY : FW_STATE_FW_DOWNLOAD); 2107 2108 if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE, 2109 state, 1000)) { 2110 dev_err(dev->mt76.dev, "Timeout for initializing firmware\n"); 2111 return -EIO; 2112 } 2113 return 0; 2114 } 2115 2116 static int mt7915_load_firmware(struct mt7915_dev *dev) 2117 { 2118 int ret; 2119 2120 /* Release Semaphore if taken by previous failed attempt */ 2121 ret = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, false); 2122 if (ret != PATCH_REL_SEM_SUCCESS) { 2123 dev_err(dev->mt76.dev, "Could not release semaphore\n"); 2124 /* Continue anyways */ 2125 } 2126 2127 /* Always restart MCU firmware */ 2128 mt76_connac_mcu_restart(&dev->mt76); 2129 2130 /* Check if MCU is ready */ 2131 ret = mt7915_firmware_state(dev, false); 2132 if (ret) { 2133 dev_err(dev->mt76.dev, "Firmware did not enter download state\n"); 2134 return ret; 2135 } 2136 2137 ret = mt76_connac2_load_patch(&dev->mt76, fw_name_var(dev, ROM_PATCH)); 2138 if (ret) 2139 return ret; 2140 2141 ret = mt76_connac2_load_ram(&dev->mt76, fw_name_var(dev, FIRMWARE_WM), 2142 fw_name(dev, FIRMWARE_WA)); 2143 if (ret) 2144 return ret; 2145 2146 ret = mt7915_firmware_state(dev, true); 2147 if (ret) 2148 return ret; 2149 2150 mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false); 2151 2152 dev_dbg(dev->mt76.dev, "Firmware init done\n"); 2153 2154 return 0; 2155 } 2156 2157 int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl) 2158 { 2159 struct { 2160 u8 ctrl_val; 2161 u8 pad[3]; 2162 } data = { 2163 .ctrl_val = ctrl 2164 }; 2165 2166 if (type == MCU_FW_LOG_WA) 2167 return 
		mt76_mcu_send_msg(&dev->mt76, MCU_WA_EXT_CMD(FW_LOG_2_HOST),
				  &data, sizeof(data), true);

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(FW_LOG_2_HOST), &data,
				 sizeof(data), true);
}

/* Set the WM firmware debug verbosity for one module index.
 * Fire-and-forget (no response wait).
 */
int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level)
{
	struct {
		u8 ver;
		u8 pad;
		__le16 len;
		u8 level;
		u8 rsv[3];
		__le32 module_idx;
	} data = {
		.module_idx = cpu_to_le32(module),
		.level = level,
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(FW_DBG_CTRL), &data,
				 sizeof(data), false);
}

/* Enable/disable MU-MIMO/OFDMA TX statistics collection in firmware. */
int mt7915_mcu_muru_debug_set(struct mt7915_dev *dev, bool enabled)
{
	struct {
		__le32 cmd;
		u8 enable;
	} data = {
		.cmd = cpu_to_le32(MURU_SET_TXC_TX_STATS_EN),
		.enable = enabled,
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &data,
				 sizeof(data), false);
}

/* Query MU-MIMO/OFDMA TX counters for @phy's band and accumulate them into
 * the per-phy MIB.  The firmware counters are clear-on-read, hence the
 * "+=" accumulation below.  Returns 0 or a negative errno.
 */
int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	struct sk_buff *skb;
	struct mt7915_mcu_muru_stats *mu_stats;
	int ret;

	struct {
		__le32 cmd;
		u8 band_idx;
	} req = {
		.cmd = cpu_to_le32(MURU_GET_TXC_TX_STATS),
		.band_idx = phy->mt76->band_idx,
	};

	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL),
					&req, sizeof(req), true, &skb);
	if (ret)
		return ret;

	mu_stats = (struct mt7915_mcu_muru_stats *)(skb->data);

	/* accumulate stats, these are clear-on-read */
#define __dl_u32(s)	phy->mib.dl_##s += le32_to_cpu(mu_stats->dl.s)
#define __ul_u32(s)	phy->mib.ul_##s += le32_to_cpu(mu_stats->ul.s)
	__dl_u32(cck_cnt);
	__dl_u32(ofdm_cnt);
	__dl_u32(htmix_cnt);
	__dl_u32(htgf_cnt);
	__dl_u32(vht_su_cnt);
	__dl_u32(vht_2mu_cnt);
	__dl_u32(vht_3mu_cnt);
	__dl_u32(vht_4mu_cnt);
	__dl_u32(he_su_cnt);
	__dl_u32(he_2ru_cnt);
	__dl_u32(he_2mu_cnt);
	__dl_u32(he_3ru_cnt);
	__dl_u32(he_3mu_cnt);
	__dl_u32(he_4ru_cnt);
	__dl_u32(he_4mu_cnt);
	__dl_u32(he_5to8ru_cnt);
	__dl_u32(he_9to16ru_cnt);
	__dl_u32(he_gtr16ru_cnt);

	__ul_u32(hetrig_su_cnt);
	__ul_u32(hetrig_2ru_cnt);
	__ul_u32(hetrig_3ru_cnt);
	__ul_u32(hetrig_4ru_cnt);
	__ul_u32(hetrig_5to8ru_cnt);
	__ul_u32(hetrig_9to16ru_cnt);
	__ul_u32(hetrig_gtr16ru_cnt);
	__ul_u32(hetrig_2mu_cnt);
	__ul_u32(hetrig_3mu_cnt);
	__ul_u32(hetrig_4mu_cnt);
#undef __dl_u32
#undef __ul_u32

	dev_kfree_skb(skb);

	return 0;
}

/* Advertise MWDS (MediaTek WDS / 4-address) support to the WA firmware. */
static int mt7915_mcu_set_mwds(struct mt7915_dev *dev, bool enabled)
{
	struct {
		u8 enable;
		u8 _rsv[3];
	} __packed req = {
		.enable = enabled
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_WA_EXT_CMD(MWDS_SUPPORT), &req,
				 sizeof(req), false);
}

/* Generic MURU_CTRL setter: sends @cmd with a 32-bit little-endian @val. */
int mt7915_mcu_set_muru_ctrl(struct mt7915_dev *dev, u32 cmd, u32 val)
{
	struct {
		__le32 cmd;
		u8 val[4];
	} __packed req = {
		.cmd = cpu_to_le32(cmd),
	};

	put_unaligned_le32(val, req.val);

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
				 sizeof(req), false);
}

/* Enable firmware RX-airtime accounting: first arm clear-on-read via the
 * bitwise control, then turn the airtime feature on.
 */
static int
mt7915_mcu_init_rx_airtime(struct mt7915_dev *dev)
{
#define RX_AIRTIME_FEATURE_CTRL		1
#define RX_AIRTIME_BITWISE_CTRL		2
#define RX_AIRTIME_CLEAR_EN		1
	struct {
		__le16 field;
		__le16 sub_field;
		__le32 set_status;
		__le32 get_status;
		u8 _rsv[12];

		bool airtime_en;
		bool mibtime_en;
		bool earlyend_en;
		u8 _rsv1[9];

		bool airtime_clear;
		bool mibtime_clear;
		u8 _rsv2[98];
	} __packed req = {
		.field = cpu_to_le16(RX_AIRTIME_BITWISE_CTRL),
		.sub_field = cpu_to_le16(RX_AIRTIME_CLEAR_EN),
		.airtime_clear = true,
	};
	int ret;

	ret =
	      mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_AIRTIME_CTRL), &req,
				sizeof(req), true);
	if (ret)
		return ret;

	/* second pass: enable the airtime feature itself */
	req.field = cpu_to_le16(RX_AIRTIME_FEATURE_CTRL);
	req.sub_field = cpu_to_le16(RX_AIRTIME_CLEAR_EN);
	req.airtime_en = true;

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_AIRTIME_CTRL), &req,
				 sizeof(req), true);
}

/* Program the WA firmware RED (token-based early drop) global watermarks,
 * derived from the HW token pool size.
 */
static int mt7915_red_set_watermark(struct mt7915_dev *dev)
{
#define RED_GLOBAL_TOKEN_WATERMARK 2
	struct {
		__le32 args[3];
		u8 cmd;
		u8 version;
		u8 __rsv1[4];
		__le16 len;
		__le16 high_mark;
		__le16 low_mark;
		u8 __rsv2[12];
	} __packed req = {
		.args[0] = cpu_to_le32(MCU_WA_PARAM_RED_SETTING),
		.cmd = RED_GLOBAL_TOKEN_WATERMARK,
		.len = cpu_to_le16(sizeof(req) - sizeof(req.args)),
		/* thresholds relative to the HW token pool */
		.high_mark = cpu_to_le16(MT7915_HW_TOKEN_SIZE - 256),
		.low_mark = cpu_to_le16(MT7915_HW_TOKEN_SIZE - 256 - 1536),
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_WA_PARAM_CMD(SET), &req,
				 sizeof(req), false);
}

/* Enable or disable WA-managed RED: set watermarks first (enable path),
 * then flip RED_ENABLE in WM and the RED parameter in WA.
 */
static int mt7915_mcu_set_red(struct mt7915_dev *dev, bool enabled)
{
#define RED_DISABLE		0
#define RED_BY_WA_ENABLE	2
	int ret;
	u32 red_type = enabled ? RED_BY_WA_ENABLE : RED_DISABLE;
	__le32 req = cpu_to_le32(red_type);

	if (enabled) {
		ret = mt7915_red_set_watermark(dev);
		if (ret < 0)
			return ret;
	}

	ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RED_ENABLE), &req,
				sizeof(req), false);
	if (ret < 0)
		return ret;

	return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET),
				 MCU_WA_PARAM_RED, enabled, 0);
}

/* One-shot MCU/firmware initialization sequence used at probe and after
 * reset: driver-own, image load, log routing, WTBL flush and feature
 * setup (MWDS, MURU platform type, RX airtime, RED).
 * Returns 0 or the first failing step's errno.
 */
int mt7915_mcu_init_firmware(struct mt7915_dev *dev)
{
	int ret;

	/* force firmware operation mode into normal state,
	 * which should be set before firmware download stage.
	 */
	mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);

	ret = mt7915_driver_own(dev, 0);
	if (ret)
		return ret;
	/* set driver own for band1 when two hif exist */
	if (dev->hif2) {
		ret = mt7915_driver_own(dev, 1);
		if (ret)
			return ret;
	}

	ret = mt7915_load_firmware(dev);
	if (ret)
		return ret;

	set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
	ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, 0);
	if (ret)
		return ret;

	ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, 0);
	if (ret)
		return ret;

	mt76_connac_mcu_del_wtbl_all(&dev->mt76);

	/* WA capability query; result intentionally ignored here */
	if ((mtk_wed_device_active(&dev->mt76.mmio.wed) &&
	     is_mt7915(&dev->mt76)) ||
	    !mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
		mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(CAPABILITY), 0, 0, 0);

	ret = mt7915_mcu_set_mwds(dev, 1);
	if (ret)
		return ret;

	ret = mt7915_mcu_set_muru_ctrl(dev, MURU_SET_PLATFORM_TYPE,
				       MURU_PLATFORM_TYPE_PERF_LEVEL_2);
	if (ret)
		return ret;

	ret = mt7915_mcu_init_rx_airtime(dev);
	if (ret)
		return ret;

	/* RED only when WED is active (host would otherwise manage queues) */
	return mt7915_mcu_set_red(dev, mtk_wed_device_active(&dev->mt76.mmio.wed));
}

/* Install the mt7915 MCU ops vtable and run the firmware init sequence. */
int mt7915_mcu_init(struct mt7915_dev *dev)
{
	static const struct mt76_mcu_ops mt7915_mcu_ops = {
		.max_retry = 1,
		.headroom = sizeof(struct mt76_connac2_mcu_txd),
		.mcu_skb_prepare_msg = mt76_connac2_mcu_fill_message,
		.mcu_skb_send_msg = mt7915_mcu_send_message,
		.mcu_parse_response = mt7915_mcu_parse_response,
	};

	dev->mt76.mcu_ops = &mt7915_mcu_ops;

	return mt7915_mcu_init_firmware(dev);
}

/* Shut the MCU down: restart it, verify it re-entered download state and
 * hand ownership back to firmware; any queued responses are purged.
 */
void mt7915_mcu_exit(struct mt7915_dev *dev)
{
	mt76_connac_mcu_restart(&dev->mt76);
	if (mt7915_firmware_state(dev, false)) {
		dev_err(dev->mt76.dev, "Failed to exit mcu\n");
		goto out;
	}

	mt76_wr(dev,
		MT_TOP_LPCR_HOST_BAND(0), MT_TOP_LPCR_HOST_FW_OWN);
	if (dev->hif2)
		mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(1),
			MT_TOP_LPCR_HOST_FW_OWN);
out:
	skb_queue_purge(&dev->mt76.mcu.res_q);
}

/* Blacklist EAPOL (ETH_P_PAE) from RX header translation so key handshake
 * frames reach the host untranslated.
 * NOTE(review): @band is not used in this body — confirm whether the
 * firmware command is global or implicitly per-band.
 */
static int
mt7915_mcu_set_rx_hdr_trans_blacklist(struct mt7915_dev *dev, int band)
{
	struct {
		u8 operation;
		u8 count;
		u8 _rsv[2];
		u8 index;
		u8 enable;
		__le16 etype;
	} req = {
		.operation = 1,
		.count = 1,
		.enable = 1,
		.etype = cpu_to_le16(ETH_P_PAE),
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_HDR_TRANS),
				 &req, sizeof(req), false);
}

/* Initialize per-band MAC: configure RX header translation (@hdr_trans,
 * with the EAPOL blacklist applied when enabled) and enable/disable the
 * MAC itself via MAC_INIT_CTRL.
 */
int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band,
		       bool enable, bool hdr_trans)
{
	struct {
		u8 operation;
		u8 enable;
		u8 check_bssid;
		u8 insert_vlan;
		u8 remove_vlan;
		u8 tid;
		u8 mode;
		u8 rsv;
	} __packed req_trans = {
		.enable = hdr_trans,
	};
	struct {
		u8 enable;
		u8 band;
		u8 rsv[2];
	} __packed req_mac = {
		.enable = enable,
		.band = band,
	};
	int ret;

	ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_HDR_TRANS),
				&req_trans, sizeof(req_trans), false);
	if (ret)
		return ret;

	/* best effort: a blacklist failure does not fail MAC init */
	if (hdr_trans)
		mt7915_mcu_set_rx_hdr_trans_blacklist(dev, band);

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MAC_INIT_CTRL),
				 &req_mac, sizeof(req_mac), true);
}

/* Push an EDCA parameter update; only req->total entries of the edca
 * array are transmitted (the trailing unused ACs are trimmed from @len).
 */
int mt7915_mcu_update_edca(struct mt7915_dev *dev, void *param)
{
	struct mt7915_mcu_tx *req = (struct mt7915_mcu_tx *)param;
	u8 num = req->total;
	size_t len = sizeof(*req) -
		     (IEEE80211_NUM_ACS - num) * sizeof(struct edca);

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EDCA_UPDATE), req,
				 len, true);
}

/* Translate @vif's mac80211 WMM queue parameters into the firmware EDCA
 * table for all four ACs.  cw_min/cw_max are encoded as exponents (fls);
 * defaults 5/10 match the 802.11 CWmin=31 / CWmax=1023 values.
 */
int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif)
{
#define TX_CMD_MODE	1
	struct mt7915_mcu_tx req = {
		.valid = true,
		.mode = TX_CMD_MODE,
		.total = IEEE80211_NUM_ACS,
	};
	struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
	int ac;

	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
		struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
		struct edca *e = &req.edca[ac];

		e->set = WMM_PARAM_SET;
		e->queue = ac + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS;
		e->aifs = q->aifs;
		e->txop = cpu_to_le16(q->txop);

		if (q->cw_min)
			e->cw_min = fls(q->cw_min);
		else
			e->cw_min = 5;

		if (q->cw_max)
			e->cw_max = cpu_to_le16(fls(q->cw_max));
		else
			e->cw_max = cpu_to_le16(10);
	}

	return mt7915_mcu_update_edca(dev, &req);
}

/* Set the FCC-5 radar long-pulse-number detection threshold (tag 0x1). */
int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val)
{
	struct {
		__le32 tag;
		__le16 min_lpn;
		u8 rsv[2];
	} __packed req = {
		.tag = cpu_to_le32(0x1),
		.min_lpn = cpu_to_le16(val),
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_TH), &req,
				 sizeof(req), true);
}

/* Program global radar pulse thresholds (tag 0x3) from @pulse. */
int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev,
			    const struct mt7915_dfs_pulse *pulse)
{
	struct {
		__le32 tag;

		__le32 max_width;		/* us */
		__le32 max_pwr;			/* dbm */
		__le32 min_pwr;			/* dbm */
		__le32 min_stgr_pri;		/* us */
		__le32 max_stgr_pri;		/* us */
		__le32 min_cr_pri;		/* us */
		__le32 max_cr_pri;		/* us */
	} __packed req = {
		.tag = cpu_to_le32(0x3),

#define __req_field(field)	.field = cpu_to_le32(pulse->field)
		__req_field(max_width),
		__req_field(max_pwr),
		__req_field(min_pwr),
		__req_field(min_stgr_pri),
		__req_field(max_stgr_pri),
		__req_field(min_cr_pri),
		__req_field(max_cr_pri),
#undef __req_field
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_TH), &req,
				 sizeof(req), true);
}

int
mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index,
			const struct mt7915_dfs_pattern *pattern)
{
	/* per-radar-type detection pattern, tag 0x2; field layout must match
	 * the firmware structure exactly
	 */
	struct {
		__le32 tag;
		__le16 radar_type;

		u8 enb;
		u8 stgr;
		u8 min_crpn;
		u8 max_crpn;
		u8 min_crpr;
		u8 min_pw;
		__le32 min_pri;
		__le32 max_pri;
		u8 max_pw;
		u8 min_crbn;
		u8 max_crbn;
		u8 min_stgpn;
		u8 max_stgpn;
		u8 min_stgpr;
		u8 rsv[2];
		__le32 min_stgpr_diff;
	} __packed req = {
		.tag = cpu_to_le32(0x2),
		.radar_type = cpu_to_le16(index),

#define __req_field_u8(field)	.field = pattern->field
#define __req_field_u32(field)	.field = cpu_to_le32(pattern->field)
		__req_field_u8(enb),
		__req_field_u8(stgr),
		__req_field_u8(min_crpn),
		__req_field_u8(max_crpn),
		__req_field_u8(min_crpr),
		__req_field_u8(min_pw),
		__req_field_u32(min_pri),
		__req_field_u32(max_pri),
		__req_field_u8(max_pw),
		__req_field_u8(min_crbn),
		__req_field_u8(max_crbn),
		__req_field_u8(min_stgpn),
		__req_field_u8(max_stgpn),
		__req_field_u8(min_stgpr),
		__req_field_u32(min_stgpr_diff),
#undef __req_field_u8
#undef __req_field_u32
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_TH), &req,
				 sizeof(req), true);
}

/* Drive the background (off-channel) radar-scan chain.
 * @chandef: channel to monitor; must be non-NULL except for STOP.
 * @cmd: CH_SWITCH_BACKGROUND_SCAN_{START,RUNNING,STOP}.
 */
static int
mt7915_mcu_background_chain_ctrl(struct mt7915_phy *phy,
				 struct cfg80211_chan_def *chandef,
				 int cmd)
{
	struct mt7915_dev *dev = phy->dev;
	struct mt76_phy *mphy = phy->mt76;
	struct ieee80211_channel *chan = mphy->chandef.chan;
	int freq = mphy->chandef.center_freq1;
	struct mt7915_mcu_background_chain_ctrl req = {
		.monitor_scan_type = 2, /* simple rx */
	};

	if (!chandef && cmd != CH_SWITCH_BACKGROUND_SCAN_STOP)
		return -EINVAL;

	if (!cfg80211_chandef_valid(&mphy->chandef))
		return -EINVAL;

	switch (cmd) {
	case CH_SWITCH_BACKGROUND_SCAN_START: {
		/* operating channel plus the channel to monitor */
		req.chan = chan->hw_value;
		req.central_chan = ieee80211_frequency_to_channel(freq);
		req.bw = mt76_connac_chan_bw(&mphy->chandef);
		req.monitor_chan = chandef->chan->hw_value;
		req.monitor_central_chan =
			ieee80211_frequency_to_channel(chandef->center_freq1);
		req.monitor_bw = mt76_connac_chan_bw(chandef);
		req.band_idx = phy->mt76->band_idx;
		req.scan_mode = 1;
		break;
	}
	case CH_SWITCH_BACKGROUND_SCAN_RUNNING:
		req.monitor_chan = chandef->chan->hw_value;
		req.monitor_central_chan =
			ieee80211_frequency_to_channel(chandef->center_freq1);
		req.band_idx = phy->mt76->band_idx;
		req.scan_mode = 2;
		break;
	case CH_SWITCH_BACKGROUND_SCAN_STOP:
		/* restore the full chain to the operating channel */
		req.chan = chan->hw_value;
		req.central_chan = ieee80211_frequency_to_channel(freq);
		req.bw = mt76_connac_chan_bw(&mphy->chandef);
		req.tx_stream = hweight8(mphy->antenna_mask);
		req.rx_stream = mphy->antenna_mask;
		break;
	default:
		return -EINVAL;
	}
	/* band flag: 1 for 5 GHz (also the STOP/no-chandef default) */
	req.band = chandef ? chandef->chan->band == NL80211_BAND_5GHZ : 1;

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(OFFCH_SCAN_CTRL),
				 &req, sizeof(req), false);
}

/* Enable (or, with NULL @chandef, disable) background radar detection on
 * the dedicated RDD chain, mapping the regulatory domain to the firmware
 * region code (0=ETSI, 1=FCC/default, 2=JP).
 */
int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
				     struct cfg80211_chan_def *chandef)
{
	struct mt7915_dev *dev = phy->dev;
	int err, region, rdd_idx;

	rdd_idx = mt7915_get_rdd_idx(phy, true);
	if (rdd_idx < 0)
		return -EINVAL;

	if (!chandef) { /* disable offchain */
		err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, rdd_idx, 0, 0);
		if (err)
			return err;

		return mt7915_mcu_background_chain_ctrl(phy, NULL,
				CH_SWITCH_BACKGROUND_SCAN_STOP);
	}

	err = mt7915_mcu_background_chain_ctrl(phy, chandef,
					       CH_SWITCH_BACKGROUND_SCAN_START);
	if (err)
		return err;

	switch (dev->mt76.region) {
	case NL80211_DFS_ETSI:
		region = 0;
		break;
	case NL80211_DFS_JP:
		region = 2;
		break;
	case NL80211_DFS_FCC:
	default:
		region = 1;
		break;
	}

	return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, rdd_idx, 0, region);
}

/* Program the channel for @phy via @cmd (e.g. CHANNEL_SWITCH), including
 * bandwidth, TX/RX paths and the switch reason derived from current state.
 */
int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
{
	static const u8 ch_band[] = {
		[NL80211_BAND_2GHZ] = 0,
		[NL80211_BAND_5GHZ] = 1,
		[NL80211_BAND_6GHZ] = 2,
	};
	struct mt7915_dev *dev = phy->dev;
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	int freq1 = chandef->center_freq1;
	u8 band = phy->mt76->band_idx;
	struct {
		u8 control_ch;
		u8 center_ch;
		u8 bw;
		u8 tx_path_num;
		u8 rx_path;	/* mask or num */
		u8 switch_reason;
		u8 band_idx;
		u8 center_ch2;	/* for 80+80 only */
		__le16 cac_case;
		u8 channel_band;
		u8 rsv0;
		__le32 outband_freq;
		u8 txpower_drop;
		u8 ap_bw;
		u8 ap_center_ch;
		u8 rsv1[57];
	} __packed req = {
		.control_ch = chandef->chan->hw_value,
		.center_ch =
			     ieee80211_frequency_to_channel(freq1),
		.bw = mt76_connac_chan_bw(chandef),
		.tx_path_num = hweight16(phy->mt76->chainmask),
		/* per-band chain mask; converted to a count for
		 * CHANNEL_SWITCH below
		 */
		.rx_path = phy->mt76->chainmask >> (dev->chainshift * band),
		.band_idx = band,
		.channel_band = ch_band[chandef->chan->band],
	};

#ifdef CONFIG_NL80211_TESTMODE
	if (phy->mt76->test.tx_antenna_mask &&
	    mt76_testmode_enabled(phy->mt76)) {
		req.tx_path_num = fls(phy->mt76->test.tx_antenna_mask);
		req.rx_path = phy->mt76->test.tx_antenna_mask;
	}
#endif

	if (mt76_connac_spe_idx(phy->mt76->antenna_mask))
		req.tx_path_num = fls(phy->mt76->antenna_mask);

	/* bypass DPD calibration while scanning/idle or in monitor mode;
	 * flag DFS channels so firmware runs CAC
	 */
	if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
		req.switch_reason = CH_SWITCH_NORMAL;
	else if (phy->mt76->offchannel ||
		 phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE)
		req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
	else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
					  NL80211_IFTYPE_AP))
		req.switch_reason = CH_SWITCH_DFS;
	else
		req.switch_reason = CH_SWITCH_NORMAL;

	if (cmd == MCU_EXT_CMD(CHANNEL_SWITCH))
		req.rx_path = hweight8(req.rx_path);

	if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
		int freq2 = chandef->center_freq2;

		req.center_ch2 = ieee80211_frequency_to_channel(freq2);
	}

	return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
}

/* Upload the whole flash/file-based EEPROM image to firmware in
 * PER_PAGE_SIZE chunks via EFUSE_BUFFER_MODE (buffer mode).
 */
static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
{
#define MAX_PAGE_IDX_MASK	GENMASK(7, 5)
#define PAGE_IDX_MASK		GENMASK(4, 2)
#define PER_PAGE_SIZE		0x400
	struct mt7915_mcu_eeprom req = { .buffer_mode = EE_MODE_BUFFER };
	u16 eeprom_size = mt7915_eeprom_size(dev);
	u8 total = DIV_ROUND_UP(eeprom_size, PER_PAGE_SIZE);
	u8 *eep = (u8 *)dev->mt76.eeprom.data;
	int eep_len;
	int i;

	for (i = 0; i < total; i++, eep += eep_len) {
		struct sk_buff *skb;
		int ret;

		/* last page may be short */
		if (i == total - 1 && !!(eeprom_size % PER_PAGE_SIZE))
			eep_len = eeprom_size % PER_PAGE_SIZE;
		else
			eep_len = PER_PAGE_SIZE;

		skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
					 sizeof(req) + eep_len);
		if (!skb)
			return -ENOMEM;

		req.format = FIELD_PREP(MAX_PAGE_IDX_MASK, total - 1) |
			     FIELD_PREP(PAGE_IDX_MASK, i) | EE_FORMAT_WHOLE;
		req.len = cpu_to_le16(eep_len);

		skb_put_data(skb, &req, sizeof(req));
		skb_put_data(skb, eep, eep_len);

		ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
					    MCU_EXT_CMD(EFUSE_BUFFER_MODE), true);
		if (ret)
			return ret;
	}

	return 0;
}

/* Tell firmware where to take calibration data from: the uploaded flash
 * image (buffer mode) or the on-chip efuse.
 */
int mt7915_mcu_set_eeprom(struct mt7915_dev *dev)
{
	struct mt7915_mcu_eeprom req = {
		.buffer_mode = EE_MODE_EFUSE,
		.format = EE_FORMAT_WHOLE,
	};

	if (dev->flash_mode)
		return mt7915_mcu_set_eeprom_flash(dev);

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE),
				 &req, sizeof(req), true);
}

/* Read one MT7915_EEPROM_BLOCK_SIZE block of efuse containing @offset.
 * Result goes to @read_buf, or into dev->mt76.eeprom.data at the block's
 * address when @read_buf is NULL.
 */
int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset, u8 *read_buf)
{
	struct mt7915_mcu_eeprom_info req = {
		.addr = cpu_to_le32(round_down(offset,
				    MT7915_EEPROM_BLOCK_SIZE)),
	};
	struct mt7915_mcu_eeprom_info *res;
	struct sk_buff *skb;
	u8 *buf = read_buf;
	int ret;

	ret = mt76_mcu_send_and_get_msg(&dev->mt76,
					MCU_EXT_QUERY(EFUSE_ACCESS),
					&req, sizeof(req), true, &skb);
	if (ret)
		return ret;

	res = (struct mt7915_mcu_eeprom_info *)skb->data;
	if (!buf)
		buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr);
	memcpy(buf, res->data, MT7915_EEPROM_BLOCK_SIZE);

	dev_kfree_skb(skb);

	return 0;
}

/* Query the number of free efuse blocks; written to @block_num. */
int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num)
{
	struct {
		u8 _rsv;
		u8 version;
		u8 die_idx;
		u8 _rsv2;
	} __packed req = {
		.version = 1,
	};
	struct sk_buff *skb;
	int ret;

	ret = mt76_mcu_send_and_get_msg(&dev->mt76,
					MCU_EXT_QUERY(EFUSE_FREE_BLOCK),
					&req, sizeof(req), true, &skb);
	if (ret)
		return ret;

	*block_num = *(u8 *)skb->data;
	dev_kfree_skb(skb);

	return 0;
}

/* Send one pre-calibration chunk (@data/@len, chunk index @idx) to the
 * firmware via @cmd (GROUP_PRE_CAL_INFO or DPD_PRE_CAL_INFO).
 */
static int mt7915_mcu_set_pre_cal(struct mt7915_dev *dev, u8 idx,
				  u8 *data, u32 len, int cmd)
{
	struct {
		u8 dir;
		u8 valid;
		__le16 bitmap;
		s8 precal;
		u8 action;
		u8 band;
		u8 idx;
		u8 rsv[4];
		__le32 len;
	} req = {};
	struct sk_buff *skb;

	skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req) + len);
	if (!skb)
		return -ENOMEM;

	req.idx = idx;
	req.len = cpu_to_le32(len);
	skb_put_data(skb, &req, sizeof(req));
	skb_put_data(skb, data, len);

	return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, false);
}

/* Upload the stored group calibration blob (if flagged present in the
 * EEPROM) to firmware in MT_EE_CAL_UNIT chunks.
 */
int mt7915_mcu_apply_group_cal(struct mt7915_dev *dev)
{
	u8 idx = 0, *cal = dev->cal, *eep = dev->mt76.eeprom.data;
	u32 total = mt7915_get_cal_group_size(dev);
	u32 offs = is_mt7915(&dev->mt76) ?
		   MT_EE_DO_PRE_CAL : MT_EE_DO_PRE_CAL_V2;

	if (!(eep[offs] & MT_EE_WIFI_CAL_GROUP))
		return 0;

	/*
	 * Items: Rx DCOC, RSSI DCOC, Tx TSSI DCOC, Tx LPFG
	 * Tx FDIQ, Tx DCIQ, Rx FDIQ, Rx FIIQ, ADCDCOC
	 */
	while (total > 0) {
		int ret, len;

		len = min_t(u32, total, MT_EE_CAL_UNIT);

		ret = mt7915_mcu_set_pre_cal(dev, idx, cal, len,
					     MCU_EXT_CMD(GROUP_PRE_CAL_INFO));
		if (ret)
			return ret;

		total -= len;
		cal += len;
		idx++;
	}

	return 0;
}

/* Linear search of @cur in @freqs; returns the index or -1 if absent. */
static int mt7915_find_freq_idx(const u16 *freqs, int n_freqs, u16 cur)
{
	int i;

	for (i = 0; i < n_freqs; i++)
		if (cur == freqs[i])
			return i;

	return -1;
}

/* Map a center frequency/bandwidth to its index in the per-chip DPD
 * calibration table.  2.4 GHz channels use three virtual slots appended
 * after the 5/6 GHz list; 40/80/160 MHz look up the +-10 MHz neighbours
 * of the center as fallback.  Returns -1 when not calibrated (incl. 80+80).
 */
static int mt7915_dpd_freq_idx(struct mt7915_dev *dev, u16 freq, u8 bw)
{
	/* MT7915 (v1) table: 5 GHz BW20 centers only */
	static const u16 freq_list_v1[] = {
		5180, 5200, 5220, 5240,
		5260, 5280, 5300, 5320,
		5500, 5520, 5540, 5560,
		5580, 5600, 5620, 5640,
		5660, 5680, 5700, 5745,
		5765, 5785, 5805, 5825
	};
	/* MT7916/MT7986 (v2) table: 6 GHz and 5 GHz, BW20 and BW160 anchors */
	static const u16 freq_list_v2[] = {
		/* 6G BW20*/
		5955, 5975, 5995, 6015,
		6035, 6055, 6075, 6095,
		6115, 6135, 6155, 6175,
		6195, 6215, 6235, 6255,
		6275, 6295, 6315, 6335,
		6355, 6375, 6395, 6415,
		6435, 6455, 6475, 6495,
		6515, 6535, 6555, 6575,
		6595, 6615, 6635, 6655,
		6675, 6695, 6715, 6735,
		6755, 6775, 6795, 6815,
		6835, 6855, 6875, 6895,
		6915, 6935, 6955, 6975,
		6995, 7015, 7035, 7055,
		7075, 7095, 7115,
		/* 6G BW160 */
		6025, 6185, 6345, 6505,
		6665, 6825, 6985,
		/* 5G BW20 */
		5180, 5200, 5220, 5240,
		5260, 5280, 5300, 5320,
		5500, 5520, 5540, 5560,
		5580, 5600, 5620, 5640,
		5660, 5680, 5700, 5720,
		5745, 5765, 5785, 5805,
		5825, 5845, 5865, 5885,
		/* 5G BW160 */
		5250, 5570, 5815
	};
	const u16 *freq_list;
	int idx, n_freqs;

	if (!is_mt7915(&dev->mt76)) {
		freq_list = freq_list_v2;
		n_freqs = ARRAY_SIZE(freq_list_v2);
	} else {
		freq_list = freq_list_v1;
		n_freqs = ARRAY_SIZE(freq_list_v1);
	}

	/* 2.4 GHz: three coarse slots appended after the table */
	if (freq < 4000) {
		if (freq < 2432)
			return n_freqs;
		if (freq < 2457)
			return n_freqs + 1;

		return n_freqs + 2;
	}

	if (bw == NL80211_CHAN_WIDTH_80P80)
		return -1;

	if (bw != NL80211_CHAN_WIDTH_20) {
		/* wider channels: try the BW20 anchors either side of center */
		idx = mt7915_find_freq_idx(freq_list, n_freqs, freq + 10);
		if (idx >= 0)
			return idx;

		idx = mt7915_find_freq_idx(freq_list, n_freqs, freq - 10);
		if (idx >= 0)
			return idx;
	}

	return mt7915_find_freq_idx(freq_list, n_freqs, freq);
}

/* Upload the TX DPD (digital pre-distortion) calibration entries for the
 * current channel of @phy, when the EEPROM flags them as present.
 */
int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
	enum nl80211_band band = chandef->chan->band;
	u32 offs = is_mt7915(&dev->mt76) ? MT_EE_DO_PRE_CAL : MT_EE_DO_PRE_CAL_V2;
	u16 center_freq = chandef->center_freq1;
	u8 *cal = dev->cal, *eep = dev->mt76.eeprom.data;
	u8 dpd_mask, cal_num = is_mt7915(&dev->mt76) ?
			       2 : 3;
	int idx;

	switch (band) {
	case NL80211_BAND_2GHZ:
		dpd_mask = MT_EE_WIFI_CAL_DPD_2G;
		break;
	case NL80211_BAND_5GHZ:
		dpd_mask = MT_EE_WIFI_CAL_DPD_5G;
		break;
	case NL80211_BAND_6GHZ:
		dpd_mask = MT_EE_WIFI_CAL_DPD_6G;
		break;
	default:
		dpd_mask = 0;
		break;
	}

	if (!(eep[offs] & dpd_mask))
		return 0;

	idx = mt7915_dpd_freq_idx(dev, center_freq, chandef->width);
	if (idx < 0)
		return -EINVAL;

	/* Items: Tx DPD, Tx Flatness */
	idx = idx * cal_num;
	/* DPD entries follow the group-cal region in dev->cal */
	cal += mt7915_get_cal_group_size(dev) + (idx * MT_EE_CAL_UNIT);

	while (cal_num--) {
		int ret;

		ret = mt7915_mcu_set_pre_cal(dev, idx, cal, MT_EE_CAL_UNIT,
					     MCU_EXT_CMD(DPD_PRE_CAL_INFO));
		if (ret)
			return ret;

		idx++;
		cal += MT_EE_CAL_UNIT;
	}

	return 0;
}

/* Fetch channel-busy MIB counters for @phy and update the survey state
 * (cc_tx/cc_rx/cc_bss_rx/cc_busy) as deltas against the last snapshot.
 * With @chan_switch only the snapshot is refreshed.
 */
int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
{
	struct mt76_channel_state *state = phy->mt76->chan_state;
	struct mt76_channel_state *state_ts = &phy->state_ts;
	struct mt7915_dev *dev = phy->dev;
	struct mt7915_mcu_mib *res, req[5];
	struct sk_buff *skb;
	static const u32 *offs;
	int i, ret, len, offs_cc;
	u64 cc_tx;

	/* strict order */
	if (is_mt7915(&dev->mt76)) {
		static const u32 chip_offs[] = {
			MIB_NON_WIFI_TIME,
			MIB_TX_TIME,
			MIB_RX_TIME,
			MIB_OBSS_AIRTIME,
			MIB_TXOP_INIT_COUNT,
		};
		len = ARRAY_SIZE(chip_offs);
		offs = chip_offs;
		/* mt7915 response carries a 20-byte prefix before the
		 * counter array — NOTE(review): inferred from usage; confirm
		 */
		offs_cc = 20;
	} else {
		static const u32 chip_offs[] = {
			MIB_NON_WIFI_TIME_V2,
			MIB_TX_TIME_V2,
			MIB_RX_TIME_V2,
			MIB_OBSS_AIRTIME_V2
		};
		len = ARRAY_SIZE(chip_offs);
		offs = chip_offs;
		offs_cc = 0;
	}

	for (i = 0; i < len; i++) {
		req[i].band = cpu_to_le32(phy->mt76->band_idx);
		req[i].offs = cpu_to_le32(offs[i]);
	}

	ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(GET_MIB_INFO),
					req, len * sizeof(req[0]), true, &skb);
	if (ret)
		return ret;

	res = (struct mt7915_mcu_mib *)(skb->data + offs_cc);

#define __res_u64(s) le64_to_cpu(res[s].data)
	/* subtract Tx backoff time from Tx duration for MT7915 */
	if (is_mt7915(&dev->mt76)) {
		u64 backoff = (__res_u64(4) & 0xffff) * 79;  /* 16us + 9us * 7 */
		cc_tx = __res_u64(1) - backoff;
	} else {
		cc_tx = __res_u64(1);
	}

	if (chan_switch)
		goto out;

	state->cc_tx += cc_tx - state_ts->cc_tx;
	state->cc_bss_rx += __res_u64(2) - state_ts->cc_bss_rx;
	state->cc_rx += __res_u64(2) + __res_u64(3) - state_ts->cc_rx;
	state->cc_busy += __res_u64(0) + cc_tx + __res_u64(2) + __res_u64(3) -
			  state_ts->cc_busy;

out:
	/* refresh snapshot for the next delta computation */
	state_ts->cc_tx = cc_tx;
	state_ts->cc_bss_rx = __res_u64(2);
	state_ts->cc_rx = __res_u64(2) + __res_u64(3);
	state_ts->cc_busy = __res_u64(0) + cc_tx + __res_u64(2) + __res_u64(3);
#undef __res_u64

	dev_kfree_skb(skb);

	return 0;
}

/* Query the thermal sensor for @phy's band.
 * NOTE(review): the temperature value is returned through the MCU
 * response path (mcu_parse_response) as the send_msg return value —
 * confirm against mt7915_mcu_parse_response.
 */
int mt7915_mcu_get_temperature(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	struct {
		u8 ctrl_id;
		u8 action;
		u8 band_idx;
		u8 rsv[5];
	} req = {
		.ctrl_id = THERMAL_SENSOR_TEMP_QUERY,
		.band_idx = phy->mt76->band_idx,
	};

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_CTRL), &req,
				 sizeof(req), true);
}

/* Configure the four thermal-throttle duty levels; each level halves the
 * previous duty cycle starting from @state.
 */
int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
{
	struct mt7915_dev *dev = phy->dev;
	struct mt7915_mcu_thermal_ctrl req = {
		.band_idx = phy->mt76->band_idx,
		.ctrl_id = THERMAL_PROTECT_DUTY_CONFIG,
	};
	int level, ret;

	/* set duty cycle and level */
	for (level = 0; level < 4; level++) {
		req.duty.duty_level = level;
		req.duty.duty_cycle = state;
		state /= 2;

		ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
					&req, sizeof(req), false);
		if (ret)
			return ret;
	}
	return 0;
}

/* Arm firmware thermal protection: disable the old trigger, then enable
 * it with the phy's configured trigger/restore temperatures.
 */
int mt7915_mcu_set_thermal_protect(struct mt7915_phy *phy)
{
	struct mt7915_dev *dev = phy->dev;
	struct {
		struct mt7915_mcu_thermal_ctrl ctrl;

		__le32 trigger_temp;
		__le32 restore_temp;
		__le16 sustain_time;
		u8 rsv[2];
	} __packed req = {
		.ctrl = {
			.band_idx = phy->mt76->band_idx,
			.type.protect_type = 1,
			.type.trigger_type = 1,
		},
	};
	int ret;

	req.ctrl.ctrl_id = THERMAL_PROTECT_DISABLE;
	/* only the ctrl header is sent for the disable step */
	ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
				&req, sizeof(req.ctrl), false);

	if (ret)
		return ret;

	/* set high-temperature trigger threshold */
	req.ctrl.ctrl_id = THERMAL_PROTECT_ENABLE;
	/* add a safety margin ~10 */
	req.restore_temp = cpu_to_le32(phy->throttle_temp[0] - 10);
	req.trigger_temp = cpu_to_le32(phy->throttle_temp[1]);
	req.sustain_time = cpu_to_le16(10);

	return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
				 &req, sizeof(req), false);
}

/* Set the minimum per-frame TX power (0.5 dB units in firmware). */
int mt7915_mcu_set_txpower_frame_min(struct mt7915_phy *phy, s8 txpower)
{
	struct mt7915_dev *dev = phy->dev;
	struct {
		u8 format_id;
		u8 rsv;
		u8 band_idx;
		s8 txpower_min;
	} __packed req = {
		.format_id = TX_POWER_LIMIT_FRAME_MIN,
		.band_idx = phy->mt76->band_idx,
		.txpower_min = txpower * 2, /* 0.5db */
	};

	return mt76_mcu_send_msg(&dev->mt76,
				 MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req,
				 sizeof(req), true);
}

/* Apply a per-station per-frame TX power cap: computes per-rate offsets
 * against the SKU table for the rate groups the STA supports (HT required;
 * VHT/HE extend the covered range).
 */
int mt7915_mcu_set_txpower_frame(struct mt7915_phy *phy,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta, s8 txpower)
{
	struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
	struct mt7915_dev *dev = phy->dev;
	struct mt76_phy *mphy =
				phy->mt76;
	struct {
		u8 format_id;
		u8 rsv[3];
		u8 band_idx;
		s8 txpower_max;
		__le16 wcid;
		s8 txpower_offs[48];
	} __packed req = {
		.format_id = TX_POWER_LIMIT_FRAME,
		.band_idx = phy->mt76->band_idx,
		.txpower_max = DIV_ROUND_UP(mphy->txpower_cur, 2),
		.wcid = cpu_to_le16(msta->wcid.idx),
	};
	int ret;
	s8 txpower_sku[MT7915_SKU_RATE_NUM];

	ret = mt7915_mcu_get_txpower_sku(phy, txpower_sku, sizeof(txpower_sku),
					 TX_POWER_INFO_RATE);
	if (ret)
		return ret;

	txpower = mt76_get_power_bound(mphy, txpower);
	if (txpower > mphy->txpower_cur || txpower < 0)
		return -EINVAL;

	if (txpower) {
		u32 offs, len, i;

		/* pick the SKU-table slice matching the STA's top rate group */
		if (sta->deflink.ht_cap.ht_supported) {
			const u8 *sku_len = mt7915_sku_group_len;

			offs = sku_len[SKU_CCK] + sku_len[SKU_OFDM];
			len = sku_len[SKU_HT_BW20] + sku_len[SKU_HT_BW40];

			if (sta->deflink.vht_cap.vht_supported) {
				offs += len;
				len = sku_len[SKU_VHT_BW20] * 4;

				if (sta->deflink.he_cap.has_he) {
					offs += len + sku_len[SKU_HE_RU26] * 3;
					len = sku_len[SKU_HE_RU242] * 4;
				}
			}
		} else {
			return -EINVAL;
		}

		/* offsets are in 0.5 dB steps relative to the SKU limits */
		for (i = 0; i < len; i++, offs++)
			req.txpower_offs[i] =
				DIV_ROUND_UP(txpower - txpower_sku[offs], 2);
	}

	return mt76_mcu_send_msg(&dev->mt76,
				 MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req,
				 sizeof(req), true);
}

/* Recompute mphy->txpower_cur: the max per-chain EEPROM target power
 * (plus band delta), optionally clamped by the regulatory @tx_power when
 * SKU limiting is enabled.
 */
static void
mt7915_update_txpower(struct mt7915_phy *phy, int tx_power)
{
	struct mt76_phy *mphy = phy->mt76;
	struct ieee80211_channel *chan = mphy->main_chandef.chan;
	int chain_idx, val, e2p_power_limit = 0;

	if (!chan) {
		mphy->txpower_cur = tx_power;
		return;
	}

	for (chain_idx = 0; chain_idx < hweight16(mphy->chainmask); chain_idx++) {
		val = mt7915_eeprom_get_target_power(phy->dev, chan, chain_idx);
		val += mt7915_eeprom_get_power_delta(phy->dev, chan->band);

		e2p_power_limit = max_t(int, e2p_power_limit, val);
	}

	if (phy->sku_limit_en)
		mphy->txpower_cur = min_t(int, e2p_power_limit, tx_power);
	else
		mphy->txpower_cur = e2p_power_limit;
}

/* Push the regulatory SKU rate-power table (and, when enabled, the
 * per-path table) for @phy's current channel to the firmware.
 */
int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
{
#define TX_POWER_LIMIT_TABLE_RATE	0
#define TX_POWER_LIMIT_TABLE_PATH	1
	struct mt7915_dev *dev = phy->dev;
	struct mt76_phy *mphy = phy->mt76;
	struct ieee80211_hw *hw = mphy->hw;
	struct mt7915_sku_val {
		u8 format_id;
		u8 limit_type;
		u8 band_idx;
	} __packed hdr = {
		.format_id = TX_POWER_LIMIT_TABLE,
		.limit_type = TX_POWER_LIMIT_TABLE_RATE,
		.band_idx = phy->mt76->band_idx,
	};
	int i, ret, tx_power;
	const u8 *len = mt7915_sku_group_len;
	struct mt76_power_limits la = {};
	struct sk_buff *skb;

	tx_power = mt76_get_power_bound(mphy, hw->conf.power_level);
	if (phy->sku_limit_en) {
		tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
						      &la, tx_power);
		mt7915_update_txpower(phy, tx_power);
	} else {
		/* no SKU limiting: just refresh txpower_cur, nothing to send */
		mt7915_update_txpower(phy, tx_power);
		return 0;
	}

	skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
				 sizeof(hdr) + MT7915_SKU_RATE_NUM);
	if (!skb)
		return -ENOMEM;

	skb_put_data(skb, &hdr, sizeof(hdr));
	skb_put_data(skb, &la.cck, len[SKU_CCK] + len[SKU_OFDM]);
	skb_put_data(skb, &la.mcs[0], len[SKU_HT_BW20]);
	skb_put_data(skb, &la.mcs[1], len[SKU_HT_BW40]);

	/* vht */
	for (i = 0; i < 4; i++) {
		skb_put_data(skb, &la.mcs[i], sizeof(la.mcs[i]));
		skb_put_zero(skb, 2);  /* padding */
	}

	/* he */
	skb_put_data(skb, &la.ru[0], sizeof(la.ru));
	ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
				    MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), true);
	if (ret)
		return ret;

	/* only set per-path power table when it's configured */
	if (!phy->sku_path_en)
		return 0;

	skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
				 sizeof(hdr) + MT7915_SKU_PATH_NUM);
	if (!skb)
		return -ENOMEM;

	hdr.limit_type = TX_POWER_LIMIT_TABLE_PATH;
	skb_put_data(skb, &hdr, sizeof(hdr));
	skb_put_data(skb, &la.path.cck, sizeof(la.path.cck));
	skb_put_data(skb, &la.path.ofdm, sizeof(la.path.ofdm));
	skb_put_data(skb, &la.path.ofdm_bf[1], sizeof(la.path.ofdm_bf) - 1);

	/* HT20 and HT40 */
	skb_put_data(skb, &la.path.ru[3], sizeof(la.path.ru[3]));
	skb_put_data(skb, &la.path.ru_bf[3][1], sizeof(la.path.ru_bf[3]) - 1);
	skb_put_data(skb, &la.path.ru[4], sizeof(la.path.ru[4]));
	skb_put_data(skb, &la.path.ru_bf[4][1], sizeof(la.path.ru_bf[4]) - 1);

	/* start from non-bf and bf fields of
	 * BW20/RU242, BW40/RU484, BW80/RU996, BW160/RU2x996,
	 * RU26, RU52, and RU106
	 */

	for (i = 0; i < 8; i++) {
		bool bf = i % 2;
		u8 idx = (i + 6) / 2;
		s8 *buf = bf ? la.path.ru_bf[idx] : la.path.ru[idx];
		/* The non-bf fields of RU26 to RU106 are special cases */
		if (bf)
			skb_put_data(skb, buf + 1, 9);
		else
			skb_put_data(skb, buf, 10);
	}

	for (i = 0; i < 6; i++) {
		bool bf = i % 2;
		u8 idx = i / 2;
		s8 *buf = bf ?
la.path.ru_bf[idx] : la.path.ru[idx]; 3502 3503 skb_put_data(skb, buf, 10); 3504 } 3505 3506 return mt76_mcu_skb_send_msg(&dev->mt76, skb, 3507 MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), true); 3508 } 3509 3510 int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len, 3511 u8 category) 3512 { 3513 #define RATE_POWER_INFO 2 3514 struct mt7915_dev *dev = phy->dev; 3515 struct { 3516 u8 format_id; 3517 u8 category; 3518 u8 band_idx; 3519 u8 _rsv; 3520 } __packed req = { 3521 .format_id = TX_POWER_LIMIT_INFO, 3522 .category = category, 3523 .band_idx = phy->mt76->band_idx, 3524 }; 3525 struct sk_buff *skb; 3526 int ret, i; 3527 3528 ret = mt76_mcu_send_and_get_msg(&dev->mt76, 3529 MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), 3530 &req, sizeof(req), true, &skb); 3531 if (ret) 3532 return ret; 3533 3534 if (category == TX_POWER_INFO_RATE) { 3535 s8 res[MT7915_SKU_RATE_NUM][2]; 3536 3537 memcpy(res, skb->data + 4, sizeof(res)); 3538 for (i = 0; i < len; i++) 3539 txpower[i] = res[i][req.band_idx]; 3540 } else if (category == TX_POWER_INFO_PATH) { 3541 memcpy(txpower, skb->data + 4, len); 3542 } 3543 3544 dev_kfree_skb(skb); 3545 3546 return 0; 3547 } 3548 3549 int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode, 3550 u8 en) 3551 { 3552 struct { 3553 u8 test_mode_en; 3554 u8 param_idx; 3555 u8 _rsv[2]; 3556 3557 u8 enable; 3558 u8 _rsv2[3]; 3559 3560 u8 pad[8]; 3561 } __packed req = { 3562 .test_mode_en = test_mode, 3563 .param_idx = param, 3564 .enable = en, 3565 }; 3566 3567 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req, 3568 sizeof(req), false); 3569 } 3570 3571 int mt7915_mcu_set_sku_en(struct mt7915_phy *phy) 3572 { 3573 struct mt7915_dev *dev = phy->dev; 3574 struct mt7915_sku { 3575 u8 format_id; 3576 u8 sku_enable; 3577 u8 band_idx; 3578 u8 rsv; 3579 } __packed req = { 3580 .band_idx = phy->mt76->band_idx, 3581 }; 3582 int ret; 3583 3584 req.sku_enable = phy->sku_limit_en; 3585 req.format_id = TX_POWER_LIMIT_ENABLE; 
3586 3587 ret = mt76_mcu_send_msg(&dev->mt76, 3588 MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req, 3589 sizeof(req), true); 3590 if (ret) 3591 return ret; 3592 3593 req.sku_enable = phy->sku_path_en; 3594 req.format_id = TX_POWER_LIMIT_PATH_ENABLE; 3595 3596 return mt76_mcu_send_msg(&dev->mt76, 3597 MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req, 3598 sizeof(req), true); 3599 } 3600 3601 int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band) 3602 { 3603 struct { 3604 u8 action; 3605 u8 set; 3606 u8 band; 3607 u8 rsv; 3608 } req = { 3609 .action = action, 3610 .set = set, 3611 .band = band, 3612 }; 3613 3614 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SER_TRIGGER), 3615 &req, sizeof(req), false); 3616 } 3617 3618 int mt7915_mcu_set_txbf(struct mt7915_dev *dev, u8 action) 3619 { 3620 struct { 3621 u8 action; 3622 union { 3623 struct { 3624 u8 snd_mode; 3625 u8 sta_num; 3626 u8 rsv; 3627 u8 wlan_idx[4]; 3628 __le32 snd_period; /* ms */ 3629 } __packed snd; 3630 struct { 3631 bool ebf; 3632 bool ibf; 3633 u8 rsv; 3634 } __packed type; 3635 struct { 3636 u8 bf_num; 3637 u8 bf_bitmap; 3638 u8 bf_sel[8]; 3639 u8 rsv[5]; 3640 } __packed mod; 3641 }; 3642 } __packed req = { 3643 .action = action, 3644 }; 3645 3646 #define MT_BF_PROCESSING 4 3647 switch (action) { 3648 case MT_BF_SOUNDING_ON: 3649 req.snd.snd_mode = MT_BF_PROCESSING; 3650 break; 3651 case MT_BF_TYPE_UPDATE: 3652 req.type.ebf = true; 3653 req.type.ibf = dev->ibf; 3654 break; 3655 case MT_BF_MODULE_UPDATE: 3656 req.mod.bf_num = 2; 3657 req.mod.bf_bitmap = GENMASK(1, 0); 3658 break; 3659 default: 3660 return -EINVAL; 3661 } 3662 3663 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req, 3664 sizeof(req), true); 3665 } 3666 3667 static int 3668 mt7915_mcu_enable_obss_spr(struct mt7915_phy *phy, u8 action, u8 val) 3669 { 3670 struct mt7915_dev *dev = phy->dev; 3671 struct mt7915_mcu_sr_ctrl req = { 3672 .action = action, 3673 .argnum = 1, 3674 .band_idx = phy->mt76->band_idx, 3675 
.val = cpu_to_le32(val), 3676 }; 3677 3678 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req, 3679 sizeof(req), true); 3680 } 3681 3682 static int 3683 mt7915_mcu_set_obss_spr_pd(struct mt7915_phy *phy, 3684 struct ieee80211_he_obss_pd *he_obss_pd) 3685 { 3686 struct mt7915_dev *dev = phy->dev; 3687 struct { 3688 struct mt7915_mcu_sr_ctrl ctrl; 3689 struct { 3690 u8 pd_th_non_srg; 3691 u8 pd_th_srg; 3692 u8 period_offs; 3693 u8 rcpi_src; 3694 __le16 obss_pd_min; 3695 __le16 obss_pd_min_srg; 3696 u8 resp_txpwr_mode; 3697 u8 txpwr_restrict_mode; 3698 u8 txpwr_ref; 3699 u8 rsv[3]; 3700 } __packed param; 3701 } __packed req = { 3702 .ctrl = { 3703 .action = SPR_SET_PARAM, 3704 .argnum = 9, 3705 .band_idx = phy->mt76->band_idx, 3706 }, 3707 }; 3708 int ret; 3709 u8 max_th = 82, non_srg_max_th = 62; 3710 3711 /* disable firmware dynamical PD asjustment */ 3712 ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_DPD, false); 3713 if (ret) 3714 return ret; 3715 3716 if (he_obss_pd->sr_ctrl & 3717 IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED) 3718 req.param.pd_th_non_srg = max_th; 3719 else if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT) 3720 req.param.pd_th_non_srg = max_th - he_obss_pd->non_srg_max_offset; 3721 else 3722 req.param.pd_th_non_srg = non_srg_max_th; 3723 3724 if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) 3725 req.param.pd_th_srg = max_th - he_obss_pd->max_offset; 3726 3727 req.param.obss_pd_min = cpu_to_le16(82); 3728 req.param.obss_pd_min_srg = cpu_to_le16(82); 3729 req.param.txpwr_restrict_mode = 2; 3730 req.param.txpwr_ref = 21; 3731 3732 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req, 3733 sizeof(req), true); 3734 } 3735 3736 static int 3737 mt7915_mcu_set_obss_spr_siga(struct mt7915_phy *phy, struct ieee80211_vif *vif, 3738 struct ieee80211_he_obss_pd *he_obss_pd) 3739 { 3740 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 3741 struct mt7915_dev *dev = phy->dev; 3742 u8 
omac = mvif->mt76.omac_idx; 3743 struct { 3744 struct mt7915_mcu_sr_ctrl ctrl; 3745 struct { 3746 u8 omac; 3747 u8 rsv[3]; 3748 u8 flag[20]; 3749 } __packed siga; 3750 } __packed req = { 3751 .ctrl = { 3752 .action = SPR_SET_SIGA, 3753 .argnum = 1, 3754 .band_idx = phy->mt76->band_idx, 3755 }, 3756 .siga = { 3757 .omac = omac > HW_BSSID_MAX ? omac - 12 : omac, 3758 }, 3759 }; 3760 int ret; 3761 3762 if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED) 3763 req.siga.flag[req.siga.omac] = 0xf; 3764 else 3765 return 0; 3766 3767 /* switch to normal AP mode */ 3768 ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_MODE, 0); 3769 if (ret) 3770 return ret; 3771 3772 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req, 3773 sizeof(req), true); 3774 } 3775 3776 static int 3777 mt7915_mcu_set_obss_spr_bitmap(struct mt7915_phy *phy, 3778 struct ieee80211_he_obss_pd *he_obss_pd) 3779 { 3780 struct mt7915_dev *dev = phy->dev; 3781 struct { 3782 struct mt7915_mcu_sr_ctrl ctrl; 3783 struct { 3784 __le32 color_l[2]; 3785 __le32 color_h[2]; 3786 __le32 bssid_l[2]; 3787 __le32 bssid_h[2]; 3788 } __packed bitmap; 3789 } __packed req = { 3790 .ctrl = { 3791 .action = SPR_SET_SRG_BITMAP, 3792 .argnum = 4, 3793 .band_idx = phy->mt76->band_idx, 3794 }, 3795 }; 3796 u32 bitmap; 3797 3798 memcpy(&bitmap, he_obss_pd->bss_color_bitmap, sizeof(bitmap)); 3799 req.bitmap.color_l[req.ctrl.band_idx] = cpu_to_le32(bitmap); 3800 3801 memcpy(&bitmap, he_obss_pd->bss_color_bitmap + 4, sizeof(bitmap)); 3802 req.bitmap.color_h[req.ctrl.band_idx] = cpu_to_le32(bitmap); 3803 3804 memcpy(&bitmap, he_obss_pd->partial_bssid_bitmap, sizeof(bitmap)); 3805 req.bitmap.bssid_l[req.ctrl.band_idx] = cpu_to_le32(bitmap); 3806 3807 memcpy(&bitmap, he_obss_pd->partial_bssid_bitmap + 4, sizeof(bitmap)); 3808 req.bitmap.bssid_h[req.ctrl.band_idx] = cpu_to_le32(bitmap); 3809 3810 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req, 3811 sizeof(req), true); 3812 } 3813 3814 int 
mt7915_mcu_add_obss_spr(struct mt7915_phy *phy, struct ieee80211_vif *vif, 3815 struct ieee80211_he_obss_pd *he_obss_pd) 3816 { 3817 int ret; 3818 3819 /* enable firmware scene detection algorithms */ 3820 ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_SD, sr_scene_detect); 3821 if (ret) 3822 return ret; 3823 3824 /* firmware dynamically adjusts PD threshold so skip manual control */ 3825 if (sr_scene_detect && !he_obss_pd->enable) 3826 return 0; 3827 3828 /* enable spatial reuse */ 3829 ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE, he_obss_pd->enable); 3830 if (ret) 3831 return ret; 3832 3833 if (sr_scene_detect || !he_obss_pd->enable) 3834 return 0; 3835 3836 ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_TX, true); 3837 if (ret) 3838 return ret; 3839 3840 /* set SRG/non-SRG OBSS PD threshold */ 3841 ret = mt7915_mcu_set_obss_spr_pd(phy, he_obss_pd); 3842 if (ret) 3843 return ret; 3844 3845 /* Set SR prohibit */ 3846 ret = mt7915_mcu_set_obss_spr_siga(phy, vif, he_obss_pd); 3847 if (ret) 3848 return ret; 3849 3850 /* set SRG BSS color/BSSID bitmap */ 3851 return mt7915_mcu_set_obss_spr_bitmap(phy, he_obss_pd); 3852 } 3853 3854 int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif, 3855 struct ieee80211_sta *sta, struct rate_info *rate) 3856 { 3857 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 3858 struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 3859 struct mt7915_dev *dev = phy->dev; 3860 struct mt76_phy *mphy = phy->mt76; 3861 struct { 3862 u8 category; 3863 u8 band; 3864 __le16 wcid; 3865 } __packed req = { 3866 .category = MCU_PHY_STATE_CONTENTION_RX_RATE, 3867 .band = mvif->mt76.band_idx, 3868 .wcid = cpu_to_le16(msta->wcid.idx), 3869 }; 3870 struct ieee80211_supported_band *sband; 3871 struct mt7915_mcu_phy_rx_info *res; 3872 struct sk_buff *skb; 3873 int ret; 3874 bool cck = false; 3875 3876 ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(PHY_STAT_INFO), 3877 &req, sizeof(req), true, 
&skb); 3878 if (ret) 3879 return ret; 3880 3881 res = (struct mt7915_mcu_phy_rx_info *)skb->data; 3882 3883 rate->mcs = res->rate; 3884 rate->nss = res->nsts + 1; 3885 3886 switch (res->mode) { 3887 case MT_PHY_TYPE_CCK: 3888 cck = true; 3889 fallthrough; 3890 case MT_PHY_TYPE_OFDM: 3891 if (mphy->chandef.chan->band == NL80211_BAND_5GHZ) 3892 sband = &mphy->sband_5g.sband; 3893 else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ) 3894 sband = &mphy->sband_6g.sband; 3895 else 3896 sband = &mphy->sband_2g.sband; 3897 3898 rate->mcs = mt76_get_rate(&dev->mt76, sband, rate->mcs, cck); 3899 rate->legacy = sband->bitrates[rate->mcs].bitrate; 3900 break; 3901 case MT_PHY_TYPE_HT: 3902 case MT_PHY_TYPE_HT_GF: 3903 if (rate->mcs > 31) { 3904 ret = -EINVAL; 3905 goto out; 3906 } 3907 3908 rate->flags = RATE_INFO_FLAGS_MCS; 3909 if (res->gi) 3910 rate->flags |= RATE_INFO_FLAGS_SHORT_GI; 3911 break; 3912 case MT_PHY_TYPE_VHT: 3913 if (rate->mcs > 9) { 3914 ret = -EINVAL; 3915 goto out; 3916 } 3917 3918 rate->flags = RATE_INFO_FLAGS_VHT_MCS; 3919 if (res->gi) 3920 rate->flags |= RATE_INFO_FLAGS_SHORT_GI; 3921 break; 3922 case MT_PHY_TYPE_HE_SU: 3923 case MT_PHY_TYPE_HE_EXT_SU: 3924 case MT_PHY_TYPE_HE_TB: 3925 case MT_PHY_TYPE_HE_MU: 3926 if (res->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11) { 3927 ret = -EINVAL; 3928 goto out; 3929 } 3930 rate->he_gi = res->gi; 3931 rate->flags = RATE_INFO_FLAGS_HE_MCS; 3932 break; 3933 default: 3934 ret = -EINVAL; 3935 goto out; 3936 } 3937 3938 switch (res->bw) { 3939 case IEEE80211_STA_RX_BW_160: 3940 rate->bw = RATE_INFO_BW_160; 3941 break; 3942 case IEEE80211_STA_RX_BW_80: 3943 rate->bw = RATE_INFO_BW_80; 3944 break; 3945 case IEEE80211_STA_RX_BW_40: 3946 rate->bw = RATE_INFO_BW_40; 3947 break; 3948 default: 3949 rate->bw = RATE_INFO_BW_20; 3950 break; 3951 } 3952 3953 out: 3954 dev_kfree_skb(skb); 3955 3956 return ret; 3957 } 3958 3959 int mt7915_mcu_set_protection(struct mt7915_phy *phy, struct ieee80211_vif *vif, 3960 u8 
ht_mode, bool use_cts_prot) 3961 { 3962 struct mt7915_dev *dev = phy->dev; 3963 int len = sizeof(struct sta_req_hdr) + sizeof(struct bss_info_prot); 3964 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 3965 struct bss_info_prot *prot; 3966 struct sk_buff *skb; 3967 struct tlv *tlv; 3968 enum { 3969 PROT_NONMEMBER = BIT(1), 3970 PROT_20MHZ = BIT(2), 3971 PROT_NONHT_MIXED = BIT(3), 3972 PROT_LEGACY_ERP = BIT(5), 3973 PROT_NONGF_STA = BIT(7), 3974 }; 3975 u32 rts_threshold; 3976 3977 skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, 3978 NULL, len); 3979 if (IS_ERR(skb)) 3980 return PTR_ERR(skb); 3981 3982 tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_PROTECT_INFO, 3983 sizeof(*prot)); 3984 prot = (struct bss_info_prot *)tlv; 3985 3986 switch (ht_mode & IEEE80211_HT_OP_MODE_PROTECTION) { 3987 case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: 3988 prot->prot_mode = cpu_to_le32(PROT_NONMEMBER); 3989 break; 3990 case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: 3991 prot->prot_mode = cpu_to_le32(PROT_20MHZ); 3992 break; 3993 case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: 3994 prot->prot_mode = cpu_to_le32(PROT_NONHT_MIXED); 3995 break; 3996 } 3997 3998 if (ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) 3999 prot->prot_mode |= cpu_to_le32(PROT_NONGF_STA); 4000 4001 if (use_cts_prot) 4002 prot->prot_mode |= cpu_to_le32(PROT_LEGACY_ERP); 4003 4004 /* reuse current RTS setting */ 4005 rts_threshold = phy->mt76->hw->wiphy->rts_threshold; 4006 if (rts_threshold == (u32)-1) 4007 prot->rts_len_thres = cpu_to_le32(MT7915_RTS_LEN_THRES); 4008 else 4009 prot->rts_len_thres = cpu_to_le32(rts_threshold); 4010 4011 prot->rts_pkt_thres = 0x2; 4012 4013 prot->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th); 4014 if (!prot->he_rts_thres) 4015 prot->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES); 4016 4017 return mt76_mcu_skb_send_msg(&dev->mt76, skb, 4018 MCU_EXT_CMD(BSS_INFO_UPDATE), true); 4019 } 4020 4021 int 
mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif, 4022 struct cfg80211_he_bss_color *he_bss_color) 4023 { 4024 int len = sizeof(struct sta_req_hdr) + sizeof(struct bss_info_color); 4025 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 4026 struct bss_info_color *bss_color; 4027 struct sk_buff *skb; 4028 struct tlv *tlv; 4029 4030 skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, 4031 NULL, len); 4032 if (IS_ERR(skb)) 4033 return PTR_ERR(skb); 4034 4035 tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BSS_COLOR, 4036 sizeof(*bss_color)); 4037 bss_color = (struct bss_info_color *)tlv; 4038 bss_color->disable = !he_bss_color->enabled; 4039 bss_color->color = he_bss_color->color; 4040 4041 return mt76_mcu_skb_send_msg(&dev->mt76, skb, 4042 MCU_EXT_CMD(BSS_INFO_UPDATE), true); 4043 } 4044 4045 #define TWT_AGRT_TRIGGER BIT(0) 4046 #define TWT_AGRT_ANNOUNCE BIT(1) 4047 #define TWT_AGRT_PROTECT BIT(2) 4048 4049 int mt7915_mcu_twt_agrt_update(struct mt7915_dev *dev, 4050 struct mt7915_vif *mvif, 4051 struct mt7915_twt_flow *flow, 4052 int cmd) 4053 { 4054 struct { 4055 u8 tbl_idx; 4056 u8 cmd; 4057 u8 own_mac_idx; 4058 u8 flowid; /* 0xff for group id */ 4059 __le16 peer_id; /* specify the peer_id (msb=0) 4060 * or group_id (msb=1) 4061 */ 4062 u8 duration; /* 256 us */ 4063 u8 bss_idx; 4064 __le64 start_tsf; 4065 __le16 mantissa; 4066 u8 exponent; 4067 u8 is_ap; 4068 u8 agrt_params; 4069 u8 rsv[23]; 4070 } __packed req = { 4071 .tbl_idx = flow->table_id, 4072 .cmd = cmd, 4073 .own_mac_idx = mvif->mt76.omac_idx, 4074 .flowid = flow->id, 4075 .peer_id = cpu_to_le16(flow->wcid), 4076 .duration = flow->duration, 4077 .bss_idx = mvif->mt76.idx, 4078 .start_tsf = cpu_to_le64(flow->tsf), 4079 .mantissa = flow->mantissa, 4080 .exponent = flow->exp, 4081 .is_ap = true, 4082 }; 4083 4084 if (flow->protection) 4085 req.agrt_params |= TWT_AGRT_PROTECT; 4086 if (!flow->flowtype) 4087 req.agrt_params |= TWT_AGRT_ANNOUNCE; 4088 if 
(flow->trigger) 4089 req.agrt_params |= TWT_AGRT_TRIGGER; 4090 4091 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TWT_AGRT_UPDATE), 4092 &req, sizeof(req), true); 4093 } 4094 4095 int mt7915_mcu_wed_wa_tx_stats(struct mt7915_dev *dev, u16 wlan_idx) 4096 { 4097 struct { 4098 __le32 cmd; 4099 __le32 arg0; 4100 __le32 arg1; 4101 __le16 arg2; 4102 } __packed req = { 4103 .cmd = cpu_to_le32(0x15), 4104 }; 4105 struct mt7915_mcu_wa_tx_stat { 4106 __le16 wcid; 4107 u8 __rsv2[2]; 4108 4109 /* tx_bytes is deprecated since WA byte counter uses u32, 4110 * which easily leads to overflow. 4111 */ 4112 __le32 tx_bytes; 4113 __le32 tx_packets; 4114 } __packed *res; 4115 struct mt76_wcid *wcid; 4116 struct sk_buff *skb; 4117 int ret, len; 4118 u16 ret_wcid; 4119 4120 if (is_mt7915(&dev->mt76)) { 4121 req.arg0 = cpu_to_le32(wlan_idx); 4122 len = sizeof(req) - sizeof(req.arg2); 4123 } else { 4124 req.arg0 = cpu_to_le32(1); 4125 req.arg2 = cpu_to_le16(wlan_idx); 4126 len = sizeof(req); 4127 } 4128 4129 ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_WA_PARAM_CMD(QUERY), 4130 &req, len, true, &skb); 4131 if (ret) 4132 return ret; 4133 4134 if (!is_mt7915(&dev->mt76)) 4135 skb_pull(skb, 4); 4136 4137 res = (struct mt7915_mcu_wa_tx_stat *)skb->data; 4138 4139 ret_wcid = le16_to_cpu(res->wcid); 4140 if (is_mt7915(&dev->mt76)) 4141 ret_wcid &= 0xff; 4142 4143 if (ret_wcid != wlan_idx) { 4144 ret = -EINVAL; 4145 goto out; 4146 } 4147 4148 rcu_read_lock(); 4149 4150 wcid = mt76_wcid_ptr(dev, wlan_idx); 4151 if (wcid) 4152 wcid->stats.tx_packets += le32_to_cpu(res->tx_packets); 4153 else 4154 ret = -EINVAL; 4155 4156 rcu_read_unlock(); 4157 out: 4158 dev_kfree_skb(skb); 4159 4160 return ret; 4161 } 4162 4163 int mt7915_mcu_rf_regval(struct mt7915_dev *dev, u32 regidx, u32 *val, bool set) 4164 { 4165 struct { 4166 __le32 idx; 4167 __le32 ofs; 4168 __le32 data; 4169 } __packed req = { 4170 .idx = cpu_to_le32(u32_get_bits(regidx, GENMASK(31, 24))), 4171 .ofs = 
cpu_to_le32(u32_get_bits(regidx, GENMASK(23, 0))), 4172 .data = set ? cpu_to_le32(*val) : 0, 4173 }; 4174 struct sk_buff *skb; 4175 int ret; 4176 4177 if (set) 4178 return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RF_REG_ACCESS), 4179 &req, sizeof(req), false); 4180 4181 ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(RF_REG_ACCESS), 4182 &req, sizeof(req), true, &skb); 4183 if (ret) 4184 return ret; 4185 4186 *val = le32_to_cpu(*(__le32 *)(skb->data + 8)); 4187 dev_kfree_skb(skb); 4188 4189 return 0; 4190 } 4191