// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */
#include "mlo.h"
#include "phy.h"

/* Block reasons helper */
#define HANDLE_EMLSR_BLOCKED_REASONS(HOW)	\
	HOW(PREVENTION)		\
	HOW(WOWLAN)		\
	HOW(ROC)		\
	HOW(NON_BSS)		\
	HOW(TMP_NON_BSS)	\
	HOW(TPT)

static const char *
iwl_mld_get_emlsr_blocked_string(enum iwl_mld_emlsr_blocked blocked)
{
	/* Using switch without "default" will warn about missing entries */
	switch (blocked) {
#define REASON_CASE(x) case IWL_MLD_EMLSR_BLOCKED_##x: return #x;
	HANDLE_EMLSR_BLOCKED_REASONS(REASON_CASE)
#undef REASON_CASE
	}

	return "ERROR";
}

static void iwl_mld_print_emlsr_blocked(struct iwl_mld *mld, u32 mask)
{
#define NAME_FMT(x) "%s"
#define NAME_PR(x) (mask & IWL_MLD_EMLSR_BLOCKED_##x) ? "[" #x "]" : "",
	IWL_DEBUG_INFO(mld,
		       "EMLSR blocked = " HANDLE_EMLSR_BLOCKED_REASONS(NAME_FMT)
		       " (0x%x)\n",
		       HANDLE_EMLSR_BLOCKED_REASONS(NAME_PR)
		       mask);
#undef NAME_FMT
#undef NAME_PR
}

/* Exit reasons helper */
#define HANDLE_EMLSR_EXIT_REASONS(HOW)	\
	HOW(BLOCK)		\
	HOW(MISSED_BEACON)	\
	HOW(FAIL_ENTRY)		\
	HOW(CSA)		\
	HOW(EQUAL_BAND)		\
	HOW(LOW_RSSI)		\
	HOW(LINK_USAGE)		\
	HOW(BT_COEX)		\
	HOW(CHAN_LOAD)		\
	HOW(RFI)		\
	HOW(FW_REQUEST)		\
	HOW(INVALID)

static const char *
iwl_mld_get_emlsr_exit_string(enum iwl_mld_emlsr_exit exit)
{
	/* Using switch without "default" will warn about missing entries */
	switch (exit) {
#define REASON_CASE(x) case IWL_MLD_EMLSR_EXIT_##x: return #x;
	HANDLE_EMLSR_EXIT_REASONS(REASON_CASE)
#undef REASON_CASE
	}

	return "ERROR";
}

static void iwl_mld_print_emlsr_exit(struct iwl_mld *mld, u32 mask)
{
#define NAME_FMT(x) "%s"
#define NAME_PR(x) (mask & IWL_MLD_EMLSR_EXIT_##x) ? "[" #x "]" : "",
	IWL_DEBUG_INFO(mld,
		       "EMLSR exit = " HANDLE_EMLSR_EXIT_REASONS(NAME_FMT)
		       " (0x%x)\n",
		       HANDLE_EMLSR_EXIT_REASONS(NAME_PR)
		       mask);
#undef NAME_FMT
#undef NAME_PR
}

void iwl_mld_emlsr_prevent_done_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.prevent_done_wk.work);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);

	if (WARN_ON(!(mld_vif->emlsr.blocked_reasons &
		      IWL_MLD_EMLSR_BLOCKED_PREVENTION)))
		return;

	iwl_mld_unblock_emlsr(mld_vif->mld, vif,
			      IWL_MLD_EMLSR_BLOCKED_PREVENTION);
}

void iwl_mld_emlsr_tmp_non_bss_done_wk(struct wiphy *wiphy,
				       struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.tmp_non_bss_done_wk.work);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);

	if (WARN_ON(!(mld_vif->emlsr.blocked_reasons &
		      IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS)))
		return;

	iwl_mld_unblock_emlsr(mld_vif->mld, vif,
			      IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS);
}

#define IWL_MLD_TRIGGER_LINK_SEL_TIME	(HZ * IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC)
#define IWL_MLD_SCAN_EXPIRE_TIME	(HZ * IWL_MLD_SCAN_EXPIRE_TIME_SEC)

/* Exit reasons that can cause longer EMLSR prevention */
#define IWL_MLD_PREVENT_EMLSR_REASONS	(IWL_MLD_EMLSR_EXIT_MISSED_BEACON | \
					 IWL_MLD_EMLSR_EXIT_LINK_USAGE | \
					 IWL_MLD_EMLSR_EXIT_FW_REQUEST)
#define IWL_MLD_PREVENT_EMLSR_TIMEOUT	(HZ * 400)

#define IWL_MLD_EMLSR_PREVENT_SHORT	(HZ * 300)
#define IWL_MLD_EMLSR_PREVENT_LONG	(HZ * 600)

static void iwl_mld_check_emlsr_prevention(struct iwl_mld *mld,
					   struct iwl_mld_vif *mld_vif,
					   enum iwl_mld_emlsr_exit reason)
{
	unsigned long delay;

	/*
	 * Reset the counter if more than 400 seconds have passed between one
	 * exit and the other, or if we exited due to a different reason.
	 * Will also reset the counter after the long prevention is done.
	 */
	if (time_after(jiffies, mld_vif->emlsr.last_exit_ts +
				IWL_MLD_PREVENT_EMLSR_TIMEOUT) ||
	    mld_vif->emlsr.last_exit_reason != reason)
		mld_vif->emlsr.exit_repeat_count = 0;

	mld_vif->emlsr.last_exit_reason = reason;
	mld_vif->emlsr.last_exit_ts = jiffies;
	mld_vif->emlsr.exit_repeat_count++;

	/*
	 * Do not add a prevention when the reason was a block. For a block,
	 * EMLSR will be enabled again on unblock.
	 */
	if (reason == IWL_MLD_EMLSR_EXIT_BLOCK)
		return;

	/* Set prevention for a minimum of 30 seconds */
	mld_vif->emlsr.blocked_reasons |= IWL_MLD_EMLSR_BLOCKED_PREVENTION;
	delay = IWL_MLD_TRIGGER_LINK_SEL_TIME;

	/* Handle repeats for reasons that can cause long prevention */
	if (mld_vif->emlsr.exit_repeat_count > 1 &&
	    reason & IWL_MLD_PREVENT_EMLSR_REASONS) {
		if (mld_vif->emlsr.exit_repeat_count == 2)
			delay = IWL_MLD_EMLSR_PREVENT_SHORT;
		else
			delay = IWL_MLD_EMLSR_PREVENT_LONG;

		/*
		 * The timeouts are chosen so that this will not happen, i.e.
		 * IWL_MLD_EMLSR_PREVENT_LONG > IWL_MLD_PREVENT_EMLSR_TIMEOUT
		 */
		WARN_ON(mld_vif->emlsr.exit_repeat_count > 3);
	}

	IWL_DEBUG_INFO(mld,
		       "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n",
		       delay / HZ, mld_vif->emlsr.exit_repeat_count,
		       iwl_mld_get_emlsr_exit_string(reason), reason);

	wiphy_delayed_work_queue(mld->wiphy,
				 &mld_vif->emlsr.prevent_done_wk, delay);
}

static void iwl_mld_clear_avg_chan_load_iter(struct ieee80211_hw *hw,
					     struct ieee80211_chanctx_conf *ctx,
					     void *dat)
{
	struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);

	/* It is ok to do it for all chanctx (and not only for the ones that
	 * belong to the EMLSR vif) since EMLSR is not allowed if there is
	 * another vif.
	 */
	phy->avg_channel_load_not_by_us = 0;
}

static int _iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			       enum iwl_mld_emlsr_exit exit, u8 link_to_keep,
			       bool sync)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	u16 new_active_links;
	int ret = 0;

	lockdep_assert_wiphy(mld->wiphy);

	/* On entry failure need to exit anyway, even if entered from debugfs */
	if (exit != IWL_MLD_EMLSR_EXIT_FAIL_ENTRY && !IWL_MLD_AUTO_EML_ENABLE)
		return 0;

	/* Ignore exit request if EMLSR is not active */
	if (!iwl_mld_emlsr_active(vif))
		return 0;

	if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mld_vif->authorized))
		return 0;

	if (WARN_ON(!(vif->active_links & BIT(link_to_keep))))
		link_to_keep = __ffs(vif->active_links);

	new_active_links = BIT(link_to_keep);
	IWL_DEBUG_INFO(mld,
		       "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n",
		       iwl_mld_get_emlsr_exit_string(exit), exit,
		       vif->active_links, new_active_links);

	if (sync)
		ret = ieee80211_set_active_links(vif, new_active_links);
	else
		ieee80211_set_active_links_async(vif, new_active_links);

	/* Update latest exit reason and check EMLSR prevention */
	iwl_mld_check_emlsr_prevention(mld, mld_vif, exit);

	/* channel_load_not_by_us is invalid when in EMLSR.
	 * Clear it so wrong values won't be used.
	 */
	ieee80211_iter_chan_contexts_atomic(mld->hw,
					    iwl_mld_clear_avg_chan_load_iter,
					    NULL);

	return ret;
}

void iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			enum iwl_mld_emlsr_exit exit, u8 link_to_keep)
{
	_iwl_mld_exit_emlsr(mld, vif, exit, link_to_keep, false);
}
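
/*
 * Blocking vs. exiting: a blocked reason keeps EMLSR disabled until that
 * reason is cleared again, and an exit caused by a block arms no prevention
 * timer. Once the last blocked reason is cleared, an internal MLO scan is
 * scheduled so that link selection can re-enable EMLSR.
 */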
static int _iwl_mld_emlsr_block(struct iwl_mld *mld, struct ieee80211_vif *vif,
				enum iwl_mld_emlsr_blocked reason,
				u8 link_to_keep, bool sync)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	lockdep_assert_wiphy(mld->wiphy);

	if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
		return 0;

	if (mld_vif->emlsr.blocked_reasons & reason)
		return 0;

	mld_vif->emlsr.blocked_reasons |= reason;

	IWL_DEBUG_INFO(mld,
		       "Blocking EMLSR mode. reason = %s (0x%x)\n",
		       iwl_mld_get_emlsr_blocked_string(reason), reason);
	iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);

	if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
		wiphy_delayed_work_cancel(mld_vif->mld->wiphy,
					  &mld_vif->emlsr.check_tpt_wk);

	return _iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BLOCK,
				   link_to_keep, sync);
}

void iwl_mld_block_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			 enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
{
	_iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, false);
}

int iwl_mld_block_emlsr_sync(struct iwl_mld *mld, struct ieee80211_vif *vif,
			     enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
{
	return _iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, true);
}

static void _iwl_mld_select_links(struct iwl_mld *mld,
				  struct ieee80211_vif *vif);

void iwl_mld_unblock_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			   enum iwl_mld_emlsr_blocked reason)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	lockdep_assert_wiphy(mld->wiphy);

	if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
		return;

	if (!(mld_vif->emlsr.blocked_reasons & reason))
		return;

	mld_vif->emlsr.blocked_reasons &= ~reason;

	IWL_DEBUG_INFO(mld,
		       "Unblocking EMLSR mode. reason = %s (0x%x)\n",
		       iwl_mld_get_emlsr_blocked_string(reason), reason);
	iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);

	if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
		wiphy_delayed_work_queue(mld_vif->mld->wiphy,
					 &mld_vif->emlsr.check_tpt_wk,
					 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));

	if (mld_vif->emlsr.blocked_reasons)
		return;

	IWL_DEBUG_INFO(mld, "EMLSR is unblocked\n");
	iwl_mld_int_mlo_scan(mld, vif);
}

static void
iwl_mld_vif_iter_emlsr_mode_notif(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_esr_mode_notif *notif = (void *)data;

	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	switch (le32_to_cpu(notif->action)) {
	case ESR_RECOMMEND_LEAVE:
		iwl_mld_exit_emlsr(mld_vif->mld, vif,
				   IWL_MLD_EMLSR_EXIT_FW_REQUEST,
				   iwl_mld_get_primary_link(vif));
		break;
	case ESR_RECOMMEND_ENTER:
	case ESR_FORCE_LEAVE:
	default:
		IWL_WARN(mld_vif->mld, "Unexpected EMLSR notification: %d\n",
			 le32_to_cpu(notif->action));
	}
}

void iwl_mld_handle_emlsr_mode_notif(struct iwl_mld *mld,
				     struct iwl_rx_packet *pkt)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_emlsr_mode_notif,
						pkt->data);
}

static void
iwl_mld_vif_iter_disconnect_emlsr(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	ieee80211_connection_loss(vif);
}

void iwl_mld_handle_emlsr_trans_fail_notif(struct iwl_mld *mld,
					   struct iwl_rx_packet *pkt)
{
	const struct iwl_esr_trans_fail_notif *notif = (const void *)pkt->data;
	u32 fw_link_id = le32_to_cpu(notif->link_id);
	struct ieee80211_bss_conf *bss_conf =
		iwl_mld_fw_id_to_link_conf(mld, fw_link_id);

	IWL_DEBUG_INFO(mld, "Failed to %s EMLSR on link %d (FW: %d), reason %d\n",
		       le32_to_cpu(notif->activation) ? "enter" : "exit",
		       bss_conf ? bss_conf->link_id : -1,
		       le32_to_cpu(notif->link_id),
		       le32_to_cpu(notif->err_code));

	if (IWL_FW_CHECK(mld, !bss_conf,
			 "FW reported failure to %sactivate EMLSR on a non-existing link: %d\n",
			 le32_to_cpu(notif->activation) ? "" : "de",
			 fw_link_id)) {
		ieee80211_iterate_active_interfaces_mtx(
			mld->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mld_vif_iter_disconnect_emlsr, NULL);
		return;
	}

	/* Disconnect if we failed to deactivate a link */
	if (!le32_to_cpu(notif->activation)) {
		ieee80211_connection_loss(bss_conf->vif);
		return;
	}

	/*
	 * We failed to activate the second link, go back to the link specified
	 * by the firmware as that is the one that is still valid now.
	 */
	iwl_mld_exit_emlsr(mld, bss_conf->vif, IWL_MLD_EMLSR_EXIT_FAIL_ENTRY,
			   bss_conf->link_id);
}

/* Active non-station link tracking */
static void iwl_mld_count_non_bss_links(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int *count = _data;

	if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
		return;

	*count += iwl_mld_count_active_links(mld_vif->mld, vif);
}

struct iwl_mld_update_emlsr_block_data {
	bool block;
	int result;
};

static void
iwl_mld_vif_iter_update_emlsr_non_bss_block(void *_data, u8 *mac,
					    struct ieee80211_vif *vif)
{
	struct iwl_mld_update_emlsr_block_data *data = _data;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int ret;

	if (data->block) {
		ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
					       IWL_MLD_EMLSR_BLOCKED_NON_BSS,
					       iwl_mld_get_primary_link(vif));
		if (ret)
			data->result = ret;
	} else {
		iwl_mld_unblock_emlsr(mld_vif->mld, vif,
				      IWL_MLD_EMLSR_BLOCKED_NON_BSS);
	}
}

int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld,
				      int pending_link_changes)
{
	/* An active link of a non-station vif blocks EMLSR. Upon activation
	 * block EMLSR on the bss vif. Upon deactivation, check if this link
	 * was the last non-station link active, and if so unblock the bss vif.
	 */
	struct iwl_mld_update_emlsr_block_data block_data = {};
	int count = pending_link_changes;

	/* No need to count if we are activating a non-BSS link */
	if (count <= 0)
		ieee80211_iterate_active_interfaces_mtx(mld->hw,
							IEEE80211_IFACE_ITER_NORMAL,
							iwl_mld_count_non_bss_links,
							&count);

	/*
	 * We could skip updating it if the block state did not change (and
	 * pending_link_changes is non-zero).
	 */
	block_data.block = !!count;

	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_update_emlsr_non_bss_block,
						&block_data);

	return block_data.result;
}
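
/*
 * Periodic throughput check: every IWL_MLD_TPT_COUNT_WINDOW the MPDU
 * counters are summed over all RX queues and links. If both the TX and the
 * RX totals are below IWL_MLD_ENTER_EMLSR_TPT_THRESH, EMLSR is blocked on
 * TPT grounds. While EMLSR is active, the secondary link must also carry at
 * least EMLSR_SEC_LINK_MIN_PERC percent of the traffic (TX is only checked
 * when the total exceeds EMLSR_MIN_TX MPDUs, RX when it exceeds
 * EMLSR_MIN_RX), otherwise we exit with IWL_MLD_EMLSR_EXIT_LINK_USAGE.
 */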
#define EMLSR_SEC_LINK_MIN_PERC 10
#define EMLSR_MIN_TX 3000
#define EMLSR_MIN_RX 400

void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.check_tpt_wk.work);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
	struct iwl_mld *mld = mld_vif->mld;
	struct iwl_mld_sta *mld_sta;
	struct iwl_mld_link *sec_link;
	unsigned long total_tx = 0, total_rx = 0;
	unsigned long sec_link_tx = 0, sec_link_rx = 0;
	u8 sec_link_tx_perc, sec_link_rx_perc;
	s8 sec_link_id;

	if (!iwl_mld_vif_has_emlsr_cap(vif) || !mld_vif->ap_sta)
		return;

	mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);

	/* We only count for the AP sta in an MLO connection */
	if (!mld_sta->mpdu_counters)
		return;

	/* This wk should only run when the TPT blocker isn't set.
	 * When the blocker is set, the decision to remove it, as well as
	 * clearing the counters, is done in DP (to avoid having a wk every
	 * 5 seconds when idle. When the blocker is unset, we are not idle
	 * anyway).
	 */
	if (WARN_ON(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT))
		return;
	/*
	 * TPT is unblocked, need to check if the TPT criteria are still met.
	 *
	 * If EMLSR is active, then we also need to check the secondary link
	 * requirements.
	 */
	if (iwl_mld_emlsr_active(vif)) {
		sec_link_id = iwl_mld_get_other_link(vif, iwl_mld_get_primary_link(vif));
		sec_link = iwl_mld_link_dereference_check(mld_vif, sec_link_id);
		if (WARN_ON_ONCE(!sec_link))
			return;
		/* We need the FW ID here */
		sec_link_id = sec_link->fw_id;
	} else {
		sec_link_id = -1;
	}

	/* Sum up RX and TX MPDUs from the different queues/links */
	for (int q = 0; q < mld->trans->num_rx_queues; q++) {
		struct iwl_mld_per_q_mpdu_counter *queue_counter =
			&mld_sta->mpdu_counters[q];

		spin_lock_bh(&queue_counter->lock);

		/* The link IDs that don't exist will contain 0 */
		for (int link = 0;
		     link < ARRAY_SIZE(queue_counter->per_link);
		     link++) {
			total_tx += queue_counter->per_link[link].tx;
			total_rx += queue_counter->per_link[link].rx;
		}

		if (sec_link_id != -1) {
			sec_link_tx += queue_counter->per_link[sec_link_id].tx;
			sec_link_rx += queue_counter->per_link[sec_link_id].rx;
		}

		memset(queue_counter->per_link, 0,
		       sizeof(queue_counter->per_link));

		spin_unlock_bh(&queue_counter->lock);
	}

	IWL_DEBUG_INFO(mld, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
		       total_tx, total_rx);

	/* If we don't have enough MPDUs - exit EMLSR */
	if (total_tx < IWL_MLD_ENTER_EMLSR_TPT_THRESH &&
	    total_rx < IWL_MLD_ENTER_EMLSR_TPT_THRESH) {
		iwl_mld_block_emlsr(mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT,
				    iwl_mld_get_primary_link(vif));
		return;
	}

	/* EMLSR is not active */
	if (sec_link_id == -1)
		return;

	IWL_DEBUG_INFO(mld, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n",
		       sec_link_id, sec_link_tx, sec_link_rx);

	/* Calculate the percentage of the secondary link TX/RX */
	sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
	sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;

	/*
	 * The TX/RX percentage is checked only if it exceeds the required
	 * minimum. In addition, RX is checked only if the TX check failed.
	 */
	if ((total_tx > EMLSR_MIN_TX &&
	     sec_link_tx_perc < EMLSR_SEC_LINK_MIN_PERC) ||
	    (total_rx > EMLSR_MIN_RX &&
	     sec_link_rx_perc < EMLSR_SEC_LINK_MIN_PERC)) {
		iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_LINK_USAGE,
				   iwl_mld_get_primary_link(vif));
		return;
	}

	/* Check again when the next window ends */
	wiphy_delayed_work_queue(mld_vif->mld->wiphy,
				 &mld_vif->emlsr.check_tpt_wk,
				 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
}

void iwl_mld_emlsr_unblock_tpt_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.unblock_tpt_wk);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);

	iwl_mld_unblock_emlsr(mld_vif->mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT);
}

/*
 * Link selection
 */

s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld,
				 const struct cfg80211_chan_def *chandef,
				 bool low)
{
	if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ &&
		    chandef->chan->band != NL80211_BAND_5GHZ &&
		    chandef->chan->band != NL80211_BAND_6GHZ))
		return S8_MAX;

#define RSSI_THRESHOLD(_low, _bw)			\
	(_low) ? IWL_MLD_LOW_RSSI_THRESH_##_bw##MHZ	\
	       : IWL_MLD_HIGH_RSSI_THRESH_##_bw##MHZ

	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
	case NL80211_CHAN_WIDTH_20:
	/* 320 MHz has the same thresholds as 20 MHz */
	case NL80211_CHAN_WIDTH_320:
		return RSSI_THRESHOLD(low, 20);
	case NL80211_CHAN_WIDTH_40:
		return RSSI_THRESHOLD(low, 40);
	case NL80211_CHAN_WIDTH_80:
		return RSSI_THRESHOLD(low, 80);
	case NL80211_CHAN_WIDTH_160:
		return RSSI_THRESHOLD(low, 160);
	default:
		WARN_ON(1);
		return S8_MAX;
	}
#undef RSSI_THRESHOLD
}

static u32
iwl_mld_emlsr_disallowed_with_link(struct iwl_mld *mld,
				   struct ieee80211_vif *vif,
				   struct iwl_mld_link_sel_data *link,
				   bool primary)
{
	struct wiphy *wiphy = mld->wiphy;
	struct ieee80211_bss_conf *conf;
	u32 ret = 0;

	conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]);
	if (WARN_ON_ONCE(!conf))
		return IWL_MLD_EMLSR_EXIT_INVALID;

	if (link->chandef->chan->band == NL80211_BAND_2GHZ && mld->bt_is_active)
		ret |= IWL_MLD_EMLSR_EXIT_BT_COEX;

	if (link->signal <
	    iwl_mld_get_emlsr_rssi_thresh(mld, link->chandef, false))
		ret |= IWL_MLD_EMLSR_EXIT_LOW_RSSI;

	if (conf->csa_active)
		ret |= IWL_MLD_EMLSR_EXIT_CSA;

	if (ret) {
		IWL_DEBUG_INFO(mld,
			       "Link %d is not allowed for EMLSR as %s\n",
			       link->link_id,
			       primary ? "primary" : "secondary");
		iwl_mld_print_emlsr_exit(mld, ret);
	}

	return ret;
}

static u8
iwl_mld_set_link_sel_data(struct iwl_mld *mld,
			  struct ieee80211_vif *vif,
			  struct iwl_mld_link_sel_data *data,
			  unsigned long usable_links,
			  u8 *best_link_idx)
{
	u8 n_data = 0;
	u16 max_grade = 0;
	unsigned long link_id;

	/*
	 * TODO: don't select links that weren't discovered in the last scan
	 * This requires mac80211 (or cfg80211) changes to forward/track when
	 * a BSS was last updated. cfg80211 already tracks this information
	 * but it is not exposed within the kernel.
	 */
	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
		struct ieee80211_bss_conf *link_conf =
			link_conf_dereference_protected(vif, link_id);

		if (WARN_ON_ONCE(!link_conf))
			continue;

		/* Ignore any BSS that was not seen in the last MLO scan */
		if (ktime_before(link_conf->bss->ts_boottime,
				 mld->scan.last_mlo_scan_time))
			continue;

		data[n_data].link_id = link_id;
		data[n_data].chandef = &link_conf->chanreq.oper;
		data[n_data].signal = MBM_TO_DBM(link_conf->bss->signal);
		data[n_data].grade = iwl_mld_get_link_grade(mld, link_conf);

		if (n_data == 0 || data[n_data].grade > max_grade) {
			max_grade = data[n_data].grade;
			*best_link_idx = n_data;
		}
		n_data++;
	}

	return n_data;
}

static u32
iwl_mld_get_min_chan_load_thresh(struct ieee80211_chanctx_conf *chanctx)
{
	const struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(chanctx);

	switch (phy->chandef.width) {
	case NL80211_CHAN_WIDTH_320:
	case NL80211_CHAN_WIDTH_160:
		return 5;
	case NL80211_CHAN_WIDTH_80:
		return 7;
	default:
		break;
	}
	return 10;
}
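
/*
 * EMLSR is only worthwhile if the primary channel is sufficiently loaded by
 * others (at least the per-bandwidth minimum from
 * iwl_mld_get_min_chan_load_thresh()). When the primary link is wider than
 * the secondary one, the required load grows with the bandwidth ratio:
 * above 25% for a 2x ratio, 40% for 4x and 50% for 8x/16x. Low latency vifs
 * skip the ratio check.
 */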
VISIBLE_IF_IWLWIFI_KUNIT bool
iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld,
				  struct ieee80211_vif *vif,
				  const struct iwl_mld_link_sel_data *a,
				  const struct iwl_mld_link_sel_data *b)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld_link *link_a =
		iwl_mld_link_dereference_check(mld_vif, a->link_id);
	struct ieee80211_chanctx_conf *chanctx_a = NULL;
	u32 bw_a, bw_b, ratio;
	u32 primary_load_perc;

	if (!link_a || !link_a->active) {
		IWL_DEBUG_EHT(mld, "Primary link is not active. Can't enter EMLSR\n");
		return false;
	}

	chanctx_a = wiphy_dereference(mld->wiphy, link_a->chan_ctx);

	if (WARN_ON(!chanctx_a))
		return false;

	primary_load_perc =
		iwl_mld_phy_from_mac80211(chanctx_a)->avg_channel_load_not_by_us;

	IWL_DEBUG_EHT(mld, "Average channel load not by us: %u\n", primary_load_perc);

	if (primary_load_perc < iwl_mld_get_min_chan_load_thresh(chanctx_a)) {
		IWL_DEBUG_EHT(mld, "Channel load is below the minimum threshold\n");
		return false;
	}

	if (iwl_mld_vif_low_latency(mld_vif)) {
		IWL_DEBUG_EHT(mld, "Low latency vif, EMLSR is allowed\n");
		return true;
	}

	if (a->chandef->width <= b->chandef->width)
		return true;

	bw_a = cfg80211_chandef_get_width(a->chandef);
	bw_b = cfg80211_chandef_get_width(b->chandef);
	ratio = bw_a / bw_b;

	switch (ratio) {
	case 2:
		return primary_load_perc > 25;
	case 4:
		return primary_load_perc > 40;
	case 8:
	case 16:
		return primary_load_perc > 50;
	}

	return false;
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_channel_load_allows_emlsr);

static bool
iwl_mld_valid_emlsr_pair(struct ieee80211_vif *vif,
			 struct iwl_mld_link_sel_data *a,
			 struct iwl_mld_link_sel_data *b)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = mld_vif->mld;
	u32 reason_mask = 0;

	/* Per-link considerations */
	if (iwl_mld_emlsr_disallowed_with_link(mld, vif, a, true) ||
	    iwl_mld_emlsr_disallowed_with_link(mld, vif, b, false))
		return false;

	if (a->chandef->chan->band == b->chandef->chan->band) {
		const struct cfg80211_chan_def *c_low = a->chandef;
		const struct cfg80211_chan_def *c_high = b->chandef;
		u32 c_low_upper_edge, c_high_lower_edge;

		if (c_low->chan->center_freq > c_high->chan->center_freq)
			swap(c_low, c_high);

		c_low_upper_edge = c_low->chan->center_freq +
				   cfg80211_chandef_get_width(c_low) / 2;
		c_high_lower_edge = c_high->chan->center_freq -
				    cfg80211_chandef_get_width(c_high) / 2;

		if (a->chandef->chan->band == NL80211_BAND_5GHZ &&
		    c_low_upper_edge <= 5330 && c_high_lower_edge >= 5490) {
			/* This case is fine - HW/FW can deal with it, there's
			 * enough separation between the two channels.
			 */
		} else {
			reason_mask |= IWL_MLD_EMLSR_EXIT_EQUAL_BAND;
		}
	}
	if (!iwl_mld_channel_load_allows_emlsr(mld, vif, a, b))
		reason_mask |= IWL_MLD_EMLSR_EXIT_CHAN_LOAD;

	if (reason_mask) {
		IWL_DEBUG_INFO(mld,
			       "Links %d and %d are not a valid pair for EMLSR\n",
			       a->link_id, b->link_id);
		IWL_DEBUG_INFO(mld,
			       "Link bandwidths are: %d and %d\n",
			       nl80211_chan_width_to_mhz(a->chandef->width),
			       nl80211_chan_width_to_mhz(b->chandef->width));
		iwl_mld_print_emlsr_exit(mld, reason_mask);
		return false;
	}

	return true;
}

/* Calculation is done with fixed-point with a scaling factor of 1/256 */
#define SCALE_FACTOR 256
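
/*
 * Worked example (hypothetical numbers): with grade_a = 80, grade_b = 60 and
 * a primary channel load of 64/256 (~25%), the combined EMLSR grade computed
 * below is 80 + (60 * 64) / 256 = 95.
 */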

/*
 * Returns the combined grade of two given links.
 * Returns 0 if EMLSR is not allowed with these 2 links.
 */
static
unsigned int iwl_mld_get_emlsr_grade(struct iwl_mld *mld,
				     struct ieee80211_vif *vif,
				     struct iwl_mld_link_sel_data *a,
				     struct iwl_mld_link_sel_data *b,
				     u8 *primary_id)
{
	struct ieee80211_bss_conf *primary_conf;
	struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
	unsigned int primary_load;

	lockdep_assert_wiphy(wiphy);

	/* a is always primary, b is always secondary */
	if (b->grade > a->grade)
		swap(a, b);

	*primary_id = a->link_id;

	if (!iwl_mld_valid_emlsr_pair(vif, a, b))
		return 0;

	primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);

	if (WARN_ON_ONCE(!primary_conf))
		return 0;

	primary_load = iwl_mld_get_chan_load(mld, primary_conf);

	/* The more the primary link is loaded, the more worthwhile EMLSR becomes */
	return a->grade + ((b->grade * primary_load) / SCALE_FACTOR);
}
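
/*
 * Pick the new set of active links: default to the single link with the
 * highest grade, then try every link pair and switch to EMLSR when a pair's
 * combined grade is at least as good as the best single link. Requires an
 * MLO scan that is no more than 5 seconds old.
 */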
static void _iwl_mld_select_links(struct iwl_mld *mld,
				  struct ieee80211_vif *vif)
{
	struct iwl_mld_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
	struct iwl_mld_link_sel_data *best_link;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int max_active_links = iwl_mld_max_active_links(mld, vif);
	u16 new_active, usable_links = ieee80211_vif_usable_links(vif);
	u8 best_idx, new_primary, n_data;
	u16 max_grade;

	lockdep_assert_wiphy(mld->wiphy);

	if (!mld_vif->authorized || hweight16(usable_links) <= 1)
		return;

	if (WARN(ktime_before(mld->scan.last_mlo_scan_time,
			      ktime_sub_ns(ktime_get_boottime_ns(),
					   5ULL * NSEC_PER_SEC)),
		 "Last MLO scan was too long ago, can't select links\n"))
		return;

	/* The logic below is simple and not suited for more than 2 links */
	WARN_ON_ONCE(max_active_links > 2);

	n_data = iwl_mld_set_link_sel_data(mld, vif, data, usable_links,
					   &best_idx);

	if (WARN(!n_data, "Couldn't find a valid grade for any link!\n"))
		return;

	/* Default to selecting the single best link */
	best_link = &data[best_idx];
	new_primary = best_link->link_id;
	new_active = BIT(best_link->link_id);
	max_grade = best_link->grade;

	/* If EMLSR is not possible, activate the best link */
	if (max_active_links == 1 || n_data == 1 ||
	    !iwl_mld_vif_has_emlsr_cap(vif) || !IWL_MLD_AUTO_EML_ENABLE ||
	    mld_vif->emlsr.blocked_reasons)
		goto set_active;

	/* Try to find the best link combination */
	for (u8 a = 0; a < n_data; a++) {
		for (u8 b = a + 1; b < n_data; b++) {
			u8 best_in_pair;
			u16 emlsr_grade =
				iwl_mld_get_emlsr_grade(mld, vif,
							&data[a], &data[b],
							&best_in_pair);

			/*
			 * Prefer the (new) EMLSR combination on a tie, so
			 * that EMLSR is preferred over a single link.
			 */
			if (emlsr_grade < max_grade)
				continue;

			max_grade = emlsr_grade;
			new_primary = best_in_pair;
			new_active = BIT(data[a].link_id) |
				     BIT(data[b].link_id);
		}
	}

set_active:
	IWL_DEBUG_INFO(mld, "Link selection result: 0x%x. Primary = %d\n",
		       new_active, new_primary);

	mld_vif->emlsr.selected_primary = new_primary;
	mld_vif->emlsr.selected_links = new_active;

	ieee80211_set_active_links_async(vif, new_active);
}

static void iwl_mld_vif_iter_select_links(void *_data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = mld_vif->mld;

	_iwl_mld_select_links(mld, vif);
}

void iwl_mld_select_links(struct iwl_mld *mld)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_select_links,
						NULL);
}

static void iwl_mld_emlsr_check_bt_iter(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = mld_vif->mld;
	struct ieee80211_bss_conf *link;
	unsigned int link_id;

	if (!mld->bt_is_active) {
		iwl_mld_retry_emlsr(mld, vif);
		return;
	}

	/* BT is turned ON but we are not in EMLSR, nothing to do */
	if (!iwl_mld_emlsr_active(vif))
		return;

	/* In EMLSR and BT is turned ON */

	for_each_vif_active_link(vif, link, link_id) {
		if (WARN_ON(!link->chanreq.oper.chan))
			continue;

		if (link->chanreq.oper.chan->band == NL80211_BAND_2GHZ) {
			iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BT_COEX,
					   iwl_mld_get_primary_link(vif));
			return;
		}
	}
}

void iwl_mld_emlsr_check_bt(struct iwl_mld *mld)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_emlsr_check_bt_iter,
						NULL);
}
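
/*
 * Channel load tracking: while in EMLSR, drop back to a single link if the
 * load created by others on the primary channel falls below
 * IWL_MLD_EXIT_EMLSR_CHAN_LOAD percent. When not in EMLSR, crossing any of
 * the decision thresholds (the per-bandwidth minimum, 25%, 40% or 50%)
 * upwards triggers a new MLO scan via iwl_mld_retry_emlsr().
 */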
struct iwl_mld_chan_load_data {
	struct iwl_mld_phy *phy;
	u32 prev_chan_load_not_by_us;
};

static void iwl_mld_chan_load_update_iter(void *_data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct iwl_mld_chan_load_data *data = _data;
	const struct iwl_mld_phy *phy = data->phy;
	struct ieee80211_chanctx_conf *chanctx =
		container_of((const void *)phy, struct ieee80211_chanctx_conf,
			     drv_priv);
	struct iwl_mld *mld = iwl_mld_vif_from_mac80211(vif)->mld;
	struct ieee80211_bss_conf *prim_link;
	unsigned int prim_link_id;

	prim_link_id = iwl_mld_get_primary_link(vif);
	prim_link = link_conf_dereference_protected(vif, prim_link_id);

	if (WARN_ON(!prim_link))
		return;

	if (chanctx != rcu_access_pointer(prim_link->chanctx_conf))
		return;

	if (iwl_mld_emlsr_active(vif)) {
		int chan_load = iwl_mld_get_chan_load_by_others(mld, prim_link,
								true);

		if (chan_load < 0)
			return;

		/* chan_load is in range [0,255] */
		if (chan_load < NORMALIZE_PERCENT_TO_255(IWL_MLD_EXIT_EMLSR_CHAN_LOAD))
			iwl_mld_exit_emlsr(mld, vif,
					   IWL_MLD_EMLSR_EXIT_CHAN_LOAD,
					   prim_link_id);
	} else {
		u32 old_chan_load = data->prev_chan_load_not_by_us;
		u32 new_chan_load = phy->avg_channel_load_not_by_us;
		u32 min_thresh = iwl_mld_get_min_chan_load_thresh(chanctx);

#define THRESHOLD_CROSSED(threshold) \
	(old_chan_load <= (threshold) && new_chan_load > (threshold))

		if (THRESHOLD_CROSSED(min_thresh) || THRESHOLD_CROSSED(25) ||
		    THRESHOLD_CROSSED(40) || THRESHOLD_CROSSED(50))
			iwl_mld_retry_emlsr(mld, vif);
#undef THRESHOLD_CROSSED
	}
}

void iwl_mld_emlsr_check_chan_load(struct ieee80211_hw *hw,
				   struct iwl_mld_phy *phy,
				   u32 prev_chan_load_not_by_us)
{
	struct iwl_mld_chan_load_data data = {
		.phy = phy,
		.prev_chan_load_not_by_us = prev_chan_load_not_by_us,
	};

	ieee80211_iterate_active_interfaces_mtx(hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_chan_load_update_iter,
						&data);
}

void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	if (!iwl_mld_vif_has_emlsr_cap(vif) || iwl_mld_emlsr_active(vif) ||
	    mld_vif->emlsr.blocked_reasons)
		return;

	iwl_mld_int_mlo_scan(mld, vif);
}