1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2024-2025 Intel Corporation
4 */
5 #include "mlo.h"
6 #include "phy.h"
7
/* Block reasons helper: X-macro list of all EMLSR block reasons.
 * Expanding it with different HOW() macros generates the matching
 * switch cases and debug-print fragments below, so adding a reason
 * here updates all users at once.
 */
#define HANDLE_EMLSR_BLOCKED_REASONS(HOW)	\
	HOW(PREVENTION)				\
	HOW(WOWLAN)				\
	HOW(ROC)				\
	HOW(NON_BSS)				\
	HOW(TMP_NON_BSS)			\
	HOW(TPT)
16
/* Return the name of a single EMLSR block reason for debug output.
 * @blocked must be exactly one reason bit, not a mask.
 */
static const char *
iwl_mld_get_emlsr_blocked_string(enum iwl_mld_emlsr_blocked blocked)
{
	/* Using switch without "default" will warn about missing entries */
	switch (blocked) {
#define REASON_CASE(x) case IWL_MLD_EMLSR_BLOCKED_##x: return #x;
	HANDLE_EMLSR_BLOCKED_REASONS(REASON_CASE)
#undef REASON_CASE
	}

	return "ERROR";
}
29
/* Print every block reason currently set in @mask on one debug line */
static void iwl_mld_print_emlsr_blocked(struct iwl_mld *mld, u32 mask)
{
/* NAME_FMT() expands to one "%s" per reason; NAME_PR() supplies the
 * matching "[REASON]" argument, or "" when that bit is clear in @mask.
 */
#define NAME_FMT(x) "%s"
#define NAME_PR(x) (mask & IWL_MLD_EMLSR_BLOCKED_##x) ? "[" #x "]" : "",
	IWL_DEBUG_INFO(mld,
		       "EMLSR blocked = " HANDLE_EMLSR_BLOCKED_REASONS(NAME_FMT)
		       " (0x%x)\n",
		       HANDLE_EMLSR_BLOCKED_REASONS(NAME_PR)
		       mask);
#undef NAME_FMT
#undef NAME_PR
}
42
/* Exit reasons helper: X-macro list of all EMLSR exit reasons, used
 * the same way as HANDLE_EMLSR_BLOCKED_REASONS above.
 */
#define HANDLE_EMLSR_EXIT_REASONS(HOW)	\
	HOW(BLOCK)			\
	HOW(MISSED_BEACON)		\
	HOW(FAIL_ENTRY)			\
	HOW(CSA)			\
	HOW(EQUAL_BAND)			\
	HOW(LOW_RSSI)			\
	HOW(LINK_USAGE)			\
	HOW(BT_COEX)			\
	HOW(CHAN_LOAD)			\
	HOW(RFI)			\
	HOW(FW_REQUEST)
56
/* Return the name of a single EMLSR exit reason for debug output.
 * @exit must be exactly one reason bit, not a mask.
 */
static const char *
iwl_mld_get_emlsr_exit_string(enum iwl_mld_emlsr_exit exit)
{
	/* Using switch without "default" will warn about missing entries */
	switch (exit) {
#define REASON_CASE(x) case IWL_MLD_EMLSR_EXIT_##x: return #x;
	HANDLE_EMLSR_EXIT_REASONS(REASON_CASE)
#undef REASON_CASE
	}

	return "ERROR";
}
69
/* Print every exit reason currently set in @mask on one debug line */
static void iwl_mld_print_emlsr_exit(struct iwl_mld *mld, u32 mask)
{
/* Same format/argument X-macro trick as iwl_mld_print_emlsr_blocked() */
#define NAME_FMT(x) "%s"
#define NAME_PR(x) (mask & IWL_MLD_EMLSR_EXIT_##x) ? "[" #x "]" : "",
	IWL_DEBUG_INFO(mld,
		       "EMLSR exit = " HANDLE_EMLSR_EXIT_REASONS(NAME_FMT)
		       " (0x%x)\n",
		       HANDLE_EMLSR_EXIT_REASONS(NAME_PR)
		       mask);
#undef NAME_FMT
#undef NAME_PR
}
82
iwl_mld_emlsr_prevent_done_wk(struct wiphy * wiphy,struct wiphy_work * wk)83 void iwl_mld_emlsr_prevent_done_wk(struct wiphy *wiphy, struct wiphy_work *wk)
84 {
85 struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
86 emlsr.prevent_done_wk.work);
87 struct ieee80211_vif *vif =
88 container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
89
90 if (WARN_ON(!(mld_vif->emlsr.blocked_reasons &
91 IWL_MLD_EMLSR_BLOCKED_PREVENTION)))
92 return;
93
94 iwl_mld_unblock_emlsr(mld_vif->mld, vif,
95 IWL_MLD_EMLSR_BLOCKED_PREVENTION);
96 }
97
iwl_mld_emlsr_tmp_non_bss_done_wk(struct wiphy * wiphy,struct wiphy_work * wk)98 void iwl_mld_emlsr_tmp_non_bss_done_wk(struct wiphy *wiphy,
99 struct wiphy_work *wk)
100 {
101 struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
102 emlsr.tmp_non_bss_done_wk.work);
103 struct ieee80211_vif *vif =
104 container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
105
106 if (WARN_ON(!(mld_vif->emlsr.blocked_reasons &
107 IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS)))
108 return;
109
110 iwl_mld_unblock_emlsr(mld_vif->mld, vif,
111 IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS);
112 }
113
/* Second-based config values converted to jiffies */
#define IWL_MLD_TRIGGER_LINK_SEL_TIME	(HZ * IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC)
#define IWL_MLD_SCAN_EXPIRE_TIME	(HZ * IWL_MLD_SCAN_EXPIRE_TIME_SEC)

/* Exit reasons that can cause longer EMLSR prevention */
#define IWL_MLD_PREVENT_EMLSR_REASONS	(IWL_MLD_EMLSR_EXIT_MISSED_BEACON | \
					 IWL_MLD_EMLSR_EXIT_LINK_USAGE | \
					 IWL_MLD_EMLSR_EXIT_FW_REQUEST)
/* Window (400s) within which repeated exits escalate the prevention */
#define IWL_MLD_PREVENT_EMLSR_TIMEOUT	(HZ * 400)

/* Escalated prevention durations: 2nd repeat 300s, 3rd+ repeat 600s */
#define IWL_MLD_EMLSR_PREVENT_SHORT	(HZ * 300)
#define IWL_MLD_EMLSR_PREVENT_LONG	(HZ * 600)
125
/* Track repeated EMLSR exits and, when warranted, set the PREVENTION
 * block for an escalating duration (base / SHORT / LONG), queueing the
 * delayed work that will lift it.
 */
static void iwl_mld_check_emlsr_prevention(struct iwl_mld *mld,
					   struct iwl_mld_vif *mld_vif,
					   enum iwl_mld_emlsr_exit reason)
{
	unsigned long delay;

	/*
	 * Reset the counter if more than 400 seconds have passed between one
	 * exit and the other, or if we exited due to a different reason.
	 * Will also reset the counter after the long prevention is done.
	 */
	if (time_after(jiffies, mld_vif->emlsr.last_exit_ts +
				IWL_MLD_PREVENT_EMLSR_TIMEOUT) ||
	    mld_vif->emlsr.last_exit_reason != reason)
		mld_vif->emlsr.exit_repeat_count = 0;

	mld_vif->emlsr.last_exit_reason = reason;
	mld_vif->emlsr.last_exit_ts = jiffies;
	mld_vif->emlsr.exit_repeat_count++;

	/*
	 * Do not add a prevention when the reason was a block. For a block,
	 * EMLSR will be enabled again on unblock.
	 */
	if (reason == IWL_MLD_EMLSR_EXIT_BLOCK)
		return;

	/* Set prevention for at least the link-selection trigger time */
	mld_vif->emlsr.blocked_reasons |= IWL_MLD_EMLSR_BLOCKED_PREVENTION;
	delay = IWL_MLD_TRIGGER_LINK_SEL_TIME;

	/* Handle repeats for reasons that can cause long prevention */
	if (mld_vif->emlsr.exit_repeat_count > 1 &&
	    reason & IWL_MLD_PREVENT_EMLSR_REASONS) {
		if (mld_vif->emlsr.exit_repeat_count == 2)
			delay = IWL_MLD_EMLSR_PREVENT_SHORT;
		else
			delay = IWL_MLD_EMLSR_PREVENT_LONG;

		/*
		 * The timeouts are chosen so that this will not happen, i.e.
		 * IWL_MLD_EMLSR_PREVENT_LONG > IWL_MLD_PREVENT_EMLSR_TIMEOUT
		 */
		WARN_ON(mld_vif->emlsr.exit_repeat_count > 3);
	}

	IWL_DEBUG_INFO(mld,
		       "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n",
		       delay / HZ, mld_vif->emlsr.exit_repeat_count,
		       iwl_mld_get_emlsr_exit_string(reason), reason);

	wiphy_delayed_work_queue(mld->wiphy,
				 &mld_vif->emlsr.prevent_done_wk, delay);
}
180
iwl_mld_clear_avg_chan_load_iter(struct ieee80211_hw * hw,struct ieee80211_chanctx_conf * ctx,void * dat)181 static void iwl_mld_clear_avg_chan_load_iter(struct ieee80211_hw *hw,
182 struct ieee80211_chanctx_conf *ctx,
183 void *dat)
184 {
185 struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
186
187 /* It is ok to do it for all chanctx (and not only for the ones that
188 * belong to the EMLSR vif) since EMLSR is not allowed if there is
189 * another vif.
190 */
191 phy->avg_channel_load_not_by_us = 0;
192 }
193
/* Leave EMLSR, keeping only @link_to_keep active.
 *
 * @exit: reason for leaving, used for logging and prevention tracking
 * @sync: if true, deactivate links synchronously and return the result;
 *        otherwise request the change asynchronously and return 0
 *
 * Returns 0 if nothing was done (EMLSR inactive or auto-EML disabled),
 * otherwise the result of the (synchronous) active-links update.
 */
static int _iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			       enum iwl_mld_emlsr_exit exit, u8 link_to_keep,
			       bool sync)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	u16 new_active_links;
	int ret = 0;

	lockdep_assert_wiphy(mld->wiphy);

	/* On entry failure need to exit anyway, even if entered from debugfs */
	if (exit != IWL_MLD_EMLSR_EXIT_FAIL_ENTRY && !IWL_MLD_AUTO_EML_ENABLE)
		return 0;

	/* Ignore exit request if EMLSR is not active */
	if (!iwl_mld_emlsr_active(vif))
		return 0;

	if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mld_vif->authorized))
		return 0;

	/* Fall back to the lowest active link if the requested one is gone */
	if (WARN_ON(!(vif->active_links & BIT(link_to_keep))))
		link_to_keep = __ffs(vif->active_links);

	new_active_links = BIT(link_to_keep);
	IWL_DEBUG_INFO(mld,
		       "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n",
		       iwl_mld_get_emlsr_exit_string(exit), exit,
		       vif->active_links, new_active_links);

	if (sync)
		ret = ieee80211_set_active_links(vif, new_active_links);
	else
		ieee80211_set_active_links_async(vif, new_active_links);

	/* Update latest exit reason and check EMLSR prevention */
	iwl_mld_check_emlsr_prevention(mld, mld_vif, exit);

	/* channel_load_not_by_us is invalid when in EMLSR.
	 * Clear it so wrong values won't be used.
	 */
	ieee80211_iter_chan_contexts_atomic(mld->hw,
					    iwl_mld_clear_avg_chan_load_iter,
					    NULL);

	return ret;
}
241
/* Public wrapper: asynchronously exit EMLSR, keeping @link_to_keep */
void iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			enum iwl_mld_emlsr_exit exit, u8 link_to_keep)
{
	_iwl_mld_exit_emlsr(mld, vif, exit, link_to_keep, false);
}
247
/* Set @reason in the vif's EMLSR block mask and, if newly set, exit
 * EMLSR with IWL_MLD_EMLSR_EXIT_BLOCK.
 *
 * Returns 0 when no state change was needed, otherwise the result of
 * the EMLSR exit (meaningful only when @sync is true).
 */
static int _iwl_mld_emlsr_block(struct iwl_mld *mld, struct ieee80211_vif *vif,
				enum iwl_mld_emlsr_blocked reason,
				u8 link_to_keep, bool sync)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	lockdep_assert_wiphy(mld->wiphy);

	if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
		return 0;

	/* Already blocked for this reason - nothing to do */
	if (mld_vif->emlsr.blocked_reasons & reason)
		return 0;

	mld_vif->emlsr.blocked_reasons |= reason;

	IWL_DEBUG_INFO(mld,
		       "Blocking EMLSR mode. reason = %s (0x%x)\n",
		       iwl_mld_get_emlsr_blocked_string(reason), reason);
	iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);

	/* While TPT-blocked, the periodic TPT check must not run */
	if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
		wiphy_delayed_work_cancel(mld_vif->mld->wiphy,
					  &mld_vif->emlsr.check_tpt_wk);

	return _iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BLOCK,
				   link_to_keep, sync);
}
276
/* Public wrapper: block EMLSR asynchronously (result is discarded) */
void iwl_mld_block_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			 enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
{
	_iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, false);
}
282
/* Public wrapper: block EMLSR synchronously, returning the exit result */
int iwl_mld_block_emlsr_sync(struct iwl_mld *mld, struct ieee80211_vif *vif,
			     enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
{
	return _iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, true);
}
288
289 static void _iwl_mld_select_links(struct iwl_mld *mld,
290 struct ieee80211_vif *vif);
291
/* Clear @reason from the vif's EMLSR block mask. When the last reason
 * is cleared, trigger an internal MLO scan so link selection can
 * re-enter EMLSR.
 */
void iwl_mld_unblock_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			   enum iwl_mld_emlsr_blocked reason)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	lockdep_assert_wiphy(mld->wiphy);

	if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
		return;

	/* Not blocked for this reason - nothing to do */
	if (!(mld_vif->emlsr.blocked_reasons & reason))
		return;

	mld_vif->emlsr.blocked_reasons &= ~reason;

	IWL_DEBUG_INFO(mld,
		       "Unblocking EMLSR mode. reason = %s (0x%x)\n",
		       iwl_mld_get_emlsr_blocked_string(reason), reason);
	iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);

	/* TPT unblocked: restart the periodic TPT check */
	if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
		wiphy_delayed_work_queue(mld_vif->mld->wiphy,
					 &mld_vif->emlsr.check_tpt_wk,
					 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));

	/* Other reasons still pending - EMLSR stays blocked */
	if (mld_vif->emlsr.blocked_reasons)
		return;

	IWL_DEBUG_INFO(mld, "EMLSR is unblocked\n");
	iwl_mld_int_mlo_scan(mld, vif);
}
323
324 static void
iwl_mld_vif_iter_emlsr_mode_notif(void * data,u8 * mac,struct ieee80211_vif * vif)325 iwl_mld_vif_iter_emlsr_mode_notif(void *data, u8 *mac,
326 struct ieee80211_vif *vif)
327 {
328 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
329 struct iwl_esr_mode_notif *notif = (void *)data;
330
331 if (!iwl_mld_vif_has_emlsr_cap(vif))
332 return;
333
334 switch (le32_to_cpu(notif->action)) {
335 case ESR_RECOMMEND_LEAVE:
336 iwl_mld_exit_emlsr(mld_vif->mld, vif,
337 IWL_MLD_EMLSR_EXIT_FW_REQUEST,
338 iwl_mld_get_primary_link(vif));
339 break;
340 case ESR_RECOMMEND_ENTER:
341 case ESR_FORCE_LEAVE:
342 default:
343 IWL_WARN(mld_vif->mld, "Unexpected EMLSR notification: %d\n",
344 le32_to_cpu(notif->action));
345 }
346 }
347
/* RX handler: forward the FW ESR mode notification to every active vif */
void iwl_mld_handle_emlsr_mode_notif(struct iwl_mld *mld,
				     struct iwl_rx_packet *pkt)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_emlsr_mode_notif,
						pkt->data);
}
356
357 static void
iwl_mld_vif_iter_disconnect_emlsr(void * data,u8 * mac,struct ieee80211_vif * vif)358 iwl_mld_vif_iter_disconnect_emlsr(void *data, u8 *mac,
359 struct ieee80211_vif *vif)
360 {
361 if (!iwl_mld_vif_has_emlsr_cap(vif))
362 return;
363
364 ieee80211_connection_loss(vif);
365 }
366
/* RX handler for the FW EMLSR transition-failure notification.
 *
 * Three outcomes:
 *  - unknown FW link id: FW bug, disconnect all EMLSR-capable vifs;
 *  - failed to deactivate a link: state is unrecoverable, disconnect;
 *  - failed to activate the second link: fall back to the link the FW
 *    reports as still valid.
 */
void iwl_mld_handle_emlsr_trans_fail_notif(struct iwl_mld *mld,
					   struct iwl_rx_packet *pkt)
{
	const struct iwl_esr_trans_fail_notif *notif = (const void *)pkt->data;
	u32 fw_link_id = le32_to_cpu(notif->link_id);
	struct ieee80211_bss_conf *bss_conf =
		iwl_mld_fw_id_to_link_conf(mld, fw_link_id);

	IWL_DEBUG_INFO(mld, "Failed to %s EMLSR on link %d (FW: %d), reason %d\n",
		       le32_to_cpu(notif->activation) ? "enter" : "exit",
		       bss_conf ? bss_conf->link_id : -1,
		       le32_to_cpu(notif->link_id),
		       le32_to_cpu(notif->err_code));

	if (IWL_FW_CHECK(mld, !bss_conf,
			 "FW reported failure to %sactivate EMLSR on a non-existing link: %d\n",
			 le32_to_cpu(notif->activation) ? "" : "de",
			 fw_link_id)) {
		ieee80211_iterate_active_interfaces_mtx(
			mld->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mld_vif_iter_disconnect_emlsr, NULL);
		return;
	}

	/* Disconnect if we failed to deactivate a link */
	if (!le32_to_cpu(notif->activation)) {
		ieee80211_connection_loss(bss_conf->vif);
		return;
	}

	/*
	 * We failed to activate the second link, go back to the link specified
	 * by the firmware as that is the one that is still valid now.
	 */
	iwl_mld_exit_emlsr(mld, bss_conf->vif, IWL_MLD_EMLSR_EXIT_FAIL_ENTRY,
			   bss_conf->link_id);
}
404
405 /* Active non-station link tracking */
iwl_mld_count_non_bss_links(void * _data,u8 * mac,struct ieee80211_vif * vif)406 static void iwl_mld_count_non_bss_links(void *_data, u8 *mac,
407 struct ieee80211_vif *vif)
408 {
409 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
410 int *count = _data;
411
412 if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
413 return;
414
415 *count += iwl_mld_count_active_links(mld_vif->mld, vif);
416 }
417
/* Iterator context for (un)blocking EMLSR due to non-BSS links */
struct iwl_mld_update_emlsr_block_data {
	bool block;	/* true: block NON_BSS, false: unblock */
	int result;	/* last non-zero error from a sync block, if any */
};
422
423 static void
iwl_mld_vif_iter_update_emlsr_non_bss_block(void * _data,u8 * mac,struct ieee80211_vif * vif)424 iwl_mld_vif_iter_update_emlsr_non_bss_block(void *_data, u8 *mac,
425 struct ieee80211_vif *vif)
426 {
427 struct iwl_mld_update_emlsr_block_data *data = _data;
428 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
429 int ret;
430
431 if (data->block) {
432 ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
433 IWL_MLD_EMLSR_BLOCKED_NON_BSS,
434 iwl_mld_get_primary_link(vif));
435 if (ret)
436 data->result = ret;
437 } else {
438 iwl_mld_unblock_emlsr(mld_vif->mld, vif,
439 IWL_MLD_EMLSR_BLOCKED_NON_BSS);
440 }
441 }
442
/* Re-evaluate the NON_BSS EMLSR block after a (pending) link change.
 *
 * @pending_link_changes: delta of non-station active links that is
 *	about to happen but is not yet reflected in mac80211 state
 *
 * Returns 0 on success, or the last error from a synchronous block.
 */
int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld,
				      int pending_link_changes)
{
	/* An active link of a non-station vif blocks EMLSR. Upon activation
	 * block EMLSR on the bss vif. Upon deactivation, check if this link
	 * was the last non-station link active, and if so unblock the bss vif
	 */
	struct iwl_mld_update_emlsr_block_data block_data = {};
	int count = pending_link_changes;

	/* No need to count if we are activating a non-BSS link */
	if (count <= 0)
		ieee80211_iterate_active_interfaces_mtx(mld->hw,
							IEEE80211_IFACE_ITER_NORMAL,
							iwl_mld_count_non_bss_links,
							&count);

	/*
	 * We could skip updating it if the block change did not change (and
	 * pending_link_changes is non-zero).
	 */
	block_data.block = !!count;

	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_update_emlsr_non_bss_block,
						&block_data);

	return block_data.result;
}
473
/* Minimum share (in percent) of traffic the secondary link must carry */
#define EMLSR_SEC_LINK_MIN_PERC 10
/* Minimum total MPDUs for the TX/RX percentage checks to be meaningful */
#define EMLSR_MIN_TX 3000
#define EMLSR_MIN_RX 400
477
/* Periodic work: verify that throughput still justifies EMLSR.
 *
 * Sums MPDU counters over all RX queues/links (clearing them for the
 * next window). Blocks EMLSR on low total throughput; when EMLSR is
 * active, also exits it if the secondary link carries too small a
 * share of the traffic. Otherwise re-arms itself for the next window.
 */
void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.check_tpt_wk.work);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
	struct iwl_mld *mld = mld_vif->mld;
	struct iwl_mld_sta *mld_sta;
	struct iwl_mld_link *sec_link;
	unsigned long total_tx = 0, total_rx = 0;
	unsigned long sec_link_tx = 0, sec_link_rx = 0;
	u8 sec_link_tx_perc, sec_link_rx_perc;
	s8 sec_link_id;

	if (!iwl_mld_vif_has_emlsr_cap(vif) || !mld_vif->ap_sta)
		return;

	mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);

	/* We only count for the AP sta in a MLO connection */
	if (!mld_sta->mpdu_counters)
		return;

	/* This wk should only run when the TPT blocker isn't set.
	 * When the blocker is set, the decision to remove it, as well as
	 * clearing the counters is done in DP (to avoid having a wk every
	 * 5 seconds when idle. When the blocker is unset, we are not idle
	 * anyway).
	 */
	if (WARN_ON(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT))
		return;
	/*
	 * TPT is unblocked, need to check if the TPT criteria is still met.
	 *
	 * If EMLSR is active, then we also need to check the secondary link
	 * requirements.
	 */
	if (iwl_mld_emlsr_active(vif)) {
		sec_link_id = iwl_mld_get_other_link(vif, iwl_mld_get_primary_link(vif));
		sec_link = iwl_mld_link_dereference_check(mld_vif, sec_link_id);
		if (WARN_ON_ONCE(!sec_link))
			return;
		/* We need the FW ID here */
		sec_link_id = sec_link->fw_id;
	} else {
		sec_link_id = -1;
	}

	/* Sum up RX and TX MPDUs from the different queues/links */
	for (int q = 0; q < mld->trans->num_rx_queues; q++) {
		struct iwl_mld_per_q_mpdu_counter *queue_counter =
			&mld_sta->mpdu_counters[q];

		spin_lock_bh(&queue_counter->lock);

		/* The link IDs that doesn't exist will contain 0 */
		for (int link = 0;
		     link < ARRAY_SIZE(queue_counter->per_link);
		     link++) {
			total_tx += queue_counter->per_link[link].tx;
			total_rx += queue_counter->per_link[link].rx;
		}

		if (sec_link_id != -1) {
			sec_link_tx += queue_counter->per_link[sec_link_id].tx;
			sec_link_rx += queue_counter->per_link[sec_link_id].rx;
		}

		/* Reset the counters for the next measurement window */
		memset(queue_counter->per_link, 0,
		       sizeof(queue_counter->per_link));

		spin_unlock_bh(&queue_counter->lock);
	}

	IWL_DEBUG_INFO(mld, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
		       total_tx, total_rx);

	/* If we don't have enough MPDUs - exit EMLSR */
	if (total_tx < IWL_MLD_ENTER_EMLSR_TPT_THRESH &&
	    total_rx < IWL_MLD_ENTER_EMLSR_TPT_THRESH) {
		iwl_mld_block_emlsr(mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT,
				    iwl_mld_get_primary_link(vif));
		return;
	}

	/* EMLSR is not active */
	if (sec_link_id == -1)
		return;

	IWL_DEBUG_INFO(mld, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n",
		       sec_link_id, sec_link_tx, sec_link_rx);

	/* Calculate the percentage of the secondary link TX/RX */
	sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
	sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;

	/*
	 * The TX/RX percentage is checked only if it exceeds the required
	 * minimum. In addition, RX is checked only if the TX check failed.
	 */
	if ((total_tx > EMLSR_MIN_TX &&
	     sec_link_tx_perc < EMLSR_SEC_LINK_MIN_PERC) ||
	    (total_rx > EMLSR_MIN_RX &&
	     sec_link_rx_perc < EMLSR_SEC_LINK_MIN_PERC)) {
		iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_LINK_USAGE,
				   iwl_mld_get_primary_link(vif));
		return;
	}

	/* Check again when the next window ends */
	wiphy_delayed_work_queue(mld_vif->mld->wiphy,
				 &mld_vif->emlsr.check_tpt_wk,
				 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
}
591
/* Work item: lift the TPT block (queued from the data path once
 * throughput picks up again).
 */
void iwl_mld_emlsr_unblock_tpt_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.unblock_tpt_wk);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);

	iwl_mld_unblock_emlsr(mld_vif->mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT);
}
601
602 /*
603 * Link selection
604 */
605
/* Return the RSSI threshold (dBm) for EMLSR on the given channel.
 *
 * @low: select the low (exit) threshold instead of the high (entry) one
 *
 * Returns S8_MAX (i.e. an unreachable threshold) for unexpected bands
 * or channel widths.
 */
s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld,
				 const struct cfg80211_chan_def *chandef,
				 bool low)
{
	if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ &&
		    chandef->chan->band != NL80211_BAND_5GHZ &&
		    chandef->chan->band != NL80211_BAND_6GHZ))
		return S8_MAX;

/* Select the per-bandwidth LOW/HIGH threshold constant */
#define RSSI_THRESHOLD(_low, _bw)			\
	(_low) ? IWL_MLD_LOW_RSSI_THRESH_##_bw##MHZ	\
	       : IWL_MLD_HIGH_RSSI_THRESH_##_bw##MHZ

	switch (chandef->width) {
	case NL80211_CHAN_WIDTH_20_NOHT:
	case NL80211_CHAN_WIDTH_20:
	/* 320 MHz has the same thresholds as 20 MHz */
	case NL80211_CHAN_WIDTH_320:
		return RSSI_THRESHOLD(low, 20);
	case NL80211_CHAN_WIDTH_40:
		return RSSI_THRESHOLD(low, 40);
	case NL80211_CHAN_WIDTH_80:
		return RSSI_THRESHOLD(low, 80);
	case NL80211_CHAN_WIDTH_160:
		return RSSI_THRESHOLD(low, 160);
	default:
		WARN_ON(1);
		return S8_MAX;
	}
#undef RSSI_THRESHOLD
}
637
638 static u32
iwl_mld_emlsr_disallowed_with_link(struct iwl_mld * mld,struct ieee80211_vif * vif,struct iwl_mld_link_sel_data * link,bool primary)639 iwl_mld_emlsr_disallowed_with_link(struct iwl_mld *mld,
640 struct ieee80211_vif *vif,
641 struct iwl_mld_link_sel_data *link,
642 bool primary)
643 {
644 struct wiphy *wiphy = mld->wiphy;
645 struct ieee80211_bss_conf *conf;
646 enum iwl_mld_emlsr_exit ret = 0;
647
648 conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]);
649 if (WARN_ON_ONCE(!conf))
650 return false;
651
652 if (link->chandef->chan->band == NL80211_BAND_2GHZ && mld->bt_is_active)
653 ret |= IWL_MLD_EMLSR_EXIT_BT_COEX;
654
655 if (link->signal <
656 iwl_mld_get_emlsr_rssi_thresh(mld, link->chandef, false))
657 ret |= IWL_MLD_EMLSR_EXIT_LOW_RSSI;
658
659 if (conf->csa_active)
660 ret |= IWL_MLD_EMLSR_EXIT_CSA;
661
662 if (ret) {
663 IWL_DEBUG_INFO(mld,
664 "Link %d is not allowed for EMLSR as %s\n",
665 link->link_id,
666 primary ? "primary" : "secondary");
667 iwl_mld_print_emlsr_exit(mld, ret);
668 }
669
670 return ret;
671 }
672
/* Fill @data with selection info for each usable link that was seen in
 * the last MLO scan, and record the index of the best-graded entry in
 * @best_link_idx.
 *
 * Returns the number of entries written to @data.
 */
static u8
iwl_mld_set_link_sel_data(struct iwl_mld *mld,
			  struct ieee80211_vif *vif,
			  struct iwl_mld_link_sel_data *data,
			  unsigned long usable_links,
			  u8 *best_link_idx)
{
	u8 n_data = 0;
	u16 max_grade = 0;
	unsigned long link_id;

	/*
	 * TODO: don't select links that weren't discovered in the last scan
	 * This requires mac80211 (or cfg80211) changes to forward/track when
	 * a BSS was last updated. cfg80211 already tracks this information but
	 * it is not exposed within the kernel.
	 */
	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
		struct ieee80211_bss_conf *link_conf =
			link_conf_dereference_protected(vif, link_id);

		if (WARN_ON_ONCE(!link_conf))
			continue;

		/* Ignore any BSS that was not seen in the last MLO scan */
		if (ktime_before(link_conf->bss->ts_boottime,
				 mld->scan.last_mlo_scan_time))
			continue;

		data[n_data].link_id = link_id;
		data[n_data].chandef = &link_conf->chanreq.oper;
		data[n_data].signal = MBM_TO_DBM(link_conf->bss->signal);
		data[n_data].grade = iwl_mld_get_link_grade(mld, link_conf);

		if (n_data == 0 || data[n_data].grade > max_grade) {
			max_grade = data[n_data].grade;
			*best_link_idx = n_data;
		}
		n_data++;
	}

	return n_data;
}
716
717 static u32
iwl_mld_get_min_chan_load_thresh(struct ieee80211_chanctx_conf * chanctx)718 iwl_mld_get_min_chan_load_thresh(struct ieee80211_chanctx_conf *chanctx)
719 {
720 const struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(chanctx);
721
722 switch (phy->chandef.width) {
723 case NL80211_CHAN_WIDTH_320:
724 case NL80211_CHAN_WIDTH_160:
725 return 5;
726 case NL80211_CHAN_WIDTH_80:
727 return 7;
728 default:
729 break;
730 }
731 return 10;
732 }
733
/* Decide whether the primary link's channel load justifies EMLSR for
 * the candidate pair (@a primary, @b secondary).
 *
 * The load threshold scales with the bandwidth ratio between the two
 * links: the narrower the secondary relative to the primary, the more
 * loaded the primary must be for EMLSR to pay off.
 */
VISIBLE_IF_IWLWIFI_KUNIT bool
iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld,
				  struct ieee80211_vif *vif,
				  const struct iwl_mld_link_sel_data *a,
				  const struct iwl_mld_link_sel_data *b)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld_link *link_a =
		iwl_mld_link_dereference_check(mld_vif, a->link_id);
	struct ieee80211_chanctx_conf *chanctx_a = NULL;
	u32 bw_a, bw_b, ratio;
	u32 primary_load_perc;

	if (!link_a || !link_a->active) {
		IWL_DEBUG_EHT(mld, "Primary link is not active. Can't enter EMLSR\n");
		return false;
	}

	chanctx_a = wiphy_dereference(mld->wiphy, link_a->chan_ctx);

	if (WARN_ON(!chanctx_a))
		return false;

	primary_load_perc =
		iwl_mld_phy_from_mac80211(chanctx_a)->avg_channel_load_not_by_us;

	IWL_DEBUG_EHT(mld, "Average channel load not by us: %u\n", primary_load_perc);

	if (primary_load_perc < iwl_mld_get_min_chan_load_thresh(chanctx_a)) {
		IWL_DEBUG_EHT(mld, "Channel load is below the minimum threshold\n");
		return false;
	}

	/* Latency-sensitive vifs always benefit from the second link */
	if (iwl_mld_vif_low_latency(mld_vif)) {
		IWL_DEBUG_EHT(mld, "Low latency vif, EMLSR is allowed\n");
		return true;
	}

	/* Secondary at least as wide as the primary: always worthwhile */
	if (a->chandef->width <= b->chandef->width)
		return true;

	bw_a = nl80211_chan_width_to_mhz(a->chandef->width);
	bw_b = nl80211_chan_width_to_mhz(b->chandef->width);
	ratio = bw_a / bw_b;

	/* Higher bandwidth ratio requires higher primary load */
	switch (ratio) {
	case 2:
		return primary_load_perc > 25;
	case 4:
		return primary_load_perc > 40;
	case 8:
	case 16:
		return primary_load_perc > 50;
	}

	return false;
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_channel_load_allows_emlsr);
792
/* Check whether links @a (primary) and @b (secondary) form a valid
 * EMLSR pair: both links must individually qualify, be on different
 * bands, and pass the channel-load check.
 */
static bool
iwl_mld_valid_emlsr_pair(struct ieee80211_vif *vif,
			 struct iwl_mld_link_sel_data *a,
			 struct iwl_mld_link_sel_data *b)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = mld_vif->mld;
	u32 reason_mask = 0;

	/* Per-link considerations */
	if (iwl_mld_emlsr_disallowed_with_link(mld, vif, a, true) ||
	    iwl_mld_emlsr_disallowed_with_link(mld, vif, b, false))
		return false;

	/* Pair-wise considerations */
	if (a->chandef->chan->band == b->chandef->chan->band)
		reason_mask |= IWL_MLD_EMLSR_EXIT_EQUAL_BAND;
	if (!iwl_mld_channel_load_allows_emlsr(mld, vif, a, b))
		reason_mask |= IWL_MLD_EMLSR_EXIT_CHAN_LOAD;

	if (reason_mask) {
		IWL_DEBUG_INFO(mld,
			       "Links %d and %d are not a valid pair for EMLSR\n",
			       a->link_id, b->link_id);
		IWL_DEBUG_INFO(mld,
			       "Links bandwidth are: %d and %d\n",
			       nl80211_chan_width_to_mhz(a->chandef->width),
			       nl80211_chan_width_to_mhz(b->chandef->width));
		iwl_mld_print_emlsr_exit(mld, reason_mask);
		return false;
	}

	return true;
}
826
/* Calculation is done with fixed-point with a scaling factor of 1/256 */
#define SCALE_FACTOR 256

/*
 * Returns the combined grade of two given links.
 * Returns 0 if EMLSR is not allowed with these 2 links.
 *
 * On return, *primary_id is the link id of the higher-graded link of
 * the two (the designated primary), even when 0 is returned.
 */
static
unsigned int iwl_mld_get_emlsr_grade(struct iwl_mld *mld,
				     struct ieee80211_vif *vif,
				     struct iwl_mld_link_sel_data *a,
				     struct iwl_mld_link_sel_data *b,
				     u8 *primary_id)
{
	struct ieee80211_bss_conf *primary_conf;
	struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
	unsigned int primary_load;

	lockdep_assert_wiphy(wiphy);

	/* a is always primary, b is always secondary */
	if (b->grade > a->grade)
		swap(a, b);

	*primary_id = a->link_id;

	if (!iwl_mld_valid_emlsr_pair(vif, a, b))
		return 0;

	primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);

	if (WARN_ON_ONCE(!primary_conf))
		return 0;

	primary_load = iwl_mld_get_chan_load(mld, primary_conf);

	/* The more the primary link is loaded, the more worthwhile EMLSR becomes */
	return a->grade + ((b->grade * primary_load) / SCALE_FACTOR);
}
866
/* Pick the active link set for @vif from recent MLO scan results:
 * either the single best-graded link, or (when EMLSR is possible and
 * not blocked) the best-graded EMLSR pair, then request the switch.
 */
static void _iwl_mld_select_links(struct iwl_mld *mld,
				  struct ieee80211_vif *vif)
{
	struct iwl_mld_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
	struct iwl_mld_link_sel_data *best_link;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int max_active_links = iwl_mld_max_active_links(mld, vif);
	u16 new_active, usable_links = ieee80211_vif_usable_links(vif);
	u8 best_idx, new_primary, n_data;
	u16 max_grade;

	lockdep_assert_wiphy(mld->wiphy);

	if (!mld_vif->authorized || hweight16(usable_links) <= 1)
		return;

	/* Selection is only valid when based on fresh (<5s old) scan data */
	if (WARN(ktime_before(mld->scan.last_mlo_scan_time,
			      ktime_sub_ns(ktime_get_boottime_ns(),
					   5ULL * NSEC_PER_SEC)),
		 "Last MLO scan was too long ago, can't select links\n"))
		return;

	/* The logic below is simple and not suited for more than 2 links */
	WARN_ON_ONCE(max_active_links > 2);

	n_data = iwl_mld_set_link_sel_data(mld, vif, data, usable_links,
					   &best_idx);

	if (WARN(!n_data, "Couldn't find a valid grade for any link!\n"))
		return;

	/* Default to selecting the single best link */
	best_link = &data[best_idx];
	new_primary = best_link->link_id;
	new_active = BIT(best_link->link_id);
	max_grade = best_link->grade;

	/* If EMLSR is not possible, activate the best link */
	if (max_active_links == 1 || n_data == 1 ||
	    !iwl_mld_vif_has_emlsr_cap(vif) || !IWL_MLD_AUTO_EML_ENABLE ||
	    mld_vif->emlsr.blocked_reasons)
		goto set_active;

	/* Try to find the best link combination */
	for (u8 a = 0; a < n_data; a++) {
		for (u8 b = a + 1; b < n_data; b++) {
			u8 best_in_pair;
			u16 emlsr_grade =
				iwl_mld_get_emlsr_grade(mld, vif,
							&data[a], &data[b],
							&best_in_pair);

			/*
			 * Accept a pair whose grade ties the current best
			 * (">=", via "continue" on "<") so that an EMLSR
			 * combination is preferred over a single link.
			 */
			if (emlsr_grade < max_grade)
				continue;

			max_grade = emlsr_grade;
			new_primary = best_in_pair;
			new_active = BIT(data[a].link_id) |
				     BIT(data[b].link_id);
		}
	}

set_active:
	IWL_DEBUG_INFO(mld, "Link selection result: 0x%x. Primary = %d\n",
		       new_active, new_primary);

	mld_vif->emlsr.selected_primary = new_primary;
	mld_vif->emlsr.selected_links = new_active;

	ieee80211_set_active_links_async(vif, new_active);
}
942
/* Interface iterator: run MLO link selection on a single active vif. */
static void iwl_mld_vif_iter_select_links(void *_data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	_iwl_mld_select_links(mld_vif->mld, vif);
}
951
/*
 * Run MLO link selection on every active interface.
 *
 * Iterates all active vifs (under the wiphy mutex, per the _mtx variant)
 * and lets the iterator pick the best link - or EMLSR link pair - for
 * each of them.
 */
void iwl_mld_select_links(struct iwl_mld *mld)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_select_links,
						NULL);
}
959
/*
 * Interface iterator: re-evaluate EMLSR state for one vif after a BT
 * activity change. BT off -> try to (re-)enter EMLSR; BT on -> leave
 * EMLSR if any active link is on the 2.4 GHz band.
 */
static void iwl_mld_emlsr_check_bt_iter(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = mld_vif->mld;
	struct ieee80211_bss_conf *link_conf;
	unsigned int link_id;

	/* BT turned off: EMLSR may have become possible again */
	if (!mld->bt_is_active) {
		iwl_mld_retry_emlsr(mld, vif);
		return;
	}

	/* BT is turned ON but we are not in EMLSR, nothing to do */
	if (!iwl_mld_emlsr_active(vif))
		return;

	/* In EMLSR and BT is turned ON */

	for_each_vif_active_link(vif, link_conf, link_id) {
		const struct ieee80211_channel *chan =
			link_conf->chanreq.oper.chan;

		if (WARN_ON(!chan))
			continue;

		if (chan->band != NL80211_BAND_2GHZ)
			continue;

		iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BT_COEX,
				   iwl_mld_get_primary_link(vif));
		return;
	}
}
990
/*
 * Re-evaluate EMLSR vs. BT coexistence on all active interfaces,
 * typically after mld->bt_is_active changes.
 */
void iwl_mld_emlsr_check_bt(struct iwl_mld *mld)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_emlsr_check_bt_iter,
						NULL);
}
998
/* Iterator data for iwl_mld_chan_load_update_iter() */
struct iwl_mld_chan_load_data {
	/* PHY (chanctx drv_priv) whose channel load was updated */
	struct iwl_mld_phy *phy;
	/* channel load not caused by us, before the update */
	u32 prev_chan_load_not_by_us;
};
1003
/*
 * Interface iterator: react to a channel-load update on one PHY.
 *
 * Only vifs whose primary link lives on the updated channel context are
 * considered. If the vif is in EMLSR and the load caused by others on
 * the primary link dropped below the minimum that justifies EMLSR, exit
 * EMLSR. Otherwise, if the load (not caused by us) just crossed upward
 * any of several thresholds, retry entering EMLSR.
 */
static void iwl_mld_chan_load_update_iter(void *_data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct iwl_mld_chan_load_data *data = _data;
	const struct iwl_mld_phy *phy = data->phy;
	/*
	 * The iwl_mld_phy is the drv_priv of its chanctx, so recover the
	 * enclosing chanctx from it (const cast needed for container_of).
	 */
	struct ieee80211_chanctx_conf *chanctx =
		container_of((const void *)phy, struct ieee80211_chanctx_conf,
			     drv_priv);
	struct iwl_mld *mld = iwl_mld_vif_from_mac80211(vif)->mld;
	struct ieee80211_bss_conf *prim_link;
	unsigned int prim_link_id;

	prim_link_id = iwl_mld_get_primary_link(vif);
	prim_link = link_conf_dereference_protected(vif, prim_link_id);

	if (WARN_ON(!prim_link))
		return;

	/* Ignore vifs whose primary link is on a different chanctx */
	if (chanctx != rcu_access_pointer(prim_link->chanctx_conf))
		return;

	if (iwl_mld_emlsr_active(vif)) {
		/* negative return indicates the load is unknown - skip */
		int chan_load = iwl_mld_get_chan_load_by_others(mld, prim_link,
								true);

		if (chan_load < 0)
			return;

		/* chan_load is in range [0,255] */
		if (chan_load < NORMALIZE_PERCENT_TO_255(IWL_MLD_EXIT_EMLSR_CHAN_LOAD))
			iwl_mld_exit_emlsr(mld, vif,
					   IWL_MLD_EMLSR_EXIT_CHAN_LOAD,
					   prim_link_id);
	} else {
		u32 old_chan_load = data->prev_chan_load_not_by_us;
		u32 new_chan_load = phy->avg_channel_load_not_by_us;
		u32 min_thresh = iwl_mld_get_min_chan_load_thresh(chanctx);

/* True only on an upward crossing, so each threshold triggers once */
#define THRESHOLD_CROSSED(threshold) \
	(old_chan_load <= (threshold) && new_chan_load > (threshold))

		if (THRESHOLD_CROSSED(min_thresh) || THRESHOLD_CROSSED(25) ||
		    THRESHOLD_CROSSED(40) || THRESHOLD_CROSSED(50))
			iwl_mld_retry_emlsr(mld, vif);
#undef THRESHOLD_CROSSED
	}
}
1051
/*
 * Propagate a PHY channel-load update to all active interfaces so each
 * one can enter or leave EMLSR accordingly.
 */
void iwl_mld_emlsr_check_chan_load(struct ieee80211_hw *hw,
				   struct iwl_mld_phy *phy,
				   u32 prev_chan_load_not_by_us)
{
	struct iwl_mld_chan_load_data data;

	data.phy = phy;
	data.prev_chan_load_not_by_us = prev_chan_load_not_by_us;

	ieee80211_iterate_active_interfaces_mtx(hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_chan_load_update_iter,
						&data);
}
1066
iwl_mld_retry_emlsr(struct iwl_mld * mld,struct ieee80211_vif * vif)1067 void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif)
1068 {
1069 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
1070
1071 if (!iwl_mld_vif_has_emlsr_cap(vif) || iwl_mld_emlsr_active(vif) ||
1072 mld_vif->emlsr.blocked_reasons)
1073 return;
1074
1075 iwl_mld_int_mlo_scan(mld, vif);
1076 }
1077