1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2024-2025 Intel Corporation
4 */
5 #include "mlo.h"
6 #include "phy.h"
7
/* Block reasons helper
 *
 * X-macro listing every EMLSR block reason; HOW(x) is expanded against the
 * IWL_MLD_EMLSR_BLOCKED_##x enum values so the string table and the debug
 * printer below stay in sync with the enum automatically.
 */
#define HANDLE_EMLSR_BLOCKED_REASONS(HOW)	\
	HOW(PREVENTION)			\
	HOW(WOWLAN)			\
	HOW(ROC)			\
	HOW(NON_BSS)			\
	HOW(TMP_NON_BSS)		\
	HOW(TPT)			\
	HOW(NAN)
17
/* Return a static name string for a single IWL_MLD_EMLSR_BLOCKED_* reason
 * bit (not a mask); "ERROR" for values outside the known set.
 */
static const char *
iwl_mld_get_emlsr_blocked_string(enum iwl_mld_emlsr_blocked blocked)
{
	/* Using switch without "default" will warn about missing entries */
	switch (blocked) {
#define REASON_CASE(x) case IWL_MLD_EMLSR_BLOCKED_##x: return #x;
	HANDLE_EMLSR_BLOCKED_REASONS(REASON_CASE)
#undef REASON_CASE
	}

	return "ERROR";
}
30
iwl_mld_print_emlsr_blocked(struct iwl_mld * mld,u32 mask)31 static void iwl_mld_print_emlsr_blocked(struct iwl_mld *mld, u32 mask)
32 {
33 #define NAME_FMT(x) "%s"
34 #define NAME_PR(x) (mask & IWL_MLD_EMLSR_BLOCKED_##x) ? "[" #x "]" : "",
35 IWL_DEBUG_EHT(mld,
36 "EMLSR blocked = " HANDLE_EMLSR_BLOCKED_REASONS(NAME_FMT)
37 " (0x%x)\n", HANDLE_EMLSR_BLOCKED_REASONS(NAME_PR) mask);
38 #undef NAME_FMT
39 #undef NAME_PR
40 }
41
/* Exit reasons helper
 *
 * X-macro listing every EMLSR exit reason; expanded against the
 * IWL_MLD_EMLSR_EXIT_##x enum values by the helpers below.
 */
#define HANDLE_EMLSR_EXIT_REASONS(HOW)	\
	HOW(BLOCK)			\
	HOW(MISSED_BEACON)		\
	HOW(FAIL_ENTRY)			\
	HOW(CSA)			\
	HOW(EQUAL_BAND)			\
	HOW(LOW_RSSI)			\
	HOW(LINK_USAGE)			\
	HOW(BT_COEX)			\
	HOW(CHAN_LOAD)			\
	HOW(RFI)			\
	HOW(FW_REQUEST)			\
	HOW(INVALID)
56
/* Return a static name string for a single IWL_MLD_EMLSR_EXIT_* reason
 * bit (not a mask); "ERROR" for values outside the known set.
 */
static const char *
iwl_mld_get_emlsr_exit_string(enum iwl_mld_emlsr_exit exit)
{
	/* Using switch without "default" will warn about missing entries */
	switch (exit) {
#define REASON_CASE(x) case IWL_MLD_EMLSR_EXIT_##x: return #x;
	HANDLE_EMLSR_EXIT_REASONS(REASON_CASE)
#undef REASON_CASE
	}

	return "ERROR";
}
69
iwl_mld_print_emlsr_exit(struct iwl_mld * mld,u32 mask)70 static void iwl_mld_print_emlsr_exit(struct iwl_mld *mld, u32 mask)
71 {
72 #define NAME_FMT(x) "%s"
73 #define NAME_PR(x) (mask & IWL_MLD_EMLSR_EXIT_##x) ? "[" #x "]" : "",
74 IWL_DEBUG_EHT(mld,
75 "EMLSR exit = " HANDLE_EMLSR_EXIT_REASONS(NAME_FMT)
76 " (0x%x)\n", HANDLE_EMLSR_EXIT_REASONS(NAME_PR) mask);
77 #undef NAME_FMT
78 #undef NAME_PR
79 }
80
/* Delayed work: lift the PREVENTION block once the prevention period armed
 * by iwl_mld_check_emlsr_prevention() has elapsed.
 */
void iwl_mld_emlsr_prevent_done_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.prevent_done_wk.work);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);

	/* The work is only queued together with setting this bit */
	if (WARN_ON(!(mld_vif->emlsr.blocked_reasons &
		      IWL_MLD_EMLSR_BLOCKED_PREVENTION)))
		return;

	iwl_mld_unblock_emlsr(mld_vif->mld, vif,
			      IWL_MLD_EMLSR_BLOCKED_PREVENTION);
}
95
/* Delayed work: lift the temporary non-BSS block once its timeout (armed in
 * iwl_mld_vif_iter_emlsr_block_tmp_non_bss()) has elapsed.
 */
void iwl_mld_emlsr_tmp_non_bss_done_wk(struct wiphy *wiphy,
				       struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.tmp_non_bss_done_wk.work);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);

	/* The work is only queued together with setting this bit */
	if (WARN_ON(!(mld_vif->emlsr.blocked_reasons &
		      IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS)))
		return;

	iwl_mld_unblock_emlsr(mld_vif->mld, vif,
			      IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS);
}
111
#define IWL_MLD_TRIGGER_LINK_SEL_TIME	(HZ * IWL_MLD_TRIGGER_LINK_SEL_TIME_SEC)
#define IWL_MLD_SCAN_EXPIRE_TIME	(HZ * IWL_MLD_SCAN_EXPIRE_TIME_SEC)

/* Exit reasons that can cause longer EMLSR prevention */
#define IWL_MLD_PREVENT_EMLSR_REASONS	(IWL_MLD_EMLSR_EXIT_MISSED_BEACON | \
					 IWL_MLD_EMLSR_EXIT_LINK_USAGE | \
					 IWL_MLD_EMLSR_EXIT_FW_REQUEST)
/* Window within which repeated exits for the same reason are counted */
#define IWL_MLD_PREVENT_EMLSR_TIMEOUT	(HZ * 400)

/* Prevention duration for the 2nd (SHORT) and 3rd+ (LONG) repeated exit */
#define IWL_MLD_EMLSR_PREVENT_SHORT	(HZ * 300)
#define IWL_MLD_EMLSR_PREVENT_LONG	(HZ * 600)
123
/* Update the per-vif exit bookkeeping after an EMLSR exit and, unless the
 * exit was caused by a block, arm the PREVENTION block for a duration that
 * grows with the number of repeated exits for the same reason.
 *
 * @mld: the mld device
 * @mld_vif: the vif that just exited EMLSR
 * @reason: why EMLSR was exited
 */
static void iwl_mld_check_emlsr_prevention(struct iwl_mld *mld,
					   struct iwl_mld_vif *mld_vif,
					   enum iwl_mld_emlsr_exit reason)
{
	unsigned long delay;

	/*
	 * Reset the counter if more than 400 seconds have passed between one
	 * exit and the other, or if we exited due to a different reason.
	 * Will also reset the counter after the long prevention is done.
	 */
	if (time_after(jiffies, mld_vif->emlsr.last_exit_ts +
				IWL_MLD_PREVENT_EMLSR_TIMEOUT) ||
	    mld_vif->emlsr.last_exit_reason != reason)
		mld_vif->emlsr.exit_repeat_count = 0;

	mld_vif->emlsr.last_exit_reason = reason;
	mld_vif->emlsr.last_exit_ts = jiffies;
	mld_vif->emlsr.exit_repeat_count++;

	/*
	 * Do not add a prevention when the reason was a block. For a block,
	 * EMLSR will be enabled again on unblock.
	 */
	if (reason == IWL_MLD_EMLSR_EXIT_BLOCK)
		return;

	/* Set prevention for a minimum of 30 seconds */
	mld_vif->emlsr.blocked_reasons |= IWL_MLD_EMLSR_BLOCKED_PREVENTION;
	delay = IWL_MLD_TRIGGER_LINK_SEL_TIME;

	/* Handle repeats for reasons that can cause long prevention */
	if (mld_vif->emlsr.exit_repeat_count > 1 &&
	    reason & IWL_MLD_PREVENT_EMLSR_REASONS) {
		if (mld_vif->emlsr.exit_repeat_count == 2)
			delay = IWL_MLD_EMLSR_PREVENT_SHORT;
		else
			delay = IWL_MLD_EMLSR_PREVENT_LONG;

		/*
		 * The timeouts are chosen so that this will not happen, i.e.
		 * IWL_MLD_EMLSR_PREVENT_LONG > IWL_MLD_PREVENT_EMLSR_TIMEOUT
		 */
		WARN_ON(mld_vif->emlsr.exit_repeat_count > 3);
	}

	IWL_DEBUG_EHT(mld,
		      "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n",
		      delay / HZ, mld_vif->emlsr.exit_repeat_count,
		      iwl_mld_get_emlsr_exit_string(reason), reason);

	wiphy_delayed_work_queue(mld->wiphy,
				 &mld_vif->emlsr.prevent_done_wk, delay);
}
178
iwl_mld_clear_avg_chan_load_iter(struct ieee80211_hw * hw,struct ieee80211_chanctx_conf * ctx,void * dat)179 static void iwl_mld_clear_avg_chan_load_iter(struct ieee80211_hw *hw,
180 struct ieee80211_chanctx_conf *ctx,
181 void *dat)
182 {
183 struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(ctx);
184
185 /* It is ok to do it for all chanctx (and not only for the ones that
186 * belong to the EMLSR vif) since EMLSR is not allowed if there is
187 * another vif.
188 */
189 phy->avg_channel_load_not_by_us = 0;
190 }
191
/* Leave EMLSR mode, keeping only @link_to_keep active.
 *
 * @mld: the mld device
 * @vif: the vif leaving EMLSR
 * @exit: the reason for the exit (for logging and prevention tracking)
 * @link_to_keep: link ID that remains active
 * @sync: if true, apply the link change synchronously and return its result;
 *	otherwise request it asynchronously and return 0
 *
 * Returns 0 when nothing needed to be done or the async path was taken,
 * otherwise the result of ieee80211_set_active_links().
 */
static int _iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			       enum iwl_mld_emlsr_exit exit, u8 link_to_keep,
			       bool sync)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	u16 new_active_links;
	int ret = 0;

	lockdep_assert_wiphy(mld->wiphy);

	/* On entry failure need to exit anyway, even if entered from debugfs */
	if (exit != IWL_MLD_EMLSR_EXIT_FAIL_ENTRY && !IWL_MLD_AUTO_EML_ENABLE)
		return 0;

	/* Ignore exit request if EMLSR is not active */
	if (!iwl_mld_emlsr_active(vif))
		return 0;

	if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mld_vif->authorized))
		return 0;

	/* Fall back to the lowest active link if the requested one is gone */
	if (WARN_ON(!(vif->active_links & BIT(link_to_keep))))
		link_to_keep = __ffs(vif->active_links);

	new_active_links = BIT(link_to_keep);
	IWL_DEBUG_EHT(mld,
		      "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n",
		      iwl_mld_get_emlsr_exit_string(exit), exit,
		      vif->active_links, new_active_links);

	if (sync)
		ret = ieee80211_set_active_links(vif, new_active_links);
	else
		ieee80211_set_active_links_async(vif, new_active_links);

	/* Update latest exit reason and check EMLSR prevention */
	iwl_mld_check_emlsr_prevention(mld, mld_vif, exit);

	/* channel_load_not_by_us is invalid when in EMLSR.
	 * Clear it so wrong values won't be used.
	 */
	ieee80211_iter_chan_contexts_atomic(mld->hw,
					    iwl_mld_clear_avg_chan_load_iter,
					    NULL);

	return ret;
}
239
/* Asynchronous (non-blocking) variant of _iwl_mld_exit_emlsr(). */
void iwl_mld_exit_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			enum iwl_mld_emlsr_exit exit, u8 link_to_keep)
{
	_iwl_mld_exit_emlsr(mld, vif, exit, link_to_keep, false);
}
245
/* Set a block reason on @vif and exit EMLSR with EXIT_BLOCK if needed.
 *
 * Returns 0 if the reason was already set (or EMLSR is not applicable),
 * otherwise the result of the EMLSR exit (only meaningful when @sync).
 */
static int _iwl_mld_emlsr_block(struct iwl_mld *mld, struct ieee80211_vif *vif,
				enum iwl_mld_emlsr_blocked reason,
				u8 link_to_keep, bool sync)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	lockdep_assert_wiphy(mld->wiphy);

	if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
		return 0;

	/* Already blocked for this reason - nothing to do */
	if (mld_vif->emlsr.blocked_reasons & reason)
		return 0;

	mld_vif->emlsr.blocked_reasons |= reason;

	IWL_DEBUG_EHT(mld, "Blocking EMLSR mode. reason = %s (0x%x)\n",
		      iwl_mld_get_emlsr_blocked_string(reason), reason);
	iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);

	/* The periodic TPT check is pointless while blocked on TPT */
	if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
		wiphy_delayed_work_cancel(mld_vif->mld->wiphy,
					  &mld_vif->emlsr.check_tpt_wk);

	return _iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BLOCK,
				   link_to_keep, sync);
}
273
/* Asynchronous (non-blocking) variant of _iwl_mld_emlsr_block(). */
void iwl_mld_block_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			 enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
{
	_iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, false);
}
279
/* Synchronous variant of _iwl_mld_emlsr_block(); returns its error code. */
int iwl_mld_block_emlsr_sync(struct iwl_mld *mld, struct ieee80211_vif *vif,
			     enum iwl_mld_emlsr_blocked reason, u8 link_to_keep)
{
	return _iwl_mld_emlsr_block(mld, vif, reason, link_to_keep, true);
}
285
/* How long the temporary non-BSS block stays armed before auto-unblock */
#define IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS_TIMEOUT	(10 * HZ)

/* Interface iterator: set the TMP_NON_BSS block on an EMLSR-capable vif and
 * queue the delayed work that lifts it after the timeout above.
 */
static void iwl_mld_vif_iter_emlsr_block_tmp_non_bss(void *_data, u8 *mac,
						     struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int ret;

	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
				       IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS,
				       iwl_mld_get_primary_link(vif));
	/* Don't arm the unblock timer if blocking failed */
	if (ret)
		return;

	wiphy_delayed_work_queue(mld_vif->mld->wiphy,
				 &mld_vif->emlsr.tmp_non_bss_done_wk,
				 IWL_MLD_EMLSR_BLOCKED_TMP_NON_BSS_TIMEOUT);
}
307
/* Temporarily block EMLSR on all active interfaces (e.g. around non-BSS
 * activity); each vif auto-unblocks after the timeout.
 */
void iwl_mld_emlsr_block_tmp_non_bss(struct iwl_mld *mld)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_emlsr_block_tmp_non_bss,
						NULL);
}
315
316 static void _iwl_mld_select_links(struct iwl_mld *mld,
317 struct ieee80211_vif *vif);
318
/* Clear a block reason on @vif; when the last reason clears, kick an
 * internal MLO scan so link selection can re-enter EMLSR.
 */
void iwl_mld_unblock_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif,
			   enum iwl_mld_emlsr_blocked reason)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);

	lockdep_assert_wiphy(mld->wiphy);

	if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif))
		return;

	/* Not blocked for this reason - nothing to do */
	if (!(mld_vif->emlsr.blocked_reasons & reason))
		return;

	mld_vif->emlsr.blocked_reasons &= ~reason;

	IWL_DEBUG_EHT(mld, "Unblocking EMLSR mode. reason = %s (0x%x)\n",
		      iwl_mld_get_emlsr_blocked_string(reason), reason);
	iwl_mld_print_emlsr_blocked(mld, mld_vif->emlsr.blocked_reasons);

	/* Restart the periodic TPT check that was cancelled when blocking */
	if (reason == IWL_MLD_EMLSR_BLOCKED_TPT)
		wiphy_delayed_work_queue(mld_vif->mld->wiphy,
					 &mld_vif->emlsr.check_tpt_wk,
					 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));

	/* Other reasons still pending - EMLSR stays blocked */
	if (mld_vif->emlsr.blocked_reasons)
		return;

	IWL_DEBUG_EHT(mld, "EMLSR is unblocked\n");
	iwl_mld_int_mlo_scan(mld, vif);
}
349
/* Interface iterator: apply the FW's EMLSR mode recommendation (@data is
 * the iwl_esr_mode_notif) to each EMLSR-capable vif.
 *
 * NOTE(review): ESR_FORCE_LEAVE only logs and then falls through to the
 * "unexpected" warning; no exit is performed here for it — confirm this is
 * intentional.
 */
static void
iwl_mld_vif_iter_emlsr_mode_notif(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	const struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	const struct iwl_esr_mode_notif *notif = (void *)data;
	enum iwl_mvm_fw_esr_recommendation action = le32_to_cpu(notif->action);

	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	switch (action) {
	case ESR_RECOMMEND_LEAVE:
		IWL_DEBUG_EHT(mld_vif->mld,
			      "FW recommend leave reason = 0x%x\n",
			      le32_to_cpu(notif->leave_reason_mask));

		iwl_mld_exit_emlsr(mld_vif->mld, vif,
				   IWL_MLD_EMLSR_EXIT_FW_REQUEST,
				   iwl_mld_get_primary_link(vif));
		break;
	case ESR_FORCE_LEAVE:
		IWL_DEBUG_EHT(mld_vif->mld, "FW force leave reason = 0x%x\n",
			      le32_to_cpu(notif->leave_reason_mask));
		fallthrough;
	case ESR_RECOMMEND_ENTER:
	default:
		IWL_WARN(mld_vif->mld, "Unexpected EMLSR notification: %d\n",
			 action);
	}
}
381
/* RX handler for the FW EMLSR mode notification: fan it out to all active
 * interfaces via the iterator above.
 */
void iwl_mld_handle_emlsr_mode_notif(struct iwl_mld *mld,
				     struct iwl_rx_packet *pkt)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_emlsr_mode_notif,
						pkt->data);
}
390
391 static void
iwl_mld_vif_iter_disconnect_emlsr(void * data,u8 * mac,struct ieee80211_vif * vif)392 iwl_mld_vif_iter_disconnect_emlsr(void *data, u8 *mac,
393 struct ieee80211_vif *vif)
394 {
395 if (!iwl_mld_vif_has_emlsr_cap(vif))
396 return;
397
398 ieee80211_connection_loss(vif);
399 }
400
/* RX handler for the FW EMLSR transition-failure notification.
 *
 * If the FW reports a non-existing link, drop the connection on all
 * EMLSR-capable vifs. If deactivation of a link failed, disconnect that
 * vif. If activation of the second link failed, fall back to single-link
 * operation on the link the FW reported as still valid.
 */
void iwl_mld_handle_emlsr_trans_fail_notif(struct iwl_mld *mld,
					   struct iwl_rx_packet *pkt)
{
	const struct iwl_esr_trans_fail_notif *notif = (const void *)pkt->data;
	u32 fw_link_id = le32_to_cpu(notif->link_id);
	struct ieee80211_bss_conf *bss_conf =
		iwl_mld_fw_id_to_link_conf(mld, fw_link_id);

	IWL_DEBUG_EHT(mld,
		      "Failed to %s EMLSR on link %d (FW: %d), reason %d\n",
		      le32_to_cpu(notif->activation) ? "enter" : "exit",
		      bss_conf ? bss_conf->link_id : -1,
		      le32_to_cpu(notif->link_id),
		      le32_to_cpu(notif->err_code));

	if (IWL_FW_CHECK(mld, !bss_conf,
			 "FW reported failure to %sactivate EMLSR on a non-existing link: %d\n",
			 le32_to_cpu(notif->activation) ? "" : "de",
			 fw_link_id)) {
		ieee80211_iterate_active_interfaces_mtx(
			mld->hw, IEEE80211_IFACE_ITER_NORMAL,
			iwl_mld_vif_iter_disconnect_emlsr, NULL);
		return;
	}

	/* Disconnect if we failed to deactivate a link */
	if (!le32_to_cpu(notif->activation)) {
		ieee80211_connection_loss(bss_conf->vif);
		return;
	}

	/*
	 * We failed to activate the second link, go back to the link specified
	 * by the firmware as that is the one that is still valid now.
	 */
	iwl_mld_exit_emlsr(mld, bss_conf->vif, IWL_MLD_EMLSR_EXIT_FAIL_ENTRY,
			   bss_conf->link_id);
}
439
440 /* Active non-station link tracking */
iwl_mld_count_non_bss_links(void * _data,u8 * mac,struct ieee80211_vif * vif)441 static void iwl_mld_count_non_bss_links(void *_data, u8 *mac,
442 struct ieee80211_vif *vif)
443 {
444 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
445 int *count = _data;
446
447 if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
448 return;
449
450 *count += iwl_mld_count_active_links(mld_vif->mld, vif);
451 }
452
/* Parameters/result for iwl_mld_vif_iter_update_emlsr_block() */
struct iwl_mld_update_emlsr_block_data {
	bool block;	/* true to block, false to unblock */
	enum iwl_mld_emlsr_blocked reason;	/* reason bit to toggle */
	int result;	/* last nonzero error from a sync block attempt */
};
458
459 static void
iwl_mld_vif_iter_update_emlsr_block(void * _data,u8 * mac,struct ieee80211_vif * vif)460 iwl_mld_vif_iter_update_emlsr_block(void *_data, u8 *mac,
461 struct ieee80211_vif *vif)
462 {
463 struct iwl_mld_update_emlsr_block_data *data = _data;
464 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
465 int ret;
466
467 if (!iwl_mld_vif_has_emlsr_cap(vif))
468 return;
469
470 if (data->block) {
471 ret = iwl_mld_block_emlsr_sync(mld_vif->mld, vif,
472 data->reason,
473 iwl_mld_get_primary_link(vif));
474 if (ret)
475 data->result = ret;
476 } else {
477 iwl_mld_unblock_emlsr(mld_vif->mld, vif,
478 data->reason);
479 }
480 }
481
/* Block or unblock EMLSR for @reason on all active interfaces.
 * Returns 0, or the last error encountered while blocking.
 */
int iwl_mld_update_emlsr_block(struct iwl_mld *mld, bool block,
			       enum iwl_mld_emlsr_blocked reason)
{
	struct iwl_mld_update_emlsr_block_data block_data = {
		.block = block,
		.reason = reason,
	};

	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_update_emlsr_block,
						&block_data);

	return block_data.result;
}
497
/* Re-evaluate the NON_BSS EMLSR block after a non-station link change.
 *
 * @pending_link_changes: delta of non-station active links that is about to
 *	happen but is not yet reflected in mac80211's state (may be negative)
 *
 * Returns the result of iwl_mld_update_emlsr_block().
 */
int iwl_mld_emlsr_check_non_bss_block(struct iwl_mld *mld,
				      int pending_link_changes)
{
	/* An active link of a non-station vif blocks EMLSR. Upon activation
	 * block EMLSR on the bss vif. Upon deactivation, check if this link
	 * was the last non-station link active, and if so unblock the bss vif
	 */
	int count = pending_link_changes;

	/* No need to count if we are activating a non-BSS link */
	if (count <= 0)
		ieee80211_iterate_active_interfaces_mtx(mld->hw,
							IEEE80211_IFACE_ITER_NORMAL,
							iwl_mld_count_non_bss_links,
							&count);

	/*
	 * We could skip updating it if the block change did not change (and
	 * pending_link_changes is non-zero).
	 */
	return iwl_mld_update_emlsr_block(mld, !!count,
					  IWL_MLD_EMLSR_BLOCKED_NON_BSS);
}
521
/* Secondary link must carry at least this percentage of TX or RX traffic */
#define EMLSR_SEC_LINK_MIN_PERC 10
/* Minimum total MPDU counts before the percentage checks are applied */
#define EMLSR_MIN_TX 3000
#define EMLSR_MIN_RX 400
525
/* Periodic throughput check for EMLSR.
 *
 * Sums the per-queue/per-link MPDU counters over the last window (clearing
 * them as it goes). Blocks EMLSR when total traffic is below the entry
 * threshold; exits EMLSR when the secondary link carries too small a share
 * of the traffic; otherwise re-arms itself for the next window.
 */
void iwl_mld_emlsr_check_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.check_tpt_wk.work);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);
	struct iwl_mld *mld = mld_vif->mld;
	struct iwl_mld_sta *mld_sta;
	struct iwl_mld_link *sec_link;
	unsigned long total_tx = 0, total_rx = 0;
	unsigned long sec_link_tx = 0, sec_link_rx = 0;
	u8 sec_link_tx_perc, sec_link_rx_perc;
	s8 sec_link_id;

	if (!iwl_mld_vif_has_emlsr_cap(vif) || !mld_vif->ap_sta)
		return;

	mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);

	/* We only count for the AP sta in a MLO connection */
	if (!mld_sta->mpdu_counters)
		return;

	/* This wk should only run when the TPT blocker isn't set.
	 * When the blocker is set, the decision to remove it, as well as
	 * clearing the counters, is done in DP (to avoid having a wk every
	 * 5 seconds when idle. When the blocker is unset, we are not idle
	 * anyway).
	 */
	if (WARN_ON(mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT))
		return;
	/*
	 * TPT is unblocked, need to check if the TPT criteria is still met.
	 *
	 * If EMLSR is active for at least 5 seconds, then we also
	 * need to check the secondary link requirements.
	 */
	if (iwl_mld_emlsr_active(vif) &&
	    time_is_before_jiffies(mld_vif->emlsr.last_entry_ts +
				   IWL_MLD_TPT_COUNT_WINDOW)) {
		sec_link_id = iwl_mld_get_other_link(vif, iwl_mld_get_primary_link(vif));
		sec_link = iwl_mld_link_dereference_check(mld_vif, sec_link_id);
		if (WARN_ON_ONCE(!sec_link))
			return;
		/* We need the FW ID here */
		sec_link_id = sec_link->fw_id;
	} else {
		/* Secondary-link checks don't apply yet */
		sec_link_id = -1;
	}

	/* Sum up RX and TX MPDUs from the different queues/links */
	for (int q = 0; q < mld->trans->info.num_rxqs; q++) {
		struct iwl_mld_per_q_mpdu_counter *queue_counter =
			&mld_sta->mpdu_counters[q];

		spin_lock_bh(&queue_counter->lock);

		/* The link IDs that doesn't exist will contain 0 */
		for (int link = 0;
		     link < ARRAY_SIZE(queue_counter->per_link);
		     link++) {
			total_tx += queue_counter->per_link[link].tx;
			total_rx += queue_counter->per_link[link].rx;
		}

		if (sec_link_id != -1) {
			sec_link_tx += queue_counter->per_link[sec_link_id].tx;
			sec_link_rx += queue_counter->per_link[sec_link_id].rx;
		}

		/* Start the next window from zero */
		memset(queue_counter->per_link, 0,
		       sizeof(queue_counter->per_link));

		spin_unlock_bh(&queue_counter->lock);
	}

	IWL_DEBUG_EHT(mld, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
		      total_tx, total_rx);

	/* If we don't have enough MPDUs - exit EMLSR */
	if (total_tx < IWL_MLD_ENTER_EMLSR_TPT_THRESH &&
	    total_rx < IWL_MLD_ENTER_EMLSR_TPT_THRESH) {
		iwl_mld_block_emlsr(mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT,
				    iwl_mld_get_primary_link(vif));
		return;
	}

	/* EMLSR is not active */
	if (sec_link_id == -1)
		goto schedule;

	IWL_DEBUG_EHT(mld, "Secondary Link %d: Tx MPDUs: %ld. Rx MPDUs: %ld\n",
		      sec_link_id, sec_link_tx, sec_link_rx);

	/* Calculate the percentage of the secondary link TX/RX */
	sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
	sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;

	/*
	 * The TX/RX percentage is checked only if it exceeds the required
	 * minimum. In addition, RX is checked only if the TX check failed.
	 */
	if ((total_tx > EMLSR_MIN_TX &&
	     sec_link_tx_perc < EMLSR_SEC_LINK_MIN_PERC) ||
	    (total_rx > EMLSR_MIN_RX &&
	     sec_link_rx_perc < EMLSR_SEC_LINK_MIN_PERC)) {
		iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_LINK_USAGE,
				   iwl_mld_get_primary_link(vif));
		return;
	}

schedule:
	/* Check again when the next window ends */
	wiphy_delayed_work_queue(mld_vif->mld->wiphy,
				 &mld_vif->emlsr.check_tpt_wk,
				 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
}
642
/* Work item: clear the TPT block (queued from the datapath once traffic
 * picks up again).
 */
void iwl_mld_emlsr_unblock_tpt_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld_vif *mld_vif = container_of(wk, struct iwl_mld_vif,
						   emlsr.unblock_tpt_wk);
	struct ieee80211_vif *vif =
		container_of((void *)mld_vif, struct ieee80211_vif, drv_priv);

	iwl_mld_unblock_emlsr(mld_vif->mld, vif, IWL_MLD_EMLSR_BLOCKED_TPT);
}
652
653 /*
654 * Link selection
655 */
656
iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld * mld,const struct cfg80211_chan_def * chandef,bool low)657 s8 iwl_mld_get_emlsr_rssi_thresh(struct iwl_mld *mld,
658 const struct cfg80211_chan_def *chandef,
659 bool low)
660 {
661 if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ &&
662 chandef->chan->band != NL80211_BAND_5GHZ &&
663 chandef->chan->band != NL80211_BAND_6GHZ))
664 return S8_MAX;
665
666 #define RSSI_THRESHOLD(_low, _bw) \
667 (_low) ? IWL_MLD_LOW_RSSI_THRESH_##_bw##MHZ \
668 : IWL_MLD_HIGH_RSSI_THRESH_##_bw##MHZ
669
670 switch (chandef->width) {
671 case NL80211_CHAN_WIDTH_20_NOHT:
672 case NL80211_CHAN_WIDTH_20:
673 /* 320 MHz has the same thresholds as 20 MHz */
674 case NL80211_CHAN_WIDTH_320:
675 return RSSI_THRESHOLD(low, 20);
676 case NL80211_CHAN_WIDTH_40:
677 return RSSI_THRESHOLD(low, 40);
678 case NL80211_CHAN_WIDTH_80:
679 return RSSI_THRESHOLD(low, 80);
680 case NL80211_CHAN_WIDTH_160:
681 return RSSI_THRESHOLD(low, 160);
682 default:
683 WARN_ON(1);
684 return S8_MAX;
685 }
686 #undef RSSI_THRESHOLD
687 }
688
/* Per-link EMLSR eligibility check for @link (as @primary or secondary).
 * Returns a mask of IWL_MLD_EMLSR_EXIT_* reasons that disallow using this
 * link in EMLSR, or 0 if the link is acceptable.
 */
static u32
iwl_mld_emlsr_disallowed_with_link(struct iwl_mld *mld,
				   struct ieee80211_vif *vif,
				   struct iwl_mld_link_sel_data *link,
				   bool primary)
{
	struct wiphy *wiphy = mld->wiphy;
	struct ieee80211_bss_conf *conf;
	u32 ret = 0;

	conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]);
	if (WARN_ON_ONCE(!conf))
		return IWL_MLD_EMLSR_EXIT_INVALID;

	/* 2.4 GHz is shared with Bluetooth - avoid EMLSR while BT is active */
	if (link->chandef->chan->band == NL80211_BAND_2GHZ && mld->bt_is_active)
		ret |= IWL_MLD_EMLSR_EXIT_BT_COEX;

	/* Compare against the high (entry) RSSI threshold */
	if (link->signal <
	    iwl_mld_get_emlsr_rssi_thresh(mld, link->chandef, false))
		ret |= IWL_MLD_EMLSR_EXIT_LOW_RSSI;

	if (conf->csa_active)
		ret |= IWL_MLD_EMLSR_EXIT_CSA;

	if (ret) {
		IWL_DEBUG_EHT(mld, "Link %d is not allowed for EMLSR as %s\n",
			      link->link_id, primary ? "primary" : "secondary");
		iwl_mld_print_emlsr_exit(mld, ret);
	}

	return ret;
}
721
/* Fill @data with selection info (chandef, signal, grade) for every usable
 * link that was seen since the last MLO scan started.
 *
 * Returns the number of entries filled; the index of the highest-grade
 * entry is stored in *best_link_idx (only valid when the return is > 0).
 */
static u8
iwl_mld_set_link_sel_data(struct iwl_mld *mld,
			  struct ieee80211_vif *vif,
			  struct iwl_mld_link_sel_data *data,
			  unsigned long usable_links,
			  u8 *best_link_idx)
{
	u8 n_data = 0;
	u16 max_grade = 0;
	unsigned long link_id;

	for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
		struct ieee80211_bss_conf *link_conf =
			link_conf_dereference_protected(vif, link_id);

		if (WARN_ON_ONCE(!link_conf))
			continue;

		/* Ignore any BSS that was not seen in the last MLO scan */
		if (ktime_before(link_conf->bss->ts_boottime,
				 mld->scan.last_mlo_scan_start_time))
			continue;

		data[n_data].link_id = link_id;
		data[n_data].chandef = &link_conf->chanreq.oper;
		data[n_data].signal = MBM_TO_DBM(link_conf->bss->signal);
		data[n_data].grade = iwl_mld_get_link_grade(mld, link_conf);

		/* Track the best-graded entry as we go */
		if (n_data == 0 || data[n_data].grade > max_grade) {
			max_grade = data[n_data].grade;
			*best_link_idx = n_data;
		}
		n_data++;
	}

	return n_data;
}
759
760 static u32
iwl_mld_get_min_chan_load_thresh(struct ieee80211_chanctx_conf * chanctx)761 iwl_mld_get_min_chan_load_thresh(struct ieee80211_chanctx_conf *chanctx)
762 {
763 const struct iwl_mld_phy *phy = iwl_mld_phy_from_mac80211(chanctx);
764
765 switch (phy->chandef.width) {
766 case NL80211_CHAN_WIDTH_320:
767 case NL80211_CHAN_WIDTH_160:
768 return 5;
769 case NL80211_CHAN_WIDTH_80:
770 return 7;
771 default:
772 break;
773 }
774 return 10;
775 }
776
/* Decide whether the primary link's channel load justifies EMLSR for the
 * (@a = primary, @b = secondary) pair.
 *
 * Requires the primary link to be active with a known chanctx and its load
 * to exceed a width-dependent minimum. Low-latency vifs always qualify
 * past that point; otherwise, when the primary is wider than the
 * secondary, the load bar rises with the bandwidth ratio.
 */
static bool
iwl_mld_channel_load_allows_emlsr(struct iwl_mld *mld,
				  struct ieee80211_vif *vif,
				  const struct iwl_mld_link_sel_data *a,
				  const struct iwl_mld_link_sel_data *b)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld_link *link_a =
		iwl_mld_link_dereference_check(mld_vif, a->link_id);
	struct ieee80211_chanctx_conf *chanctx_a = NULL;
	u32 bw_a, bw_b, ratio;
	u32 primary_load_perc;

	if (!link_a || !link_a->active) {
		IWL_DEBUG_EHT(mld, "Primary link is not active. Can't enter EMLSR\n");
		return false;
	}

	chanctx_a = wiphy_dereference(mld->wiphy, link_a->chan_ctx);

	if (WARN_ON(!chanctx_a))
		return false;

	primary_load_perc =
		iwl_mld_phy_from_mac80211(chanctx_a)->avg_channel_load_not_by_us;

	IWL_DEBUG_EHT(mld, "Average channel load not by us: %u\n", primary_load_perc);

	if (primary_load_perc < iwl_mld_get_min_chan_load_thresh(chanctx_a)) {
		IWL_DEBUG_EHT(mld, "Channel load is below the minimum threshold\n");
		return false;
	}

	if (iwl_mld_vif_low_latency(mld_vif)) {
		IWL_DEBUG_EHT(mld, "Low latency vif, EMLSR is allowed\n");
		return true;
	}

	/* Secondary at least as wide as the primary - always worthwhile */
	if (a->chandef->width <= b->chandef->width)
		return true;

	bw_a = cfg80211_chandef_get_width(a->chandef);
	bw_b = cfg80211_chandef_get_width(b->chandef);
	ratio = bw_a / bw_b;

	/* The narrower the secondary, the more load the primary must carry */
	switch (ratio) {
	case 2:
		return primary_load_perc > 25;
	case 4:
		return primary_load_perc > 40;
	case 8:
	case 16:
		return primary_load_perc > 50;
	}

	return false;
}
834
/* Check whether links @a and @b can be used together in EMLSR.
 * Returns a mask of IWL_MLD_EMLSR_EXIT_* reasons preventing the pair,
 * or 0 when the pair is allowed.
 */
VISIBLE_IF_IWLWIFI_KUNIT u32
iwl_mld_emlsr_pair_state(struct ieee80211_vif *vif,
			 struct iwl_mld_link_sel_data *a,
			 struct iwl_mld_link_sel_data *b)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = mld_vif->mld;
	u32 reason_mask = 0;

	/* Per-link considerations */
	reason_mask = iwl_mld_emlsr_disallowed_with_link(mld, vif, a, true);
	if (reason_mask)
		return reason_mask;

	reason_mask = iwl_mld_emlsr_disallowed_with_link(mld, vif, b, false);
	if (reason_mask)
		return reason_mask;

	/* Same-band pairs are generally disallowed, with one 5 GHz carve-out */
	if (a->chandef->chan->band == b->chandef->chan->band) {
		const struct cfg80211_chan_def *c_low = a->chandef;
		const struct cfg80211_chan_def *c_high = b->chandef;
		u32 c_low_upper_edge, c_high_lower_edge;

		if (c_low->chan->center_freq > c_high->chan->center_freq)
			swap(c_low, c_high);

		c_low_upper_edge = c_low->center_freq1 +
			cfg80211_chandef_get_width(c_low) / 2;
		c_high_lower_edge = c_high->center_freq1 -
			cfg80211_chandef_get_width(c_high) / 2;

		if (a->chandef->chan->band == NL80211_BAND_5GHZ &&
		    c_low_upper_edge <= 5330 && c_high_lower_edge >= 5490) {
			/* This case is fine - HW/FW can deal with it, there's
			 * enough separation between the two channels.
			 */
		} else {
			reason_mask |= IWL_MLD_EMLSR_EXIT_EQUAL_BAND;
		}
	}
	if (!iwl_mld_channel_load_allows_emlsr(mld, vif, a, b))
		reason_mask |= IWL_MLD_EMLSR_EXIT_CHAN_LOAD;

	if (reason_mask) {
		IWL_DEBUG_EHT(mld,
			      "Links %d and %d are not a valid pair for EMLSR\n",
			      a->link_id, b->link_id);
		IWL_DEBUG_EHT(mld, "Links bandwidth are: %d and %d\n",
			      nl80211_chan_width_to_mhz(a->chandef->width),
			      nl80211_chan_width_to_mhz(b->chandef->width));
		iwl_mld_print_emlsr_exit(mld, reason_mask);
	}

	return reason_mask;
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_emlsr_pair_state);
891
/* Calculation is done with fixed-point with a scaling factor of 1/256 */
#define SCALE_FACTOR 256

/*
 * Returns the combined grade of two given links.
 * Returns 0 if EMLSR is not allowed with these 2 links.
 *
 * On return, *primary_id holds the link ID chosen as primary (the one with
 * the higher individual grade).
 */
static
unsigned int iwl_mld_get_emlsr_grade(struct iwl_mld *mld,
				     struct ieee80211_vif *vif,
				     struct iwl_mld_link_sel_data *a,
				     struct iwl_mld_link_sel_data *b,
				     u8 *primary_id)
{
	struct ieee80211_bss_conf *primary_conf;
	struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
	unsigned int primary_load;

	lockdep_assert_wiphy(wiphy);

	/* a is always primary, b is always secondary */
	if (b->grade > a->grade)
		swap(a, b);

	*primary_id = a->link_id;

	if (iwl_mld_emlsr_pair_state(vif, a, b))
		return 0;

	primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);

	if (WARN_ON_ONCE(!primary_conf))
		return 0;

	primary_load = iwl_mld_get_chan_load(mld, primary_conf);

	/* The more the primary link is loaded, the more worthwhile EMLSR becomes */
	return a->grade + ((b->grade * primary_load) / SCALE_FACTOR);
}
931
/*
 * Core link-selection logic for a single vif.
 *
 * Grades all usable links (the grades must be backed by a recent MLO
 * scan), then activates either the single best link or, when EMLSR is
 * allowed and a link pair grades at least as high, the best pair. The
 * chosen primary link and link set are recorded on the vif and applied
 * asynchronously through mac80211.
 */
static void _iwl_mld_select_links(struct iwl_mld *mld,
				  struct ieee80211_vif *vif)
{
	struct iwl_mld_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
	struct iwl_mld_link_sel_data *best_link;
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	int max_active_links = iwl_mld_max_active_links(mld, vif);
	u16 new_active, usable_links = ieee80211_vif_usable_links(vif);
	u8 best_idx, new_primary, n_data;
	u16 max_grade;

	lockdep_assert_wiphy(mld->wiphy);

	/* Nothing to select before authorization or with one usable link */
	if (!mld_vif->authorized || hweight16(usable_links) <= 1)
		return;

	/* Grades are only trustworthy if the MLO scan is fresh (< 5s old) */
	if (WARN(ktime_before(mld->scan.last_mlo_scan_start_time,
			      ktime_sub_ns(ktime_get_boottime_ns(),
					   5ULL * NSEC_PER_SEC)),
		 "Last MLO scan was too long ago, can't select links\n"))
		return;

	/* The logic below is simple and not suited for more than 2 links */
	WARN_ON_ONCE(max_active_links > 2);

	n_data = iwl_mld_set_link_sel_data(mld, vif, data, usable_links,
					   &best_idx);

	if (!n_data) {
		IWL_DEBUG_EHT(mld,
			      "Couldn't find a valid grade for any link!\n");
		return;
	}

	/* Default to selecting the single best link */
	best_link = &data[best_idx];
	new_primary = best_link->link_id;
	new_active = BIT(best_link->link_id);
	max_grade = best_link->grade;

	/* If EMLSR is not possible, activate the best link */
	if (max_active_links == 1 || n_data == 1 ||
	    !iwl_mld_vif_has_emlsr_cap(vif) || !IWL_MLD_AUTO_EML_ENABLE ||
	    mld_vif->emlsr.blocked_reasons)
		goto set_active;

	/* Try to find the best link combination */
	for (u8 a = 0; a < n_data; a++) {
		for (u8 b = a + 1; b < n_data; b++) {
			u8 best_in_pair;
			u16 emlsr_grade =
				iwl_mld_get_emlsr_grade(mld, vif,
							&data[a], &data[b],
							&best_in_pair);

			/*
			 * Prefer (new) EMLSR combination to prefer EMLSR over
			 * a single link.
			 */
			if (emlsr_grade < max_grade)
				continue;

			max_grade = emlsr_grade;
			new_primary = best_in_pair;
			new_active = BIT(data[a].link_id) |
				     BIT(data[b].link_id);
		}
	}

set_active:
	IWL_DEBUG_EHT(mld, "Link selection result: 0x%x. Primary = %d\n",
		      new_active, new_primary);

	mld_vif->emlsr.selected_primary = new_primary;
	mld_vif->emlsr.selected_links = new_active;

	/* mac80211 applies the new active set asynchronously */
	ieee80211_set_active_links_async(vif, new_active);
}
1010
/* Interface-iterator callback: run link selection on one active vif */
static void iwl_mld_vif_iter_select_links(void *_data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct iwl_mld *mld = iwl_mld_vif_from_mac80211(vif)->mld;

	_iwl_mld_select_links(mld, vif);
}
1019
/* Run link selection (see _iwl_mld_select_links) on all active interfaces */
void iwl_mld_select_links(struct iwl_mld *mld)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_vif_iter_select_links,
						NULL);
}
1027
/* Per-vif BT-coex check: retry EMLSR when BT is off, leave it when BT
 * is on and any active link is on the (BT-shared) 2.4 GHz band.
 */
static void iwl_mld_emlsr_check_bt_iter(void *_data, u8 *mac,
					struct ieee80211_vif *vif)
{
	struct iwl_mld *mld = iwl_mld_vif_from_mac80211(vif)->mld;
	struct ieee80211_bss_conf *link_conf;
	unsigned int link_id;

	if (!iwl_mld_vif_has_emlsr_cap(vif))
		return;

	/* BT turned off: a previous BT-related exit may now be retried */
	if (!mld->bt_is_active) {
		iwl_mld_retry_emlsr(mld, vif);
		return;
	}

	/* BT is turned ON but we are not in EMLSR, nothing to do */
	if (!iwl_mld_emlsr_active(vif))
		return;

	/* In EMLSR with BT on: bail out of EMLSR if any link is on 2.4 GHz */
	for_each_vif_active_link(vif, link_conf, link_id) {
		if (WARN_ON(!link_conf->chanreq.oper.chan))
			continue;

		if (link_conf->chanreq.oper.chan->band != NL80211_BAND_2GHZ)
			continue;

		iwl_mld_exit_emlsr(mld, vif, IWL_MLD_EMLSR_EXIT_BT_COEX,
				   iwl_mld_get_primary_link(vif));
		return;
	}
}
1061
/* Re-evaluate the BT-coex EMLSR decision on every active interface */
void iwl_mld_emlsr_check_bt(struct iwl_mld *mld)
{
	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_emlsr_check_bt_iter,
						NULL);
}
1069
/* Context passed to iwl_mld_chan_load_update_iter() */
struct iwl_mld_chan_load_data {
	/* PHY whose averaged channel load was just updated */
	struct iwl_mld_phy *phy;
	/* Load not generated by us on that channel before this update */
	u32 prev_chan_load_not_by_us;
};
1074
/*
 * Per-vif reaction to a channel-load update on the given PHY.
 *
 * Only acts when the vif's primary link uses this PHY's channel context:
 * while in EMLSR, exit if the primary channel's external load dropped
 * below the worthwhile threshold; otherwise, retry EMLSR whenever the
 * load crosses upward over any of the interesting thresholds.
 */
static void iwl_mld_chan_load_update_iter(void *_data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct iwl_mld_chan_load_data *data = _data;
	const struct iwl_mld_phy *phy = data->phy;
	/* The mld PHY lives in the chanctx's drv_priv; recover the chanctx */
	struct ieee80211_chanctx_conf *chanctx =
		container_of((const void *)phy, struct ieee80211_chanctx_conf,
			     drv_priv);
	struct iwl_mld *mld = iwl_mld_vif_from_mac80211(vif)->mld;
	struct ieee80211_bss_conf *prim_link;
	unsigned int prim_link_id;

	prim_link_id = iwl_mld_get_primary_link(vif);
	prim_link = link_conf_dereference_protected(vif, prim_link_id);

	if (WARN_ON(!prim_link))
		return;

	/* Skip updates for channel contexts the primary link doesn't use */
	if (chanctx != rcu_access_pointer(prim_link->chanctx_conf))
		return;

	if (iwl_mld_emlsr_active(vif)) {
		/* negative return indicates no valid load estimate */
		int chan_load = iwl_mld_get_chan_load_by_others(mld, prim_link,
								true);

		if (chan_load < 0)
			return;

		/* chan_load is in range [0,255] */
		if (chan_load < NORMALIZE_PERCENT_TO_255(IWL_MLD_EXIT_EMLSR_CHAN_LOAD))
			iwl_mld_exit_emlsr(mld, vif,
					   IWL_MLD_EMLSR_EXIT_CHAN_LOAD,
					   prim_link_id);
	} else {
		u32 old_chan_load = data->prev_chan_load_not_by_us;
		u32 new_chan_load = phy->avg_channel_load_not_by_us;
		u32 min_thresh = iwl_mld_get_min_chan_load_thresh(chanctx);

/* True only on an upward crossing of the threshold */
#define THRESHOLD_CROSSED(threshold) \
	(old_chan_load <= (threshold) && new_chan_load > (threshold))

		if (THRESHOLD_CROSSED(min_thresh) || THRESHOLD_CROSSED(25) ||
		    THRESHOLD_CROSSED(40) || THRESHOLD_CROSSED(50))
			iwl_mld_retry_emlsr(mld, vif);
#undef THRESHOLD_CROSSED
	}
}
1122
/*
 * Notify all active interfaces that a PHY's averaged channel load
 * changed, passing both the previous and (implicitly, via @phy) the
 * new load so each vif can exit or retry EMLSR accordingly.
 */
void iwl_mld_emlsr_check_chan_load(struct ieee80211_hw *hw,
				   struct iwl_mld_phy *phy,
				   u32 prev_chan_load_not_by_us)
{
	struct iwl_mld_chan_load_data data = {
		.phy = phy,
		.prev_chan_load_not_by_us = prev_chan_load_not_by_us,
	};

	ieee80211_iterate_active_interfaces_mtx(hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_chan_load_update_iter,
						&data);
}
1137
iwl_mld_retry_emlsr(struct iwl_mld * mld,struct ieee80211_vif * vif)1138 void iwl_mld_retry_emlsr(struct iwl_mld *mld, struct ieee80211_vif *vif)
1139 {
1140 struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
1141
1142 if (!IWL_MLD_AUTO_EML_ENABLE || !iwl_mld_vif_has_emlsr_cap(vif) ||
1143 iwl_mld_emlsr_active(vif) || mld_vif->emlsr.blocked_reasons)
1144 return;
1145
1146 iwl_mld_int_mlo_scan(mld, vif);
1147 }
1148
/*
 * Per-vif toggle for the periodic throughput (TPT) check.
 *
 * @data points to a bool: true cancels the pending TPT check work
 * (start ignoring TPT), false clears the per-queue MPDU counters and
 * re-schedules the check (stop ignoring). Only applies to vifs with an
 * AP station in an MLO connection (mpdu_counters present) and when the
 * TPT block reason is not already set.
 */
static void iwl_mld_ignore_tpt_iter(void *data, u8 *mac,
				    struct ieee80211_vif *vif)
{
	struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(vif);
	struct iwl_mld *mld = mld_vif->mld;
	struct iwl_mld_sta *mld_sta;
	bool *start = (void *)data;

	/* check_tpt_wk is only used when TPT block isn't set */
	if (mld_vif->emlsr.blocked_reasons & IWL_MLD_EMLSR_BLOCKED_TPT ||
	    !IWL_MLD_AUTO_EML_ENABLE || !mld_vif->ap_sta)
		return;

	mld_sta = iwl_mld_sta_from_mac80211(mld_vif->ap_sta);

	/* We only count for the AP sta in a MLO connection */
	if (!mld_sta->mpdu_counters)
		return;

	if (*start) {
		/* Begin ignoring TPT: stop the periodic check entirely */
		wiphy_delayed_work_cancel(mld_vif->mld->wiphy,
					  &mld_vif->emlsr.check_tpt_wk);
		IWL_DEBUG_EHT(mld, "TPT check disabled\n");
		return;
	}

	/* Clear the counters so we start from the beginning */
	for (int q = 0; q < mld->trans->info.num_rxqs; q++) {
		struct iwl_mld_per_q_mpdu_counter *queue_counter =
			&mld_sta->mpdu_counters[q];

		spin_lock_bh(&queue_counter->lock);

		memset(queue_counter->per_link, 0,
		       sizeof(queue_counter->per_link));

		spin_unlock_bh(&queue_counter->lock);
	}

	/* Schedule the check in 5 seconds */
	wiphy_delayed_work_queue(mld_vif->mld->wiphy,
				 &mld_vif->emlsr.check_tpt_wk,
				 round_jiffies_relative(IWL_MLD_TPT_COUNT_WINDOW));
	IWL_DEBUG_EHT(mld, "TPT check enabled\n");
}
1194
/* Suspend the periodic TPT check on all active interfaces */
void iwl_mld_start_ignoring_tpt_updates(struct iwl_mld *mld)
{
	bool start = true;

	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_ignore_tpt_iter,
						&start);
}
1204
/* Resume the periodic TPT check (with cleared counters) on all interfaces */
void iwl_mld_stop_ignoring_tpt_updates(struct iwl_mld *mld)
{
	bool start = false;

	ieee80211_iterate_active_interfaces_mtx(mld->hw,
						IEEE80211_IFACE_ITER_NORMAL,
						iwl_mld_ignore_tpt_iter,
						&start);
}
1214
iwl_mld_emlsr_check_nan_block(struct iwl_mld * mld,struct ieee80211_vif * vif)1215 int iwl_mld_emlsr_check_nan_block(struct iwl_mld *mld, struct ieee80211_vif *vif)
1216 {
1217 if (mld->nan_device_vif &&
1218 ieee80211_vif_nan_started(mld->nan_device_vif))
1219 return iwl_mld_block_emlsr_sync(mld, vif,
1220 IWL_MLD_EMLSR_BLOCKED_NAN,
1221 iwl_mld_get_primary_link(vif));
1222
1223 iwl_mld_unblock_emlsr(mld, vif, IWL_MLD_EMLSR_BLOCKED_NAN);
1224
1225 return 0;
1226 }
1227