// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */
#include <linux/rtnetlink.h>
#include <net/mac80211.h>

#include "fw/api/rx.h"
#include "fw/api/datapath.h"
#include "fw/api/commands.h"
#include "fw/api/offload.h"
#include "fw/api/coex.h"
#include "fw/dbg.h"
#include "fw/uefi.h"

#include "mld.h"
#include "mlo.h"
#include "mac80211.h"
#include "led.h"
#include "scan.h"
#include "tx.h"
#include "sta.h"
#include "regulatory.h"
#include "thermal.h"
#include "low_latency.h"
#include "hcmd.h"
#include "fw/api/location.h"

#define DRV_DESCRIPTION "Intel(R) MLD wireless driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("IWLWIFI");

static const struct iwl_op_mode_ops iwl_mld_ops;

static int __init iwl_mld_init(void)
{
        int ret = iwl_opmode_register("iwlmld", &iwl_mld_ops);

        if (ret)
                pr_err("Unable to register MLD op_mode: %d\n", ret);

        return ret;
}
module_init(iwl_mld_init);

static void __exit iwl_mld_exit(void)
{
        iwl_opmode_deregister("iwlmld");
}
module_exit(iwl_mld_exit);

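/* Regulatory is self-managed: the driver/firmware provide the regulatory
 * domain, and NO_IR relaxation is allowed.
 */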
static void iwl_mld_hw_set_regulatory(struct iwl_mld *mld)
{
        struct wiphy *wiphy = mld->wiphy;

        wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
        wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
}

VISIBLE_IF_IWLWIFI_KUNIT
void iwl_construct_mld(struct iwl_mld *mld, struct iwl_trans *trans,
                       const struct iwl_cfg *cfg, const struct iwl_fw *fw,
                       struct ieee80211_hw *hw, struct dentry *dbgfs_dir)
{
        mld->dev = trans->dev;
        mld->trans = trans;
        mld->cfg = cfg;
        mld->fw = fw;
        mld->hw = hw;
        mld->wiphy = hw->wiphy;
        mld->debugfs_dir = dbgfs_dir;

        iwl_notification_wait_init(&mld->notif_wait);

        /* Setup async RX handling */
        spin_lock_init(&mld->async_handlers_lock);
        INIT_LIST_HEAD(&mld->async_handlers_list);
        wiphy_work_init(&mld->async_handlers_wk,
                        iwl_mld_async_handlers_wk);

        /* Dynamic Queue Allocation */
        spin_lock_init(&mld->add_txqs_lock);
        INIT_LIST_HEAD(&mld->txqs_to_add);
        wiphy_work_init(&mld->add_txqs_wk, iwl_mld_add_txqs_wk);

        /* Setup RX queues sync wait queue */
        init_waitqueue_head(&mld->rxq_sync.waitq);
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_construct_mld);

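/* FW runtime hooks: debug dump collection runs with the wiphy mutex held,
 * so take it in dump_start() and release it in dump_end().
 */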
static void __acquires(&mld->wiphy->mtx)
iwl_mld_fwrt_dump_start(void *ctx)
{
        struct iwl_mld *mld = ctx;

        wiphy_lock(mld->wiphy);
}

static void __releases(&mld->wiphy->mtx)
iwl_mld_fwrt_dump_end(void *ctx)
{
        struct iwl_mld *mld = ctx;

        wiphy_unlock(mld->wiphy);
}

static bool iwl_mld_d3_debug_enable(void *ctx)
{
        return IWL_MLD_D3_DEBUG;
}

static int iwl_mld_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
{
        struct iwl_mld *mld = (struct iwl_mld *)ctx;
        int ret;

        wiphy_lock(mld->wiphy);
        ret = iwl_mld_send_cmd(mld, host_cmd);
        wiphy_unlock(mld->wiphy);

        return ret;
}

static const struct iwl_fw_runtime_ops iwl_mld_fwrt_ops = {
        .dump_start = iwl_mld_fwrt_dump_start,
        .dump_end = iwl_mld_fwrt_dump_end,
        .send_hcmd = iwl_mld_fwrt_send_hcmd,
        .d3_debug_enable = iwl_mld_d3_debug_enable,
};

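/* Initialize the FW runtime with the ops above and select the regular
 * (operational) ucode image.
 */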
static void
iwl_mld_construct_fw_runtime(struct iwl_mld *mld, struct iwl_trans *trans,
                             const struct iwl_fw *fw,
                             struct dentry *debugfs_dir)
{
        iwl_fw_runtime_init(&mld->fwrt, trans, fw, &iwl_mld_fwrt_ops, mld,
                            NULL, NULL, debugfs_dir);

        iwl_fw_set_current_image(&mld->fwrt, IWL_UCODE_REGULAR);
}

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_legacy_names[] = {
        HCMD_NAME(UCODE_ALIVE_NTFY),
        HCMD_NAME(INIT_COMPLETE_NOTIF),
        HCMD_NAME(PHY_CONTEXT_CMD),
        HCMD_NAME(SCAN_CFG_CMD),
        HCMD_NAME(SCAN_REQ_UMAC),
        HCMD_NAME(SCAN_ABORT_UMAC),
        HCMD_NAME(SCAN_COMPLETE_UMAC),
        HCMD_NAME(TX_CMD),
        HCMD_NAME(TXPATH_FLUSH),
        HCMD_NAME(LEDS_CMD),
        HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_NOTIFICATION),
        HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIRM_NOTIFICATION),
        HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
        HCMD_NAME(POWER_TABLE_CMD),
        HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
        HCMD_NAME(BEACON_NOTIFICATION),
        HCMD_NAME(BEACON_TEMPLATE_CMD),
        HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
        HCMD_NAME(REDUCE_TX_POWER_CMD),
        HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
        HCMD_NAME(MAC_PM_POWER_TABLE),
        HCMD_NAME(MFUART_LOAD_NOTIFICATION),
        HCMD_NAME(RSS_CONFIG_CMD),
        HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
        HCMD_NAME(REPLY_RX_MPDU_CMD),
        HCMD_NAME(BA_NOTIF),
        HCMD_NAME(MCC_UPDATE_CMD),
        HCMD_NAME(MCC_CHUB_UPDATE_CMD),
        HCMD_NAME(MCAST_FILTER_CMD),
        HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
        HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
        HCMD_NAME(MATCH_FOUND_NOTIFICATION),
        HCMD_NAME(WOWLAN_PATTERNS),
        HCMD_NAME(WOWLAN_CONFIGURATION),
        HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
        HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
        HCMD_NAME(DEBUG_HOST_COMMAND),
        HCMD_NAME(LDBG_CONFIG_CMD),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_system_names[] = {
        HCMD_NAME(SHARED_MEM_CFG_CMD),
        HCMD_NAME(SOC_CONFIGURATION_CMD),
        HCMD_NAME(INIT_EXTENDED_CFG_CMD),
        HCMD_NAME(FW_ERROR_RECOVERY_CMD),
        HCMD_NAME(RFI_GET_FREQ_TABLE_CMD),
        HCMD_NAME(SYSTEM_STATISTICS_CMD),
        HCMD_NAME(SYSTEM_STATISTICS_END_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_reg_and_nvm_names[] = {
        HCMD_NAME(LARI_CONFIG_CHANGE),
        HCMD_NAME(NVM_GET_INFO),
        HCMD_NAME(TAS_CONFIG),
        HCMD_NAME(SAR_OFFSET_MAPPING_TABLE_CMD),
        HCMD_NAME(MCC_ALLOWED_AP_TYPE_CMD),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_debug_names[] = {
        HCMD_NAME(HOST_EVENT_CFG),
        HCMD_NAME(DBGC_SUSPEND_RESUME),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_mac_conf_names[] = {
        HCMD_NAME(LOW_LATENCY_CMD),
        HCMD_NAME(SESSION_PROTECTION_CMD),
        HCMD_NAME(MAC_CONFIG_CMD),
        HCMD_NAME(LINK_CONFIG_CMD),
        HCMD_NAME(STA_CONFIG_CMD),
        HCMD_NAME(AUX_STA_CMD),
        HCMD_NAME(STA_REMOVE_CMD),
        HCMD_NAME(ROC_CMD),
        HCMD_NAME(MISSED_BEACONS_NOTIF),
        HCMD_NAME(EMLSR_TRANS_FAIL_NOTIF),
        HCMD_NAME(ROC_NOTIF),
        HCMD_NAME(CHANNEL_SWITCH_ERROR_NOTIF),
        HCMD_NAME(SESSION_PROTECTION_NOTIF),
        HCMD_NAME(PROBE_RESPONSE_DATA_NOTIF),
        HCMD_NAME(CHANNEL_SWITCH_START_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_data_path_names[] = {
        HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
        HCMD_NAME(WNM_PLATFORM_PTM_REQUEST_CMD),
        HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD),
        HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
        HCMD_NAME(TLC_MNG_CONFIG_CMD),
        HCMD_NAME(RX_BAID_ALLOCATION_CONFIG_CMD),
        HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
        HCMD_NAME(OMI_SEND_STATUS_NOTIF),
        HCMD_NAME(ESR_MODE_NOTIF),
        HCMD_NAME(MONITOR_NOTIF),
        HCMD_NAME(TLC_MNG_UPDATE_NOTIF),
        HCMD_NAME(MU_GROUP_MGMT_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_location_names[] = {
        HCMD_NAME(TOF_RANGE_REQ_CMD),
        HCMD_NAME(TOF_RANGE_RESPONSE_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_phy_names[] = {
        HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
        HCMD_NAME(CTDP_CONFIG_CMD),
        HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
        HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD),
        HCMD_NAME(CT_KILL_NOTIFICATION),
        HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_statistics_names[] = {
        HCMD_NAME(STATISTICS_OPER_NOTIF),
        HCMD_NAME(STATISTICS_OPER_PART1_NOTIF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_prot_offload_names[] = {
        HCMD_NAME(STORED_BEACON_NTF),
};

/* Please keep this array *SORTED* by hex value.
 * Access is done through binary search
 */
static const struct iwl_hcmd_names iwl_mld_coex_names[] = {
        HCMD_NAME(PROFILE_NOTIF),
};

VISIBLE_IF_IWLWIFI_KUNIT
const struct iwl_hcmd_arr iwl_mld_groups[] = {
        [LEGACY_GROUP] = HCMD_ARR(iwl_mld_legacy_names),
        [LONG_GROUP] = HCMD_ARR(iwl_mld_legacy_names),
        [SYSTEM_GROUP] = HCMD_ARR(iwl_mld_system_names),
        [MAC_CONF_GROUP] = HCMD_ARR(iwl_mld_mac_conf_names),
        [DATA_PATH_GROUP] = HCMD_ARR(iwl_mld_data_path_names),
        [LOCATION_GROUP] = HCMD_ARR(iwl_mld_location_names),
        [REGULATORY_AND_NVM_GROUP] = HCMD_ARR(iwl_mld_reg_and_nvm_names),
        [DEBUG_GROUP] = HCMD_ARR(iwl_mld_debug_names),
        [PHY_OPS_GROUP] = HCMD_ARR(iwl_mld_phy_names),
        [STATISTICS_GROUP] = HCMD_ARR(iwl_mld_statistics_names),
        [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mld_prot_offload_names),
        [BT_COEX_GROUP] = HCMD_ARR(iwl_mld_coex_names),
};
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_groups);

#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
const unsigned int global_iwl_mld_goups_size = ARRAY_SIZE(iwl_mld_groups);
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(global_iwl_mld_goups_size);
#endif

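/* Build the transport configuration (RX buffer size, command groups,
 * no-reclaim commands, etc.) and hand it to the transport layer.
 */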
static void
iwl_mld_configure_trans(struct iwl_op_mode *op_mode)
{
        const struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
        static const u8 no_reclaim_cmds[] = {TX_CMD};
        struct iwl_trans_config trans_cfg = {
                .op_mode = op_mode,
                /* Rx is not supported yet, but add it to avoid warnings */
                .rx_buf_size = iwl_amsdu_size_to_rxb_size(),
                .command_groups = iwl_mld_groups,
                .command_groups_size = ARRAY_SIZE(iwl_mld_groups),
                .fw_reset_handshake = true,
                .queue_alloc_cmd_ver =
                        iwl_fw_lookup_cmd_ver(mld->fw,
                                              WIDE_ID(DATA_PATH_GROUP,
                                                      SCD_QUEUE_CONFIG_CMD),
                                              0),
                .no_reclaim_cmds = no_reclaim_cmds,
                .n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds),
                .cb_data_offs = offsetof(struct ieee80211_tx_info,
                                         driver_data[2]),
        };
        struct iwl_trans *trans = mld->trans;

        trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
        trans->iml = mld->fw->iml;
        trans->iml_len = mld->fw->iml_len;
        trans->wide_cmd_header = true;

        iwl_trans_configure(trans, &trans_cfg);
}

/*
 *****************************************************
 * op mode ops functions
 *****************************************************
 */

#define NUM_FW_LOAD_RETRIES 3
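/* Start the op mode: allocate the mac80211 hw together with the embedded
 * iwl_mld, read the BIOS/UEFI tables, configure the transport, load the
 * firmware once (and stop it again) to complete the init flows, and
 * finally register with mac80211.
 */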
static struct iwl_op_mode *
iwl_op_mode_mld_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
                      const struct iwl_fw *fw, struct dentry *dbgfs_dir)
{
        struct ieee80211_hw *hw;
        struct iwl_op_mode *op_mode;
        struct iwl_mld *mld;
        u32 eckv_value;
        int ret;

        /* Allocate and initialize a new hardware device */
        hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
                                sizeof(struct iwl_mld),
                                &iwl_mld_hw_ops);
        if (!hw)
                return ERR_PTR(-ENOMEM);

        op_mode = hw->priv;

        op_mode->ops = &iwl_mld_ops;

        mld = IWL_OP_MODE_GET_MLD(op_mode);

        iwl_construct_mld(mld, trans, cfg, fw, hw, dbgfs_dir);

        iwl_mld_construct_fw_runtime(mld, trans, fw, dbgfs_dir);

        iwl_mld_get_bios_tables(mld);
        iwl_uefi_get_sgom_table(trans, &mld->fwrt);
        iwl_uefi_get_step_table(trans);
        if (iwl_bios_get_eckv(&mld->fwrt, &eckv_value))
                IWL_DEBUG_RADIO(mld, "ECKV table doesn't exist in BIOS\n");
        else
                trans->ext_32khz_clock_valid = !!eckv_value;
        iwl_bios_setup_step(trans, &mld->fwrt);
        mld->bios_enable_puncturing = iwl_uefi_get_puncturing(&mld->fwrt);

        iwl_mld_hw_set_regulatory(mld);

        /* Configure transport layer with the opmode specific params */
        iwl_mld_configure_trans(op_mode);

        /* needed for regulatory init */
        rtnl_lock();
        /* Needed for sending commands */
        wiphy_lock(mld->wiphy);

        for (int i = 0; i < NUM_FW_LOAD_RETRIES; i++) {
                ret = iwl_mld_load_fw(mld);
                if (!ret)
                        break;
        }

        if (ret) {
                wiphy_unlock(mld->wiphy);
                rtnl_unlock();
                iwl_fw_flush_dumps(&mld->fwrt);
                goto err;
        }

        /* We are about to stop the FW. Notifications may require an
         * operational FW, so handle them all here before we stop.
         */
        wiphy_work_flush(mld->wiphy, &mld->async_handlers_wk);

        iwl_mld_stop_fw(mld);

        wiphy_unlock(mld->wiphy);
        rtnl_unlock();

        ret = iwl_mld_leds_init(mld);
        if (ret)
                goto free_nvm;

        ret = iwl_mld_alloc_scan_cmd(mld);
        if (ret)
                goto leds_exit;

        ret = iwl_mld_low_latency_init(mld);
        if (ret)
                goto free_scan_cmd;

        ret = iwl_mld_register_hw(mld);
        if (ret)
                goto low_latency_free;

        iwl_mld_toggle_tx_ant(mld, &mld->mgmt_tx_ant);

        iwl_mld_add_debugfs_files(mld, dbgfs_dir);
        iwl_mld_thermal_initialize(mld);

        iwl_mld_ptp_init(mld);

        return op_mode;

low_latency_free:
        iwl_mld_low_latency_free(mld);
free_scan_cmd:
        kfree(mld->scan.cmd);
leds_exit:
        iwl_mld_leds_exit(mld);
free_nvm:
        kfree(mld->nvm_data);
err:
        iwl_trans_op_mode_leave(mld->trans);
        ieee80211_free_hw(mld->hw);
        return ERR_PTR(ret);
}

static void
iwl_op_mode_mld_stop(struct iwl_op_mode *op_mode)
{
        struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);

        iwl_mld_ptp_remove(mld);
        iwl_mld_leds_exit(mld);

        wiphy_lock(mld->wiphy);
        iwl_mld_thermal_exit(mld);
        iwl_mld_low_latency_stop(mld);
        iwl_mld_deinit_time_sync(mld);
        wiphy_unlock(mld->wiphy);

        ieee80211_unregister_hw(mld->hw);

        iwl_fw_runtime_free(&mld->fwrt);
        iwl_mld_low_latency_free(mld);

        iwl_trans_op_mode_leave(mld->trans);

        kfree(mld->nvm_data);
        kfree(mld->scan.cmd);
        kfree(mld->error_recovery_buf);
        kfree(mld->mcast_filter_cmd);

        ieee80211_free_hw(mld->hw);
}

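/* Propagate a transport queue full/not-full event to the matching mac80211
 * TXQ. Internal queues are not mapped to mac80211, so a full internal queue
 * stops all queues until it is drained.
 */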
static void iwl_mld_queue_state_change(struct iwl_op_mode *op_mode,
                                       int hw_queue, bool queue_full)
{
        struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
        struct ieee80211_txq *txq;
        struct iwl_mld_sta *mld_sta;
        struct iwl_mld_txq *mld_txq;

        rcu_read_lock();

        txq = rcu_dereference(mld->fw_id_to_txq[hw_queue]);
        if (!txq) {
                rcu_read_unlock();

                if (queue_full) {
                        /* An internal queue is not expected to become full */
                        IWL_WARN(mld,
                                 "Internal hw_queue %d is full! stopping all queues\n",
                                 hw_queue);
                        /* Stop all queues, as an internal queue is not
                         * mapped to a mac80211 one
                         */
                        ieee80211_stop_queues(mld->hw);
                } else {
                        ieee80211_wake_queues(mld->hw);
                }

                return;
        }

        mld_txq = iwl_mld_txq_from_mac80211(txq);
        mld_sta = txq->sta ? iwl_mld_sta_from_mac80211(txq->sta) : NULL;

        mld_txq->status.stop_full = queue_full;

        if (!queue_full && mld_sta &&
            mld_sta->sta_state != IEEE80211_STA_NOTEXIST) {
                local_bh_disable();
                iwl_mld_tx_from_txq(mld, txq);
                local_bh_enable();
        }

        rcu_read_unlock();
}

static void
iwl_mld_queue_full(struct iwl_op_mode *op_mode, int hw_queue)
{
        iwl_mld_queue_state_change(op_mode, hw_queue, true);
}

static void
iwl_mld_queue_not_full(struct iwl_op_mode *op_mode, int hw_queue)
{
        iwl_mld_queue_state_change(op_mode, hw_queue, false);
}

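/* Notification from the transport about a change in the HW RF-kill state */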
static bool
iwl_mld_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
        struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);

        iwl_mld_set_hwkill(mld, state);

        return false;
}

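/* Free a TX skb on behalf of the transport: release the attached device
 * TX command first, then hand the skb back to mac80211.
 */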
static void
iwl_mld_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
{
        struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);
        ieee80211_free_txskb(mld->hw, skb);
}

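/* Copy the firmware's error recovery buffer out of device memory and keep
 * it for the restart flow.
 */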
static void iwl_mld_read_error_recovery_buffer(struct iwl_mld *mld)
{
        u32 src_size = mld->fw->ucode_capa.error_log_size;
        u32 src_addr = mld->fw->ucode_capa.error_log_addr;
        u8 *recovery_buf;
        int ret;

        /* no recovery buffer size defined in a TLV */
        if (!src_size)
                return;

        recovery_buf = kzalloc(src_size, GFP_ATOMIC);
        if (!recovery_buf)
                return;

        ret = iwl_trans_read_mem_bytes(mld->trans, src_addr,
                                       recovery_buf, src_size);
        if (ret) {
                IWL_ERR(mld, "Failed to read error recovery buffer (%d)\n",
                        ret);
                kfree(recovery_buf);
                return;
        }

        mld->error_recovery_buf = recovery_buf;
}

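/* Restart the NIC after a firmware error: save the error recovery buffer,
 * clear the restart_required debug flag and ask mac80211 to restart the HW.
 */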
static void iwl_mld_restart_nic(struct iwl_mld *mld)
{
        iwl_mld_read_error_recovery_buffer(mld);

        mld->fwrt.trans->dbg.restart_required = false;

        ieee80211_restart_hw(mld->hw);
}

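/* First stage of FW error handling: dump the error logs, abort any ongoing
 * scan and mark that a HW restart is in progress.
 */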
static void
iwl_mld_nic_error(struct iwl_op_mode *op_mode,
                  enum iwl_fw_error_type type)
{
        struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);
        bool trans_dead = test_bit(STATUS_TRANS_DEAD, &mld->trans->status);

        if (type == IWL_ERR_TYPE_CMD_QUEUE_FULL)
                IWL_ERR(mld, "Command queue full!\n");
        else if (!trans_dead && !mld->fw_status.do_not_dump_once)
                iwl_fwrt_dump_error_logs(&mld->fwrt);

        mld->fw_status.do_not_dump_once = false;

        /* It is necessary to abort any OS scan here because mac80211 requires
         * having the scan cleared before restarting.
         * We'll reset the scan_status to NONE in restart cleanup in
         * the next drv_start() call from mac80211. If ieee80211_hw_restart()
         * isn't called, the scan status will stay busy.
         */
        iwl_mld_report_scan_aborted(mld);

        /*
         * This should be the first thing done before trying to collect any
         * data, to avoid endless loops if a HW error happens while collecting
         * debug data.
         * It might not actually be true that we'll restart, but the
         * setting doesn't matter if we're going to be unbound anyway.
         */
        if (type != IWL_ERR_TYPE_RESET_HS_TIMEOUT)
                mld->fw_status.in_hw_restart = true;
}

static void iwl_mld_dump_error(struct iwl_op_mode *op_mode,
                               struct iwl_fw_error_dump_mode *mode)
{
        struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);

        /* if we come in from the op mode, the wiphy mutex is already held */
        if (mode->context == IWL_ERR_CONTEXT_FROM_OPMODE) {
                lockdep_assert_wiphy(mld->wiphy);
                iwl_fw_error_collect(&mld->fwrt);
        } else {
                wiphy_lock(mld->wiphy);
                if (mode->context != IWL_ERR_CONTEXT_ABORT)
                        iwl_fw_error_collect(&mld->fwrt);
                wiphy_unlock(mld->wiphy);
        }
}

static bool iwl_mld_sw_reset(struct iwl_op_mode *op_mode,
                             enum iwl_fw_error_type type)
{
        struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);

        /* Do a restart only if the following conditions are met:
         * - we consider the FW as running
         * - the trigger that brought us here is defined as one that requires
         *   a restart (in the debug TLVs)
         */
        if (!mld->fw_status.running || !mld->fwrt.trans->dbg.restart_required)
                return false;

        iwl_mld_restart_nic(mld);
        return true;
}

static void
iwl_mld_time_point(struct iwl_op_mode *op_mode,
                   enum iwl_fw_ini_time_point tp_id,
                   union iwl_dbg_tlv_tp_data *tp_data)
{
        struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);

        iwl_dbg_tlv_time_point(&mld->fwrt, tp_id, tp_data);
}

#ifdef CONFIG_PM_SLEEP
static void iwl_mld_device_powered_off(struct iwl_op_mode *op_mode)
{
        struct iwl_mld *mld = IWL_OP_MODE_GET_MLD(op_mode);

        wiphy_lock(mld->wiphy);
        mld->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
        iwl_mld_stop_fw(mld);
        mld->fw_status.in_d3 = false;
        wiphy_unlock(mld->wiphy);
}
#else
static void iwl_mld_device_powered_off(struct iwl_op_mode *op_mode)
{}
#endif

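/* op mode callbacks registered via iwl_opmode_register() at module init */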
static const struct iwl_op_mode_ops iwl_mld_ops = {
        .start = iwl_op_mode_mld_start,
        .stop = iwl_op_mode_mld_stop,
        .rx = iwl_mld_rx,
        .rx_rss = iwl_mld_rx_rss,
        .queue_full = iwl_mld_queue_full,
        .queue_not_full = iwl_mld_queue_not_full,
        .hw_rf_kill = iwl_mld_set_hw_rfkill_state,
        .free_skb = iwl_mld_free_skb,
        .nic_error = iwl_mld_nic_error,
        .dump_error = iwl_mld_dump_error,
        .sw_reset = iwl_mld_sw_reset,
        .time_point = iwl_mld_time_point,
        .device_powered_off = pm_sleep_ptr(iwl_mld_device_powered_off),
};

struct iwl_mld_mod_params iwlmld_mod_params = {
        .power_scheme = IWL_POWER_SCHEME_BPS,
};

module_param_named(power_scheme, iwlmld_mod_params.power_scheme, int, 0444);
MODULE_PARM_DESC(power_scheme,
                 "power management scheme: 1-active, 2-balanced, default: 2");