// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */

#include "mld.h"

#include "fw/api/alive.h"
#include "fw/api/scan.h"
#include "fw/api/rx.h"
#include "fw/dbg.h"
#include "fw/pnvm.h"
#include "hcmd.h"
#include "iwl-nvm-parse.h"
#include "power.h"
#include "mcc.h"
#include "led.h"
#include "coex.h"
#include "regulatory.h"
#include "thermal.h"

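/* Tell the firmware which TX antennas it is allowed to use */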
static int iwl_mld_send_tx_ant_cfg(struct iwl_mld *mld)
{
	struct iwl_tx_ant_cfg_cmd cmd;

	lockdep_assert_wiphy(mld->wiphy);

	cmd.valid = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld));

	IWL_DEBUG_FW(mld, "select valid tx ant: %u\n", cmd.valid);

	return iwl_mld_send_cmd_pdu(mld, TX_ANT_CONFIGURATION_CMD, &cmd);
}

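/* Configure RSS: spread RX traffic across all RX queues except queue 0,
 * which is kept as the fallback queue, and program a random hash key.
 */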
static int iwl_mld_send_rss_cfg_cmd(struct iwl_mld *mld)
{
	struct iwl_rss_config_cmd cmd = {
		.flags = cpu_to_le32(IWL_RSS_ENABLE),
		.hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
	};

	lockdep_assert_wiphy(mld->wiphy);

	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
	for (int i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
		cmd.indirection_table[i] =
			1 + (i % (mld->trans->num_rx_queues - 1));
	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));

	return iwl_mld_send_cmd_pdu(mld, RSS_CONFIG_CMD, &cmd);
}

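/* Advertise the valid TX/RX chains to the firmware scan engine */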
static int iwl_mld_config_scan(struct iwl_mld *mld)
{
	struct iwl_scan_config cmd = {
		.tx_chains = cpu_to_le32(iwl_mld_get_valid_tx_ant(mld)),
		.rx_chains = cpu_to_le32(iwl_mld_get_valid_rx_ant(mld))
	};

	return iwl_mld_send_cmd_pdu(mld, WIDE_ID(LONG_GROUP, SCAN_CFG_CMD),
				    &cmd);
}

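/* Copy the IMR info from the alive notification into the transport
 * debug data; if IMR is disabled, mark the DRAM IMR dump region as
 * unsupported so it is skipped during debug collection.
 */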
static void iwl_mld_alive_imr_data(struct iwl_trans *trans,
				   const struct iwl_imr_alive_info *imr_info)
{
	struct iwl_imr_data *imr_data = &trans->dbg.imr_data;

	imr_data->imr_enable = le32_to_cpu(imr_info->enabled);
	imr_data->imr_size = le32_to_cpu(imr_info->size);
	imr_data->imr2sram_remainbyte = imr_data->imr_size;
	imr_data->imr_base_addr = imr_info->base_addr;
	imr_data->imr_curr_addr = le64_to_cpu(imr_data->imr_base_addr);

	if (imr_data->imr_enable)
		return;

	for (int i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
		struct iwl_fw_ini_region_tlv *reg;

		if (!trans->dbg.active_regions[i])
			continue;

		reg = (void *)trans->dbg.active_regions[i]->data;

		/* We have only one DRAM IMR region, so we
		 * can break as soon as we find the first
		 * one.
		 */
		if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) {
			trans->dbg.unsupported_region_msk |= BIT(i);
			break;
		}
	}
}

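/* Handler for the UCODE_ALIVE_NTFY notification: validates the
 * notification version and size, saves the SKU ID and the LMAC/UMAC
 * error table pointers, and reports via the bool pointed to by @data
 * whether the firmware declared itself alive
 * (status == IWL_ALIVE_STATUS_OK).
 */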
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	struct iwl_mld *mld =
		container_of(notif_wait, struct iwl_mld, notif_wait);
	struct iwl_trans *trans = mld->trans;
	u32 version = iwl_fw_lookup_notif_ver(mld->fw, LEGACY_GROUP,
					      UCODE_ALIVE_NTFY, 0);
	struct iwl_alive_ntf_v6 *palive;
	bool *alive_valid = data;
	struct iwl_umac_alive *umac;
	struct iwl_lmac_alive *lmac1;
	struct iwl_lmac_alive *lmac2 = NULL;
	u32 lmac_error_event_table;
	u32 umac_error_table;
	u16 status;

	if (version < 6 || version > 7 || pkt_len != sizeof(*palive))
		return false;

	palive = (void *)pkt->data;

	iwl_mld_alive_imr_data(trans, &palive->imr);

	umac = &palive->umac_data;
	lmac1 = &palive->lmac_data[0];
	lmac2 = &palive->lmac_data[1];
	status = le16_to_cpu(palive->status);

	trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]);
	trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]);
	trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]);

	IWL_DEBUG_FW(mld, "Got sku_id: 0x0%x 0x0%x 0x0%x\n",
		     trans->sku_id[0], trans->sku_id[1], trans->sku_id[2]);

	lmac_error_event_table =
		le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
	iwl_fw_lmac1_set_alive_err_table(trans, lmac_error_event_table);

	if (lmac2)
		trans->dbg.lmac_error_event_table[1] =
			le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);

	umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) &
		~FW_ADDR_CACHE_CONTROL;

	if (umac_error_table >= trans->cfg->min_umac_error_event_table)
		iwl_fw_umac_set_alive_err_table(trans, umac_error_table);
	else
		IWL_ERR(mld, "Not valid error log pointer 0x%08X\n",
			umac_error_table);

	*alive_valid = status == IWL_ALIVE_STATUS_OK;

	IWL_DEBUG_FW(mld,
		     "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
		     status, lmac1->ver_type, lmac1->ver_subtype);

	if (lmac2)
		IWL_DEBUG_FW(mld, "Alive ucode CDB\n");

	IWL_DEBUG_FW(mld,
		     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
		     le32_to_cpu(umac->umac_major),
		     le32_to_cpu(umac->umac_minor));

	if (version >= 7)
		IWL_DEBUG_FW(mld, "FW alive flags 0x%x\n",
			     le16_to_cpu(palive->flags));

	iwl_fwrt_update_fw_versions(&mld->fwrt, lmac1, umac);

	return true;
}

#define MLD_ALIVE_TIMEOUT		(2 * HZ)
#define MLD_INIT_COMPLETE_TIMEOUT	(2 * HZ)

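/* Dump secure boot status, a few WFPM/HPM/OTP registers and the
 * firmware program counters to help debug a missing alive
 * notification.
 */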
static void iwl_mld_print_alive_notif_timeout(struct iwl_mld *mld)
{
	struct iwl_trans *trans = mld->trans;
	struct iwl_pc_data *pc_data;
	u8 count;

	IWL_ERR(mld,
		"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
		iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
		iwl_read_umac_prph(trans,
				   UMAG_SB_CPU_2_STATUS));
#define IWL_FW_PRINT_REG_INFO(reg_name) \
	IWL_ERR(mld, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name))

	IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION);

	IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE);

	/* print OTP info */
	IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR);
	IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA);
#undef IWL_FW_PRINT_REG_INFO

	pc_data = trans->dbg.pc_data;
	for (count = 0; count < trans->dbg.num_pc; count++, pc_data++)
		IWL_ERR(mld, "%s: 0x%x\n", pc_data->pc_name,
			pc_data->pc_address);
}

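/* Start the firmware image and wait (up to MLD_ALIVE_TIMEOUT) for the
 * alive notification; on failure, collect debug data and stop the
 * device.
 */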
static int iwl_mld_load_fw_wait_alive(struct iwl_mld *mld)
{
	const struct fw_img *fw =
		iwl_get_ucode_image(mld->fw, IWL_UCODE_REGULAR);
	static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
	struct iwl_notification_wait alive_wait;
	bool alive_valid = false;
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	iwl_init_notification_wait(&mld->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_valid);

	iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);

	ret = iwl_trans_start_fw(mld->trans, fw, true);
	if (ret) {
		iwl_remove_notification(&mld->notif_wait, &alive_wait);
		return ret;
	}

	ret = iwl_wait_notification(&mld->notif_wait, &alive_wait,
				    MLD_ALIVE_TIMEOUT);

	if (ret) {
		if (ret == -ETIMEDOUT)
			iwl_fw_dbg_error_collect(&mld->fwrt,
						 FW_DBG_TRIGGER_ALIVE_TIMEOUT);
		iwl_mld_print_alive_notif_timeout(mld);
		goto alive_failure;
	}

	if (!alive_valid) {
		IWL_ERR(mld, "Loaded firmware is not valid!\n");
		ret = -EIO;
		goto alive_failure;
	}

	iwl_trans_fw_alive(mld->trans, 0);

	return 0;

alive_failure:
	iwl_trans_stop_device(mld->trans);
	return ret;
}

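/* Run the firmware init sequence: load the image and wait for alive,
 * load the PNVM, send the extended init configuration, wait for
 * INIT_COMPLETE_NOTIF and read the NVM if it wasn't read before.
 */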
static int iwl_mld_run_fw_init_sequence(struct iwl_mld *mld)
{
	struct iwl_notification_wait init_wait;
	struct iwl_init_extended_cfg_cmd init_cfg = {};
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
	};
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_mld_load_fw_wait_alive(mld);
	if (ret)
		return ret;

	mld->trans->step_urm =
		!!(iwl_read_umac_prph(mld->trans, CNVI_PMU_STEP_FLOW) &
		   CNVI_PMU_STEP_FLOW_FORCE_URM);

	ret = iwl_pnvm_load(mld->trans, &mld->notif_wait,
			    &mld->fw->ucode_capa);
	if (ret) {
		IWL_ERR(mld, "Timeout waiting for PNVM load %d\n", ret);
		goto init_failure;
	}

	iwl_dbg_tlv_time_point(&mld->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
			       NULL);

	iwl_init_notification_wait(&mld->notif_wait,
				   &init_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   NULL, NULL);

	ret = iwl_mld_send_cmd_pdu(mld,
				   WIDE_ID(SYSTEM_GROUP, INIT_EXTENDED_CFG_CMD),
				   &init_cfg);
	if (ret) {
		IWL_ERR(mld, "Failed to send init config command: %d\n", ret);
		iwl_remove_notification(&mld->notif_wait, &init_wait);
		goto init_failure;
	}

	ret = iwl_wait_notification(&mld->notif_wait, &init_wait,
				    MLD_INIT_COMPLETE_TIMEOUT);
	if (ret) {
		IWL_ERR(mld, "Failed to get INIT_COMPLETE %d\n", ret);
		goto init_failure;
	}

	if (!mld->nvm_data) {
		mld->nvm_data = iwl_get_nvm(mld->trans, mld->fw, 0, 0);
		if (IS_ERR(mld->nvm_data)) {
			ret = PTR_ERR(mld->nvm_data);
			mld->nvm_data = NULL;
			IWL_ERR(mld, "Failed to read NVM: %d\n", ret);
			goto init_failure;
		}
	}

	return 0;

init_failure:
	iwl_trans_stop_device(mld->trans);
	return ret;
}

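/* Bring up the hardware, run the firmware init sequence and initialize
 * MCC (regulatory) handling; mark the firmware as running on success.
 */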
int iwl_mld_load_fw(struct iwl_mld *mld)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_trans_start_hw(mld->trans);
	if (ret)
		goto err;

	ret = iwl_mld_run_fw_init_sequence(mld);
	if (ret)
		goto err;

	ret = iwl_mld_init_mcc(mld);
	if (ret)
		goto err;

	mld->fw_status.running = true;

	return 0;
err:
	iwl_mld_stop_fw(mld);
	return ret;
}

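/* Stop the firmware and the device, and clean up pending notification
 * waiters and queued async handlers.
 */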
void iwl_mld_stop_fw(struct iwl_mld *mld)
{
	lockdep_assert_wiphy(mld->wiphy);

	iwl_abort_notification_waits(&mld->notif_wait);

	iwl_fw_dbg_stop_sync(&mld->fwrt);

	iwl_trans_stop_device(mld->trans);

	wiphy_work_cancel(mld->wiphy, &mld->async_handlers_wk);

	iwl_mld_purge_async_handlers_list(mld);

	mld->fw_status.running = false;
}

static void iwl_mld_restart_disconnect_iter(void *data, u8 *mac,
					    struct ieee80211_vif *vif)
{
	if (vif->type == NL80211_IFTYPE_STATION)
		ieee80211_hw_restart_disconnect(vif);
}

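/* Send the error recovery command to the firmware. With
 * ERROR_RECOVERY_UPDATE_DB, the previously saved recovery buffer is
 * attached; if the firmware rejects the blob, all station interfaces
 * are forced to disconnect.
 */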
void iwl_mld_send_recovery_cmd(struct iwl_mld *mld, u32 flags)
{
	u32 error_log_size = mld->fw->ucode_capa.error_log_size;
	struct iwl_fw_error_recovery_cmd recovery_cmd = {
		.flags = cpu_to_le32(flags),
	};
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
		.flags = CMD_WANT_SKB,
		.data = {&recovery_cmd, },
		.len = {sizeof(recovery_cmd), },
	};
	int ret;

	/* no error log was defined in TLV */
	if (!error_log_size)
		return;

	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		/* no buf was allocated upon NIC error */
		if (!mld->error_recovery_buf)
			return;

		cmd.data[1] = mld->error_recovery_buf;
		cmd.len[1] = error_log_size;
		cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
		recovery_cmd.buf_size = cpu_to_le32(error_log_size);
	}

	ret = iwl_mld_send_cmd(mld, &cmd);

	/* we no longer need the recovery buffer */
	kfree(mld->error_recovery_buf);
	mld->error_recovery_buf = NULL;

	if (ret) {
		IWL_ERR(mld, "Failed to send recovery cmd %d\n", ret);
		return;
	}

	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		struct iwl_rx_packet *pkt = cmd.resp_pkt;
		u32 pkt_len = iwl_rx_packet_payload_len(pkt);
		u32 resp;

		if (IWL_FW_CHECK(mld, pkt_len != sizeof(resp),
				 "Unexpected recovery cmd response size %u (expected %zu)\n",
				 pkt_len, sizeof(resp)))
			goto out;

		resp = le32_to_cpup((__le32 *)cmd.resp_pkt->data);
		if (!resp)
			goto out;

		IWL_ERR(mld,
			"Failed to send recovery cmd blob was invalid %d\n",
			resp);

		ieee80211_iterate_interfaces(mld->hw, 0,
					     iwl_mld_restart_disconnect_iter,
					     NULL);
	}

out:
	iwl_free_resp(&cmd);
}

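/* Send the initial set of configuration commands once the firmware is
 * up: antennas, coex, SoC latency, LARI, thermal, RX queues, RSS,
 * scan, device power, recovery (on HW restart), LED, PPAG, SAR, SGOM,
 * TAS and UATS.
 */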
static int iwl_mld_config_fw(struct iwl_mld *mld)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	iwl_fw_disable_dbg_asserts(&mld->fwrt);
	iwl_get_shared_mem_conf(&mld->fwrt);

	ret = iwl_mld_send_tx_ant_cfg(mld);
	if (ret)
		return ret;

	ret = iwl_mld_send_bt_init_conf(mld);
	if (ret)
		return ret;

	ret = iwl_set_soc_latency(&mld->fwrt);
	if (ret)
		return ret;

	iwl_mld_configure_lari(mld);

	ret = iwl_mld_config_temp_report_ths(mld);
	if (ret)
		return ret;

#ifdef CONFIG_THERMAL
	ret = iwl_mld_config_ctdp(mld, mld->cooling_dev.cur_state,
				  CTDP_CMD_OPERATION_START);
	if (ret)
		return ret;
#endif

	ret = iwl_configure_rxq(&mld->fwrt);
	if (ret)
		return ret;

	ret = iwl_mld_send_rss_cfg_cmd(mld);
	if (ret)
		return ret;

	ret = iwl_mld_config_scan(mld);
	if (ret)
		return ret;

	ret = iwl_mld_update_device_power(mld, false);
	if (ret)
		return ret;

	if (mld->fw_status.in_hw_restart) {
		iwl_mld_send_recovery_cmd(mld, ERROR_RECOVERY_UPDATE_DB);
		iwl_mld_time_sync_fw_config(mld);
	}

	iwl_mld_led_config_fw(mld);

	ret = iwl_mld_init_ppag(mld);
	if (ret)
		return ret;

	ret = iwl_mld_init_sar(mld);
	if (ret)
		return ret;

	ret = iwl_mld_init_sgom(mld);
	if (ret)
		return ret;

	iwl_mld_init_tas(mld);
	iwl_mld_init_uats(mld);

	return 0;
}

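/* Load the firmware and apply the runtime configuration; on any
 * failure the firmware is stopped again.
 */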
int iwl_mld_start_fw(struct iwl_mld *mld)
{
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	ret = iwl_mld_load_fw(mld);
	if (IWL_FW_CHECK(mld, ret, "Failed to start firmware %d\n", ret)) {
		iwl_fw_dbg_error_collect(&mld->fwrt, FW_DBG_TRIGGER_DRIVER);
		goto error;
	}

	IWL_DEBUG_INFO(mld, "uCode started.\n");

	ret = iwl_mld_config_fw(mld);
	if (ret)
		goto error;

	return 0;

error:
	iwl_mld_stop_fw(mld);
	return ret;
}