xref: /linux/drivers/net/wireless/ath/ath12k/core.c (revision b803c4a4f78834b31ebfbbcea350473333760559)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <linux/module.h>
8 #include <linux/slab.h>
9 #include <linux/remoteproc.h>
10 #include <linux/firmware.h>
11 #include <linux/of.h>
12 #include <linux/of_graph.h>
13 #include "ahb.h"
14 #include "core.h"
15 #include "dp_tx.h"
16 #include "dp_rx.h"
17 #include "debug.h"
18 #include "debugfs.h"
19 #include "fw.h"
20 #include "hif.h"
21 #include "pci.h"
22 #include "wow.h"
23 
24 static int ahb_err, pci_err;
25 unsigned int ath12k_debug_mask;
26 module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
27 MODULE_PARM_DESC(debug_mask, "Debugging mask");
28 
29 bool ath12k_ftm_mode;
30 module_param_named(ftm_mode, ath12k_ftm_mode, bool, 0444);
31 MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");
32 
33 /* protected with ath12k_hw_group_mutex */
34 static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);
35 
36 static DEFINE_MUTEX(ath12k_hw_group_mutex);
37 
38 static int ath12k_core_rfkill_config(struct ath12k_base *ab)
39 {
40 	struct ath12k *ar;
41 	int ret = 0, i;
42 
43 	if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
44 		return 0;
45 
46 	if (ath12k_acpi_get_disable_rfkill(ab))
47 		return 0;
48 
49 	for (i = 0; i < ab->num_radios; i++) {
50 		ar = ab->pdevs[i].ar;
51 
52 		ret = ath12k_mac_rfkill_config(ar);
53 		if (ret && ret != -EOPNOTSUPP) {
54 			ath12k_warn(ab, "failed to configure rfkill: %d", ret);
55 			return ret;
56 		}
57 	}
58 
59 	return ret;
60 }
61 
62 /* Check if we need to continue with suspend/resume operation.
63  * Return:
64  *	a negative value: error happens and don't continue.
65  *	0:  no error but don't continue.
66  *	positive value: no error and do continue.
67  */
68 static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
69 {
70 	struct ath12k *ar;
71 
72 	if (!ab->hw_params->supports_suspend)
73 		return -EOPNOTSUPP;
74 
75 	/* so far single_pdev_only chips have supports_suspend as true
76 	 * so pass 0 as a dummy pdev_id here.
77 	 */
78 	ar = ab->pdevs[0].ar;
79 	if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
80 		return 0;
81 
82 	return 1;
83 }
84 
85 int ath12k_core_suspend(struct ath12k_base *ab)
86 {
87 	struct ath12k *ar;
88 	int ret, i;
89 
90 	ret = ath12k_core_continue_suspend_resume(ab);
91 	if (ret <= 0)
92 		return ret;
93 
94 	for (i = 0; i < ab->num_radios; i++) {
95 		ar = ab->pdevs[i].ar;
96 		if (!ar)
97 			continue;
98 
99 		wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
100 
101 		ret = ath12k_mac_wait_tx_complete(ar);
102 		if (ret) {
103 			wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
104 			ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
105 			return ret;
106 		}
107 
108 		wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
109 	}
110 
111 	/* PM framework skips suspend_late/resume_early callbacks
112 	 * if other devices report errors in their suspend callbacks.
113 	 * However ath12k_core_resume() would still be called because
114 	 * here we return success thus kernel put us on dpm_suspended_list.
115 	 * Since we won't go through a power down/up cycle, there is
116 	 * no chance to call complete(&ab->restart_completed) in
117 	 * ath12k_core_restart(), making ath12k_core_resume() timeout.
118 	 * So call it here to avoid this issue. This also works in case
119 	 * no error happens thus suspend_late/resume_early get called,
120 	 * because it will be reinitialized in ath12k_core_resume_early().
121 	 */
122 	complete(&ab->restart_completed);
123 
124 	return 0;
125 }
126 EXPORT_SYMBOL(ath12k_core_suspend);
127 
128 int ath12k_core_suspend_late(struct ath12k_base *ab)
129 {
130 	int ret;
131 
132 	ret = ath12k_core_continue_suspend_resume(ab);
133 	if (ret <= 0)
134 		return ret;
135 
136 	ath12k_acpi_stop(ab);
137 
138 	ath12k_hif_irq_disable(ab);
139 	ath12k_hif_ce_irq_disable(ab);
140 
141 	ath12k_hif_power_down(ab, true);
142 
143 	return 0;
144 }
145 EXPORT_SYMBOL(ath12k_core_suspend_late);
146 
147 int ath12k_core_resume_early(struct ath12k_base *ab)
148 {
149 	int ret;
150 
151 	ret = ath12k_core_continue_suspend_resume(ab);
152 	if (ret <= 0)
153 		return ret;
154 
155 	reinit_completion(&ab->restart_completed);
156 	ret = ath12k_hif_power_up(ab);
157 	if (ret)
158 		ath12k_warn(ab, "failed to power up hif during resume: %d\n", ret);
159 
160 	return ret;
161 }
162 EXPORT_SYMBOL(ath12k_core_resume_early);
163 
164 int ath12k_core_resume(struct ath12k_base *ab)
165 {
166 	long time_left;
167 	int ret;
168 
169 	ret = ath12k_core_continue_suspend_resume(ab);
170 	if (ret <= 0)
171 		return ret;
172 
173 	time_left = wait_for_completion_timeout(&ab->restart_completed,
174 						ATH12K_RESET_TIMEOUT_HZ);
175 	if (time_left == 0) {
176 		ath12k_warn(ab, "timeout while waiting for restart complete");
177 		return -ETIMEDOUT;
178 	}
179 
180 	return 0;
181 }
182 EXPORT_SYMBOL(ath12k_core_resume);
183 
184 static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
185 					   size_t name_len, bool with_variant,
186 					   bool bus_type_mode, bool with_default)
187 {
188 	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
189 	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
190 
191 	if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
192 		scnprintf(variant, sizeof(variant), ",variant=%s",
193 			  ab->qmi.target.bdf_ext);
194 
195 	switch (ab->id.bdf_search) {
196 	case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
197 		if (bus_type_mode)
198 			scnprintf(name, name_len,
199 				  "bus=%s",
200 				  ath12k_bus_str(ab->hif.bus));
201 		else
202 			scnprintf(name, name_len,
203 				  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
204 				  ath12k_bus_str(ab->hif.bus),
205 				  ab->id.vendor, ab->id.device,
206 				  ab->id.subsystem_vendor,
207 				  ab->id.subsystem_device,
208 				  ab->qmi.target.chip_id,
209 				  ab->qmi.target.board_id,
210 				  variant);
211 		break;
212 	default:
213 		scnprintf(name, name_len,
214 			  "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
215 			  ath12k_bus_str(ab->hif.bus),
216 			  ab->qmi.target.chip_id,
217 			  with_default ?
218 			  ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id,
219 			  variant);
220 		break;
221 	}
222 
223 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);
224 
225 	return 0;
226 }
227 
/* Primary board name: full id set including the variant suffix */
static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
					 size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, true, false, false);
}
233 
/* Fallback board name: no variant suffix, default board id */
static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
						  size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, false, false, true);
}
239 
/* Bus-only board name ("bus=<bus>"), used as the last regdb fallback */
static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
						  size_t name_len)
{
	return __ath12k_core_create_board_name(ab, name, name_len, false, true, true);
}
245 
246 const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
247 						    const char *file)
248 {
249 	const struct firmware *fw;
250 	char path[100];
251 	int ret;
252 
253 	if (!file)
254 		return ERR_PTR(-ENOENT);
255 
256 	ath12k_core_create_firmware_path(ab, file, path, sizeof(path));
257 
258 	ret = firmware_request_nowarn(&fw, path, ab->dev);
259 	if (ret)
260 		return ERR_PTR(ret);
261 
262 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot firmware request %s size %zu\n",
263 		   path, fw->size);
264 
265 	return fw;
266 }
267 
/* Release the firmware backing a board-data descriptor and clear it.
 * bd->fw may hold an ERR_PTR from a failed request, hence the IS_ERR
 * guard before release_firmware().
 */
void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	if (!IS_ERR(bd->fw))
		release_firmware(bd->fw);

	memset(bd, 0, sizeof(*bd));
}
275 
/* Scan a BOARD/REGDB container for an entry matching @boardname.
 *
 * @buf/@buf_len describe the payload of one top-level IE, itself a
 * sequence of (name, data) sub-IEs.  A data IE (@data_id) is accepted
 * only if the immediately preceding name IE (@name_id) matched
 * @boardname.  On success bd->data/bd->len point into bd->fw's buffer;
 * no copy is made.
 *
 * Returns 0 on match, -ENOENT if nothing matched, -EINVAL on a
 * malformed container.
 */
static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
					 struct ath12k_board_data *bd,
					 const void *buf, size_t buf_len,
					 const char *boardname,
					 int ie_id,
					 int name_id,
					 int data_id)
{
	const struct ath12k_fw_ie *hdr;
	bool name_match_found;
	int ret, board_ie_id;
	size_t board_ie_len;
	const void *board_ie_data;

	name_match_found = false;

	/* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
	while (buf_len > sizeof(struct ath12k_fw_ie)) {
		hdr = buf;
		board_ie_id = le32_to_cpu(hdr->id);
		board_ie_len = le32_to_cpu(hdr->len);
		board_ie_data = hdr->data;

		buf_len -= sizeof(*hdr);
		buf += sizeof(*hdr);

		/* payloads are padded to 4 bytes; reject a length field
		 * that would run past the end of the buffer
		 */
		if (buf_len < ALIGN(board_ie_len, 4)) {
			ath12k_err(ab, "invalid %s length: %zu < %zu\n",
				   ath12k_bd_ie_type_str(ie_id),
				   buf_len, ALIGN(board_ie_len, 4));
			ret = -EINVAL;
			goto out;
		}

		if (board_ie_id == name_id) {
			ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
					board_ie_data, board_ie_len);

			/* names are not NUL-terminated: compare length
			 * first, then the raw bytes
			 */
			if (board_ie_len != strlen(boardname))
				goto next;

			ret = memcmp(board_ie_data, boardname, strlen(boardname));
			if (ret)
				goto next;

			name_match_found = true;
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found match %s for name '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);
		} else if (board_ie_id == data_id) {
			if (!name_match_found)
				/* no match found */
				goto next;

			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "boot found %s for '%s'",
				   ath12k_bd_ie_type_str(ie_id),
				   boardname);

			/* points into the firmware blob; valid only as
			 * long as bd->fw is held
			 */
			bd->data = board_ie_data;
			bd->len = board_ie_len;

			ret = 0;
			goto out;
		} else {
			ath12k_warn(ab, "unknown %s id found: %d\n",
				    ath12k_bd_ie_type_str(ie_id),
				    board_ie_id);
		}
next:
		/* jump over the padding */
		board_ie_len = ALIGN(board_ie_len, 4);

		buf_len -= board_ie_len;
		buf += board_ie_len;
	}

	/* no match found */
	ret = -ENOENT;

out:
	return ret;
}
360 
/* Load board-2.bin and locate the blob matching @boardname.
 *
 * File layout: NUL-terminated magic string (padded to 4 bytes), then a
 * sequence of top-level IEs.  IEs with id @ie_id_match contain
 * (name, data) sub-IEs resolved by ath12k_core_parse_bd_ie_board().
 * On success bd->data/bd->len reference the matched blob inside bd->fw;
 * on any failure the firmware reference is dropped via
 * ath12k_core_free_bdf().
 */
static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
					      struct ath12k_board_data *bd,
					      const char *boardname,
					      int ie_id_match,
					      int name_id,
					      int data_id)
{
	size_t len, magic_len;
	const u8 *data;
	char *filename, filepath[100];
	size_t ie_len;
	struct ath12k_fw_ie *hdr;
	int ret, ie_id;

	filename = ATH12K_BOARD_API2_FILE;

	/* the caller may already hold the file from a previous attempt
	 * with a different board name
	 */
	if (!bd->fw)
		bd->fw = ath12k_core_firmware_request(ab, filename);

	if (IS_ERR(bd->fw))
		return PTR_ERR(bd->fw);

	data = bd->fw->data;
	len = bd->fw->size;

	ath12k_core_create_firmware_path(ab, filename,
					 filepath, sizeof(filepath));

	/* magic has extra null byte padded */
	magic_len = strlen(ATH12K_BOARD_MAGIC) + 1;
	if (len < magic_len) {
		ath12k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
		ath12k_err(ab, "found invalid board magic\n");
		ret = -EINVAL;
		goto err;
	}

	/* magic is padded to 4 bytes */
	magic_len = ALIGN(magic_len, 4);
	if (len < magic_len) {
		ath12k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
			   filepath, len);
		ret = -EINVAL;
		goto err;
	}

	data += magic_len;
	len -= magic_len;

	/* walk the top-level IEs */
	while (len > sizeof(struct ath12k_fw_ie)) {
		hdr = (struct ath12k_fw_ie *)data;
		ie_id = le32_to_cpu(hdr->id);
		ie_len = le32_to_cpu(hdr->len);

		/* advance past the header; the padded payload is skipped
		 * at the 'next' label below
		 */
		len -= sizeof(*hdr);
		data = hdr->data;

		if (len < ALIGN(ie_len, 4)) {
			ath12k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
				   ie_id, ie_len, len);
			ret = -EINVAL;
			goto err;
		}

		if (ie_id == ie_id_match) {
			ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
							    ie_len,
							    boardname,
							    ie_id_match,
							    name_id,
							    data_id);
			if (ret == -ENOENT)
				/* no match found, continue */
				goto next;
			else if (ret)
				/* there was an error, bail out */
				goto err;
			/* either found or error, so stop searching */
			goto out;
		}
next:
		/* jump over the padding */
		ie_len = ALIGN(ie_len, 4);

		len -= ie_len;
		data += ie_len;
	}

out:
	if (!bd->data || !bd->len) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to fetch %s for %s from %s\n",
			   ath12k_bd_ie_type_str(ie_id_match),
			   boardname, filepath);
		ret = -ENODATA;
		goto err;
	}

	return 0;

err:
	ath12k_core_free_bdf(ab, bd);
	return ret;
}
471 
/* Legacy (API 1) board file: the whole file is the board data.
 * On failure bd->fw deliberately keeps the ERR_PTR; callers clean up
 * through ath12k_core_free_bdf(), which checks IS_ERR().
 */
int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
				       struct ath12k_board_data *bd,
				       char *filename)
{
	bd->fw = ath12k_core_firmware_request(ab, filename);
	if (IS_ERR(bd->fw))
		return PTR_ERR(bd->fw);

	bd->data = bd->fw->data;
	bd->len = bd->fw->size;

	return 0;
}
485 
#define BOARD_NAME_SIZE 200
/* Fetch the board data blob, trying in order:
 *  1. board-2.bin with the full (variant-qualified) board name
 *  2. board-2.bin with the fallback name (no variant, default board id)
 *  3. legacy API 1 board.bin
 */
int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
	char *filename, filepath[100];
	int bd_api;
	int ret;

	filename = ATH12K_BOARD_API2_FILE;

	ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname));
	if (ret) {
		ath12k_err(ab, "failed to create board name: %d", ret);
		return ret;
	}

	bd_api = 2;
	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
						     sizeof(fallback_boardname));
	if (ret) {
		ath12k_err(ab, "failed to create fallback board name: %d", ret);
		return ret;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
						 ATH12K_BD_IE_BOARD,
						 ATH12K_BD_IE_BOARD_NAME,
						 ATH12K_BD_IE_BOARD_DATA);
	if (!ret)
		goto success;

	bd_api = 1;
	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
	if (ret) {
		ath12k_core_create_firmware_path(ab, filename,
						 filepath, sizeof(filepath));
		ath12k_err(ab, "failed to fetch board data for %s from %s\n",
			   boardname, filepath);
		/* log the fallback name only if it differs from the
		 * primary name, to avoid a duplicate message
		 */
		if (memcmp(boardname, fallback_boardname, strlen(boardname)))
			ath12k_err(ab, "failed to fetch board data for %s from %s\n",
				   fallback_boardname, filepath);

		ath12k_err(ab, "failed to fetch board.bin from %s\n",
			   ab->hw_params->fw.dir);
		return ret;
	}

success:
	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", bd_api);
	return 0;
}
544 
/* Fetch the regulatory database, trying in order:
 *  1. board-2.bin REGDB entry for the full board name
 *  2. board-2.bin REGDB entry for the bus-only default name
 *  3. the standalone regdb file
 * All failures are debug-level only: regdb is optional.
 */
int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
{
	char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
	int ret;

	ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
						     BOARD_NAME_SIZE);
	if (ret) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "failed to create default board name for regdb: %d", ret);
		goto exit;
	}

	ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
						 ATH12K_BD_IE_REGDB,
						 ATH12K_BD_IE_REGDB_NAME,
						 ATH12K_BD_IE_REGDB_DATA);
	if (!ret)
		goto exit;

	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
	if (ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
			   ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);

exit:
	if (!ret)
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");

	return ret;
}
590 
591 u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
592 {
593 	if (ab->num_radios == 2)
594 		return TARGET_NUM_STATIONS_DBS;
595 	else if (ab->num_radios == 3)
596 		return TARGET_NUM_PEERS_PDEV_DBS_SBS;
597 	return TARGET_NUM_STATIONS_SINGLE;
598 }
599 
600 u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
601 {
602 	if (ab->num_radios == 2)
603 		return TARGET_NUM_PEERS_PDEV_DBS;
604 	else if (ab->num_radios == 3)
605 		return TARGET_NUM_PEERS_PDEV_DBS_SBS;
606 	return TARGET_NUM_PEERS_PDEV_SINGLE;
607 }
608 
609 u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
610 {
611 	if (ab->num_radios == 2)
612 		return TARGET_NUM_TIDS(DBS);
613 	else if (ab->num_radios == 3)
614 		return TARGET_NUM_TIDS(DBS_SBS);
615 	return TARGET_NUM_TIDS(SINGLE);
616 }
617 
618 struct reserved_mem *ath12k_core_get_reserved_mem(struct ath12k_base *ab,
619 						  int index)
620 {
621 	struct device *dev = ab->dev;
622 	struct reserved_mem *rmem;
623 	struct device_node *node;
624 
625 	node = of_parse_phandle(dev->of_node, "memory-region", index);
626 	if (!node) {
627 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
628 			   "failed to parse memory-region for index %d\n", index);
629 		return NULL;
630 	}
631 
632 	rmem = of_reserved_mem_lookup(node);
633 	of_node_put(node);
634 	if (!rmem) {
635 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
636 			   "unable to get memory-region for index %d\n", index);
637 		return NULL;
638 	}
639 
640 	return rmem;
641 }
642 
/* Mark this device as started within its hw group (at most once per
 * device, guarded by ab->hw_group_ref).  Caller holds ag->mutex.
 */
static inline
void ath12k_core_to_group_ref_get(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;

	lockdep_assert_held(&ag->mutex);

	if (ab->hw_group_ref) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already attached to group %d\n",
			   ag->id);
		return;
	}

	ab->hw_group_ref = true;
	ag->num_started++;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core attached to group %d, num_started %d\n",
		   ag->id, ag->num_started);
}
662 
/* Counterpart of ath12k_core_to_group_ref_get(): drop this device's
 * started reference from its hw group.  Caller holds ag->mutex.
 */
static inline
void ath12k_core_to_group_ref_put(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;

	lockdep_assert_held(&ag->mutex);

	if (!ab->hw_group_ref) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already de-attached from group %d\n",
			   ag->id);
		return;
	}

	ab->hw_group_ref = false;
	ag->num_started--;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core de-attached from group %d, num_started %d\n",
		   ag->id, ag->num_started);
}
682 
/* Tear down what ath12k_core_start() brought up for one device.
 * The QMI firmware-stop handshake is skipped when the device crashed
 * (ATH12K_FLAG_CRASH_FLUSH): the firmware cannot answer then.
 */
static void ath12k_core_stop(struct ath12k_base *ab)
{
	/* drop this device's started reference from the hw group */
	ath12k_core_to_group_ref_put(ab);

	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		ath12k_qmi_firmware_stop(ab);

	ath12k_acpi_stop(ab);

	ath12k_dp_rx_pdev_reo_cleanup(ab);
	ath12k_hif_stop(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_free(ab);

	/* De-Init of components as needed */
}
699 
/* dmi_walk() callback: extract the BDF variant string from the vendor
 * SMBIOS table into ab->qmi.target.bdf_ext.
 *
 * Accepts only a table of the expected type and length whose string
 * starts with the magic prefix and contains printable ASCII; the magic
 * prefix is stripped before copying.  A first successful match wins
 * (the early return on a non-empty bdf_ext).
 */
static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data)
{
	struct ath12k_base *ab = data;
	const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC;
	struct ath12k_smbios_bdf *smbios = (struct ath12k_smbios_bdf *)hdr;
	ssize_t copied;
	size_t len;
	int i;

	/* already found by an earlier table */
	if (ab->qmi.target.bdf_ext[0] != '\0')
		return;

	if (hdr->type != ATH12K_SMBIOS_BDF_EXT_TYPE)
		return;

	if (hdr->length != ATH12K_SMBIOS_BDF_EXT_LENGTH) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "wrong smbios bdf ext type length (%d).\n",
			   hdr->length);
		return;
	}

	if (!smbios->bdf_enabled) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n");
		return;
	}

	/* Only one string exists (per spec) */
	if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant magic does not match.\n");
		return;
	}

	/* validate the characters we will actually copy */
	len = min_t(size_t,
		    strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
	for (i = 0; i < len; i++) {
		if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "bdf variant name contains non ascii chars.\n");
			return;
		}
	}

	/* Copy extension name without magic prefix */
	copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
			 sizeof(ab->qmi.target.bdf_ext));
	if (copied < 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "bdf variant string is longer than the buffer can accommodate\n");
		return;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT,
		   "found and validated bdf variant smbios_type 0x%x bdf %s\n",
		   ATH12K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
}
757 
758 int ath12k_core_check_smbios(struct ath12k_base *ab)
759 {
760 	ab->qmi.target.bdf_ext[0] = '\0';
761 	dmi_walk(ath12k_core_check_bdfext, ab);
762 
763 	if (ab->qmi.target.bdf_ext[0] == '\0')
764 		return -ENODATA;
765 
766 	return 0;
767 }
768 
769 static int ath12k_core_soc_create(struct ath12k_base *ab)
770 {
771 	int ret;
772 
773 	if (ath12k_ftm_mode) {
774 		ab->fw_mode = ATH12K_FIRMWARE_MODE_FTM;
775 		ath12k_info(ab, "Booting in ftm mode\n");
776 	}
777 
778 	ret = ath12k_qmi_init_service(ab);
779 	if (ret) {
780 		ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
781 		return ret;
782 	}
783 
784 	ath12k_debugfs_soc_create(ab);
785 
786 	ret = ath12k_hif_power_up(ab);
787 	if (ret) {
788 		ath12k_err(ab, "failed to power up :%d\n", ret);
789 		goto err_qmi_deinit;
790 	}
791 
792 	return 0;
793 
794 err_qmi_deinit:
795 	ath12k_debugfs_soc_destroy(ab);
796 	ath12k_qmi_deinit_service(ab);
797 	return ret;
798 }
799 
/* Reverse of ath12k_core_soc_create(), plus regulatory state cleanup */
static void ath12k_core_soc_destroy(struct ath12k_base *ab)
{
	ath12k_hif_power_down(ab, false);
	ath12k_reg_free(ab);
	ath12k_debugfs_soc_destroy(ab);
	ath12k_qmi_deinit_service(ab);
}
807 
/* Per-device pdev setup; currently only the DP pdev allocation */
static int ath12k_core_pdev_create(struct ath12k_base *ab)
{
	int ret = ath12k_dp_pdev_alloc(ab);

	if (ret)
		ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);

	return ret;
}
820 
/* Reverse of ath12k_core_pdev_create() */
static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
{
	ath12k_dp_pdev_free(ab);
}
825 
/* Bring one device's core online: WMI/HTC/HIF transport, HTT/WMI
 * service connect, REO setup, firmware init command and mode setup.
 * Caller holds ab->core_lock.  On failure everything acquired so far
 * is unwound via the goto chain (reverse order of acquisition).
 */
static int ath12k_core_start(struct ath12k_base *ab)
{
	int ret;

	lockdep_assert_held(&ab->core_lock);

	ret = ath12k_wmi_attach(ab);
	if (ret) {
		ath12k_err(ab, "failed to attach wmi: %d\n", ret);
		return ret;
	}

	ret = ath12k_htc_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init htc: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_hif_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start HIF: %d\n", ret);
		goto err_wmi_detach;
	}

	ret = ath12k_htc_wait_target(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to connect to HTC: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_dp_htt_connect(&ab->dp);
	if (ret) {
		ath12k_err(ab, "failed to connect to HTT: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_wmi_connect(ab);
	if (ret) {
		ath12k_err(ab, "failed to connect wmi: %d\n", ret);
		goto err_hif_stop;
	}

	ret = ath12k_htc_start(&ab->htc);
	if (ret) {
		ath12k_err(ab, "failed to start HTC: %d\n", ret);
		goto err_hif_stop;
	}

	/* firmware advertises its services before we may send wmi init */
	ret = ath12k_wmi_wait_for_service_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi service ready event: %d\n",
			   ret);
		goto err_hif_stop;
	}

	ath12k_dp_cc_config(ab);

	ret = ath12k_dp_rx_pdev_reo_setup(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
		goto err_hif_stop;
	}

	ath12k_dp_hal_rx_desc_init(ab);

	ret = ath12k_wmi_cmd_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
		goto err_reo_cleanup;
	}

	ret = ath12k_wmi_wait_for_unified_ready(ab);
	if (ret) {
		ath12k_err(ab, "failed to receive wmi unified ready event: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	/* put hardware to DBS mode */
	if (ab->hw_params->single_pdev_only) {
		ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
		if (ret) {
			ath12k_err(ab, "failed to send dbs mode: %d\n", ret);
			goto err_reo_cleanup;
		}
	}

	ret = ath12k_dp_tx_htt_h2t_ver_req_msg(ab);
	if (ret) {
		ath12k_err(ab, "failed to send htt version request message: %d\n",
			   ret);
		goto err_reo_cleanup;
	}

	ath12k_acpi_set_dsm_func(ab);

	/* Indicate the core start in the appropriate group */
	ath12k_core_to_group_ref_get(ab);

	return 0;

err_reo_cleanup:
	ath12k_dp_rx_pdev_reo_cleanup(ab);
err_hif_stop:
	ath12k_hif_stop(ab);
err_wmi_detach:
	ath12k_wmi_detach(ab);
	return ret;
}
935 
/* Per-device part of hw group teardown: disable interrupts and free
 * the pdev state, under this device's core_lock.
 */
static void ath12k_core_device_cleanup(struct ath12k_base *ab)
{
	mutex_lock(&ab->core_lock);

	ath12k_hif_irq_disable(ab);
	ath12k_core_pdev_destroy(ab);

	mutex_unlock(&ab->core_lock);
}
945 
/* Stop a whole hw group: unregister mac80211 state, clean up each
 * member device in reverse probe order, then destroy the mac layer.
 * Caller holds ag->mutex.
 */
static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	lockdep_assert_held(&ag->mutex);

	clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

	ath12k_mac_unregister(ag);

	/* reverse order of ath12k_core_hw_group_start() */
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);

		ath12k_core_device_cleanup(ab);
	}

	ath12k_mac_destroy(ag);
}
969 
970 u8 ath12k_get_num_partner_link(struct ath12k *ar)
971 {
972 	struct ath12k_base *partner_ab, *ab = ar->ab;
973 	struct ath12k_hw_group *ag = ab->ag;
974 	struct ath12k_pdev *pdev;
975 	u8 num_link = 0;
976 	int i, j;
977 
978 	lockdep_assert_held(&ag->mutex);
979 
980 	for (i = 0; i < ag->num_devices; i++) {
981 		partner_ab = ag->ab[i];
982 
983 		for (j = 0; j < partner_ab->num_radios; j++) {
984 			pdev = &partner_ab->pdevs[j];
985 
986 			/* Avoid the self link */
987 			if (ar == pdev->ar)
988 				continue;
989 
990 			num_link++;
991 		}
992 	}
993 
994 	return num_link;
995 }
996 
997 static int __ath12k_mac_mlo_ready(struct ath12k *ar)
998 {
999 	u8 num_link = ath12k_get_num_partner_link(ar);
1000 	int ret;
1001 
1002 	if (num_link == 0)
1003 		return 0;
1004 
1005 	ret = ath12k_wmi_mlo_ready(ar);
1006 	if (ret) {
1007 		ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
1008 			   ar->pdev_idx, ret);
1009 		return ret;
1010 	}
1011 
1012 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n",
1013 		   ar->pdev_idx);
1014 
1015 	return 0;
1016 }
1017 
1018 int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
1019 {
1020 	struct ath12k_hw *ah;
1021 	struct ath12k *ar;
1022 	int ret;
1023 	int i, j;
1024 
1025 	for (i = 0; i < ag->num_hw; i++) {
1026 		ah = ag->ah[i];
1027 		if (!ah)
1028 			continue;
1029 
1030 		for_each_ar(ah, ar, j) {
1031 			ar = &ah->radio[j];
1032 			ret = __ath12k_mac_mlo_ready(ar);
1033 			if (ret)
1034 				return ret;
1035 		}
1036 	}
1037 
1038 	return 0;
1039 }
1040 
1041 static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
1042 {
1043 	int ret, i;
1044 
1045 	if (!ag->mlo_capable)
1046 		return 0;
1047 
1048 	ret = ath12k_mac_mlo_setup(ag);
1049 	if (ret)
1050 		return ret;
1051 
1052 	for (i = 0; i < ag->num_devices; i++)
1053 		ath12k_dp_partner_cc_init(ag->ab[i]);
1054 
1055 	ret = ath12k_mac_mlo_ready(ag);
1056 	if (ret)
1057 		goto err_mlo_teardown;
1058 
1059 	return 0;
1060 
1061 err_mlo_teardown:
1062 	ath12k_mac_mlo_teardown(ag);
1063 
1064 	return ret;
1065 }
1066 
/* Start a whole hw group.  The mac allocate/MLO/register phase runs
 * only on the first start (guarded by ATH12K_GROUP_FLAG_REGISTERED);
 * the per-device pdev creation runs on every start.  Caller holds
 * ag->mutex.
 *
 * Note the two disjoint error chains: failures before the flag is set
 * unwind through err_mlo_teardown/err_mac_destroy; failures in the
 * per-device loop unwind the whole group through err ->
 * ath12k_core_hw_group_stop().
 */
static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int ret, i;

	lockdep_assert_held(&ag->mutex);

	if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
		goto core_pdev_create;

	ret = ath12k_mac_allocate(ag);
	if (WARN_ON(ret))
		return ret;

	ret = ath12k_core_mlo_setup(ag);
	if (WARN_ON(ret))
		goto err_mac_destroy;

	ret = ath12k_mac_register(ag);
	if (WARN_ON(ret))
		goto err_mlo_teardown;

	set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);

core_pdev_create:
	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);

		set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);

		ret = ath12k_core_pdev_create(ab);
		if (ret) {
			ath12k_err(ab, "failed to create pdev core %d\n", ret);
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		ath12k_hif_irq_enable(ab);

		/* -EOPNOTSUPP simply means no rfkill support, not failure */
		ret = ath12k_core_rfkill_config(ab);
		if (ret && ret != -EOPNOTSUPP) {
			mutex_unlock(&ab->core_lock);
			goto err;
		}

		mutex_unlock(&ab->core_lock);
	}

	return 0;

err:
	ath12k_core_hw_group_stop(ag);
	return ret;

err_mlo_teardown:
	ath12k_mac_mlo_teardown(ag);

err_mac_destroy:
	ath12k_mac_destroy(ag);

	return ret;
}
1133 
1134 static int ath12k_core_start_firmware(struct ath12k_base *ab,
1135 				      enum ath12k_firmware_mode mode)
1136 {
1137 	int ret;
1138 
1139 	ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
1140 				    &ab->qmi.ce_cfg.shadow_reg_v3_len);
1141 
1142 	ret = ath12k_qmi_firmware_start(ab, mode);
1143 	if (ret) {
1144 		ath12k_err(ab, "failed to send firmware start: %d\n", ret);
1145 		return ret;
1146 	}
1147 
1148 	return ret;
1149 }
1150 
1151 static inline
1152 bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
1153 {
1154 	lockdep_assert_held(&ag->mutex);
1155 
1156 	return (ag->num_started == ag->num_devices);
1157 }
1158 
1159 static void ath12k_fw_stats_pdevs_free(struct list_head *head)
1160 {
1161 	struct ath12k_fw_stats_pdev *i, *tmp;
1162 
1163 	list_for_each_entry_safe(i, tmp, head, list) {
1164 		list_del(&i->list);
1165 		kfree(i);
1166 	}
1167 }
1168 
1169 void ath12k_fw_stats_bcn_free(struct list_head *head)
1170 {
1171 	struct ath12k_fw_stats_bcn *i, *tmp;
1172 
1173 	list_for_each_entry_safe(i, tmp, head, list) {
1174 		list_del(&i->list);
1175 		kfree(i);
1176 	}
1177 }
1178 
1179 static void ath12k_fw_stats_vdevs_free(struct list_head *head)
1180 {
1181 	struct ath12k_fw_stats_vdev *i, *tmp;
1182 
1183 	list_for_each_entry_safe(i, tmp, head, list) {
1184 		list_del(&i->list);
1185 		kfree(i);
1186 	}
1187 }
1188 
/* Initialize the per-radio firmware stats lists and completion */
void ath12k_fw_stats_init(struct ath12k *ar)
{
	INIT_LIST_HEAD(&ar->fw_stats.vdevs);
	INIT_LIST_HEAD(&ar->fw_stats.pdevs);
	INIT_LIST_HEAD(&ar->fw_stats.bcn);
	init_completion(&ar->fw_stats_complete);
}
1196 
/* Drop all collected firmware stats entries (pdev, vdev and beacon) */
void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
{
	ath12k_fw_stats_pdevs_free(&stats->pdevs);
	ath12k_fw_stats_vdevs_free(&stats->vdevs);
	ath12k_fw_stats_bcn_free(&stats->bcn);
}
1203 
/* Discard any cached firmware stats for @ar and clear the "stats done"
 * flag so the next stats request starts from a clean slate. Runs under
 * data_lock since stats are filled from the WMI event path.
 */
void ath12k_fw_stats_reset(struct ath12k *ar)
{
	spin_lock_bh(&ar->data_lock);
	ar->fw_stats.fw_stats_done = false;
	ath12k_fw_stats_free(&ar->fw_stats);
	spin_unlock_bh(&ar->data_lock);
}
1211 
1212 static void ath12k_core_trigger_partner(struct ath12k_base *ab)
1213 {
1214 	struct ath12k_hw_group *ag = ab->ag;
1215 	struct ath12k_base *partner_ab;
1216 	bool found = false;
1217 	int i;
1218 
1219 	for (i = 0; i < ag->num_devices; i++) {
1220 		partner_ab = ag->ab[i];
1221 		if (!partner_ab)
1222 			continue;
1223 
1224 		if (found)
1225 			ath12k_qmi_trigger_host_cap(partner_ab);
1226 
1227 		found = (partner_ab == ab);
1228 	}
1229 }
1230 
/* Called when QMI reports the firmware is ready: start the firmware,
 * bring up CE pipes and DP, start this device's core, and — once every
 * device in the group has started — start the whole hardware group.
 * If other devices are still pending, trigger the next partner instead.
 *
 * Returns 0 on success or a negative errno; on failure all state set up
 * here is unwound via the error labels below.
 */
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	int ret, i;

	ret = ath12k_core_start_firmware(ab, ab->fw_mode);
	if (ret) {
		ath12k_err(ab, "failed to start firmware: %d\n", ret);
		return ret;
	}

	ret = ath12k_ce_init_pipes(ab);
	if (ret) {
		ath12k_err(ab, "failed to initialize CE: %d\n", ret);
		goto err_firmware_stop;
	}

	ret = ath12k_dp_alloc(ab);
	if (ret) {
		ath12k_err(ab, "failed to init DP: %d\n", ret);
		goto err_firmware_stop;
	}

	/* Group mutex is held across the group-readiness check below;
	 * core_lock protects this device's own start/stop.
	 */
	mutex_lock(&ag->mutex);
	mutex_lock(&ab->core_lock);

	ret = ath12k_core_start(ab);
	if (ret) {
		ath12k_err(ab, "failed to start core: %d\n", ret);
		goto err_dp_free;
	}

	mutex_unlock(&ab->core_lock);

	if (ath12k_core_hw_group_start_ready(ag)) {
		ret = ath12k_core_hw_group_start(ag);
		if (ret) {
			ath12k_warn(ab, "unable to start hw group\n");
			goto err_core_stop;
		}
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
	} else {
		/* Not all partners are up yet; nudge the next one */
		ath12k_core_trigger_partner(ab);
	}

	mutex_unlock(&ag->mutex);

	return 0;

err_core_stop:
	/* Group start failed: stop every started device, last-to-first */
	for (i = ag->num_devices - 1; i >= 0; i--) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}
	mutex_unlock(&ag->mutex);
	goto exit;

err_dp_free:
	ath12k_dp_free(ab);
	mutex_unlock(&ab->core_lock);
	mutex_unlock(&ag->mutex);

err_firmware_stop:
	ath12k_qmi_firmware_stop(ab);

exit:
	return ret;
}
1304 
/* Rebuild driver state after a firmware crash: tear down per-pdev DP,
 * CE pipes, WMI and REO state, free DP and SRNG, then reinitialize SRNG
 * and rerun the firmware-ready bring-up path.
 *
 * Returns 0 on success; on failure SRNG is deinitialized again so the
 * device is left in a consistent torn-down state.
 */
static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
{
	int ret;

	mutex_lock(&ab->core_lock);
	ath12k_dp_pdev_free(ab);
	ath12k_ce_cleanup_pipes(ab);
	ath12k_wmi_detach(ab);
	ath12k_dp_rx_pdev_reo_cleanup(ab);
	mutex_unlock(&ab->core_lock);

	ath12k_dp_free(ab);
	ath12k_hal_srng_deinit(ab);

	/* All vdev ids become available again after the crash */
	ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;

	ret = ath12k_hal_srng_init(ab);
	if (ret)
		return ret;

	clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	ret = ath12k_core_qmi_firmware_ready(ab);
	if (ret)
		goto err_hal_srng_deinit;

	clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);

	return 0;

err_hal_srng_deinit:
	ath12k_hal_srng_deinit(ab);
	return ret;
}
1339 
1340 static void ath12k_rfkill_work(struct work_struct *work)
1341 {
1342 	struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
1343 	struct ath12k_hw_group *ag = ab->ag;
1344 	struct ath12k *ar;
1345 	struct ath12k_hw *ah;
1346 	struct ieee80211_hw *hw;
1347 	bool rfkill_radio_on;
1348 	int i, j;
1349 
1350 	spin_lock_bh(&ab->base_lock);
1351 	rfkill_radio_on = ab->rfkill_radio_on;
1352 	spin_unlock_bh(&ab->base_lock);
1353 
1354 	for (i = 0; i < ag->num_hw; i++) {
1355 		ah = ath12k_ag_to_ah(ag, i);
1356 		if (!ah)
1357 			continue;
1358 
1359 		for (j = 0; j < ah->num_radio; j++) {
1360 			ar = &ah->radio[j];
1361 			if (!ar)
1362 				continue;
1363 
1364 			ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
1365 		}
1366 
1367 		hw = ah->hw;
1368 		wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
1369 	}
1370 }
1371 
/* Halt a single radio: cancel pending work, detach it from the active
 * pdev table (with RCU grace period), drop all arvifs and reset the
 * tx-mgmt idr. Caller must hold the wiphy lock.
 */
void ath12k_core_halt(struct ath12k *ar)
{
	struct list_head *pos, *n;
	struct ath12k_base *ab = ar->ab;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	ar->num_created_vdevs = 0;
	ar->allocated_vdev_map = 0;

	ath12k_mac_scan_finish(ar);
	ath12k_mac_peer_cleanup_all(ar);
	cancel_delayed_work_sync(&ar->scan.timeout);
	cancel_work_sync(&ar->regd_update_work);
	cancel_work_sync(&ab->rfkill_work);
	cancel_work_sync(&ab->update_11d_work);

	/* Remove from pdevs_active and wait out RCU readers before
	 * tearing down the arvif list below.
	 */
	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
	synchronize_rcu();

	spin_lock_bh(&ar->data_lock);
	list_for_each_safe(pos, n, &ar->arvifs)
		list_del_init(pos);
	spin_unlock_bh(&ar->data_lock);

	idr_init(&ar->txmgmt_idr);
}
1399 
/* First phase of crash recovery: bump the crash counter, stop mac80211
 * queues, drain tx, complete every outstanding wait so no caller blocks
 * across the recovery, and clear per-radio monitor/tx-mgmt state.
 */
static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k *ar;
	struct ath12k_hw *ah;
	int i, j;

	spin_lock_bh(&ab->base_lock);
	ab->stats.fw_crash_counter++;
	spin_unlock_bh(&ab->base_lock);

	if (ab->is_reset)
		set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		if (!ah || ah->state == ATH12K_HW_STATE_OFF ||
		    ah->state == ATH12K_HW_STATE_TM)
			continue;

		wiphy_lock(ah->hw->wiphy);

		/* If queue 0 is stopped, it is safe to assume that all
		 * other queues are stopped by driver via
		 * ieee80211_stop_queues() below. This means, there is
		 * no need to stop it again and hence continue
		 */
		if (ieee80211_queue_stopped(ah->hw, 0)) {
			wiphy_unlock(ah->hw->wiphy);
			continue;
		}

		ieee80211_stop_queues(ah->hw);

		for (j = 0; j < ah->num_radio; j++) {
			ar = &ah->radio[j];

			ath12k_mac_drain_tx(ar);
			ar->state_11d = ATH12K_11D_IDLE;
			/* Complete every in-flight wait so nothing blocks
			 * on a firmware that will never answer.
			 */
			complete(&ar->completed_11d_scan);
			complete(&ar->scan.started);
			complete_all(&ar->scan.completed);
			complete(&ar->scan.on_channel);
			complete(&ar->peer_assoc_done);
			complete(&ar->peer_delete_done);
			complete(&ar->install_key_done);
			complete(&ar->vdev_setup_done);
			complete(&ar->vdev_delete_done);
			complete(&ar->bss_survey_done);

			wake_up(&ar->dp.tx_empty_waitq);
			idr_for_each(&ar->txmgmt_idr,
				     ath12k_mac_tx_mgmt_pending_free, ar);
			idr_destroy(&ar->txmgmt_idr);
			wake_up(&ar->txmgmt_empty_waitq);

			ar->monitor_vdev_id = -1;
			ar->monitor_vdev_created = false;
			ar->monitor_started = false;
		}

		wiphy_unlock(ah->hw->wiphy);
	}

	wake_up(&ab->wmi_ab.tx_credits_wq);
	wake_up(&ab->peer_mapping_wq);
}
1467 
1468 static void ath12k_update_11d(struct work_struct *work)
1469 {
1470 	struct ath12k_base *ab = container_of(work, struct ath12k_base, update_11d_work);
1471 	struct ath12k *ar;
1472 	struct ath12k_pdev *pdev;
1473 	struct wmi_set_current_country_arg arg = {};
1474 	int ret, i;
1475 
1476 	spin_lock_bh(&ab->base_lock);
1477 	memcpy(&arg.alpha2, &ab->new_alpha2, 2);
1478 	spin_unlock_bh(&ab->base_lock);
1479 
1480 	ath12k_dbg(ab, ATH12K_DBG_WMI, "update 11d new cc %c%c\n",
1481 		   arg.alpha2[0], arg.alpha2[1]);
1482 
1483 	for (i = 0; i < ab->num_radios; i++) {
1484 		pdev = &ab->pdevs[i];
1485 		ar = pdev->ar;
1486 
1487 		memcpy(&ar->alpha2, &arg.alpha2, 2);
1488 		ret = ath12k_wmi_send_set_current_country_cmd(ar, &arg);
1489 		if (ret)
1490 			ath12k_warn(ar->ab,
1491 				    "pdev id %d failed set current country code: %d\n",
1492 				    i, ret);
1493 	}
1494 }
1495 
/* Second phase of crash recovery: walk every hw in the group and move
 * its state machine toward restart — halting radios of hw that were ON,
 * marking repeatedly-restarted hw as WEDGED — then signal that driver
 * recovery is complete.
 */
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	struct ath12k *ar;
	int i, j;

	for (i = 0; i < ag->num_hw; i++) {
		ah = ath12k_ag_to_ah(ag, i);
		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
			continue;

		wiphy_lock(ah->hw->wiphy);
		mutex_lock(&ah->hw_mutex);

		switch (ah->state) {
		case ATH12K_HW_STATE_ON:
			ah->state = ATH12K_HW_STATE_RESTARTING;

			for (j = 0; j < ah->num_radio; j++) {
				ar = &ah->radio[j];
				ath12k_core_halt(ar);
			}

			break;
		case ATH12K_HW_STATE_OFF:
			/* Unreachable (filtered above); kept for a complete switch */
			ath12k_warn(ab,
				    "cannot restart hw %d that hasn't been started\n",
				    i);
			break;
		case ATH12K_HW_STATE_RESTARTING:
			break;
		case ATH12K_HW_STATE_RESTARTED:
			/* A second crash before the restart finished: give up */
			ah->state = ATH12K_HW_STATE_WEDGED;
			fallthrough;
		case ATH12K_HW_STATE_WEDGED:
			ath12k_warn(ab,
				    "device is wedged, will not restart hw %d\n", i);
			break;
		case ATH12K_HW_STATE_TM:
			ath12k_warn(ab, "fw mode reset done radio %d\n", i);
			break;
		}

		mutex_unlock(&ah->hw_mutex);
		wiphy_unlock(ah->hw->wiphy);
	}

	complete(&ab->driver_recovery);
}
1546 
/* Restart worker: reconfigure the driver after a crash and, once every
 * device in the group is started again, ask mac80211 to restart each hw.
 */
static void ath12k_core_restart(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k_hw *ah;
	int ret, i;

	ret = ath12k_core_reconfigure_on_crash(ab);
	if (ret) {
		ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
		return;
	}

	if (ab->is_reset) {
		if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
			/* Not registered with mac80211: finish the reset
			 * bookkeeping here instead of via restart_hw.
			 */
			atomic_dec(&ab->reset_count);
			complete(&ab->reset_complete);
			ab->is_reset = false;
			atomic_set(&ab->fail_cont_count, 0);
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
		}

		mutex_lock(&ag->mutex);

		/* Wait until all partner devices have restarted too */
		if (!ath12k_core_hw_group_start_ready(ag)) {
			mutex_unlock(&ag->mutex);
			goto exit_restart;
		}

		for (i = 0; i < ag->num_hw; i++) {
			ah = ath12k_ag_to_ah(ag, i);
			ieee80211_restart_hw(ah->hw);
		}

		mutex_unlock(&ag->mutex);
	}

exit_restart:
	complete(&ab->restart_completed);
}
1587 
/* Reset worker: throttle repeated recovery attempts, quiesce the device
 * (collect coredump, pre/post reconfigure, disable IRQs, power down) and
 * power the whole group back up once the last partner device has reset.
 */
static void ath12k_core_reset(struct work_struct *work)
{
	struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
	struct ath12k_hw_group *ag = ab->ag;
	int reset_count, fail_cont_count, i;
	long time_left;

	if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
		ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
		return;
	}

	/* Sometimes the recovery will fail and then the next all recovery fail,
	 * this is to avoid infinite recovery since it can not recovery success
	 */
	fail_cont_count = atomic_read(&ab->fail_cont_count);

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL)
		return;

	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
	    time_before(jiffies, ab->reset_fail_timeout))
		return;

	reset_count = atomic_inc_return(&ab->reset_count);

	if (reset_count > 1) {
		/* Sometimes it happened another reset worker before the previous one
		 * completed, then the second reset worker will destroy the previous one,
		 * thus below is to avoid that.
		 */
		ath12k_warn(ab, "already resetting count %d\n", reset_count);

		reinit_completion(&ab->reset_complete);
		time_left = wait_for_completion_timeout(&ab->reset_complete,
							ATH12K_RESET_TIMEOUT_HZ);
		if (time_left) {
			/* Previous reset finished in time: skip this one */
			ath12k_dbg(ab, ATH12K_DBG_BOOT, "to skip reset\n");
			atomic_dec(&ab->reset_count);
			return;
		}

		ab->reset_fail_timeout = jiffies + ATH12K_RESET_FAIL_TIMEOUT_HZ;
		/* Record the continuous recovery fail count when recovery failed */
		fail_cont_count = atomic_inc_return(&ab->fail_cont_count);
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");

	ab->is_reset = true;
	atomic_set(&ab->recovery_count, 0);

	ath12k_coredump_collect(ab);
	ath12k_core_pre_reconfigure_recovery(ab);

	ath12k_core_post_reconfigure_recovery(ab);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");

	ath12k_hif_irq_disable(ab);
	ath12k_hif_ce_irq_disable(ab);

	ath12k_hif_power_down(ab, false);

	/* prepare for power up */
	ab->qmi.num_radios = U8_MAX;

	mutex_lock(&ag->mutex);
	ath12k_core_to_group_ref_put(ab);

	/* Only the last device to reset powers the group back up */
	if (ag->num_started > 0) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT,
			   "waiting for %d partner device(s) to reset\n",
			   ag->num_started);
		mutex_unlock(&ag->mutex);
		return;
	}

	/* Prepare MLO global memory region for power up */
	ath12k_qmi_reset_mlo_mem(ag);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		ath12k_hif_power_up(ab);
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
	}

	mutex_unlock(&ag->mutex);
}
1680 
/* Early probe-time initialization: resolve hw params for this chip and
 * map the matching firmware assets.
 *
 * Returns 0 on success or a negative errno from hw param init.
 */
int ath12k_core_pre_init(struct ath12k_base *ab)
{
	int ret = ath12k_hw_init(ab);

	if (ret) {
		ath12k_err(ab, "failed to init hw params: %d\n", ret);
		return ret;
	}

	ath12k_fw_map(ab);

	return 0;
}
1695 
1696 static int ath12k_core_panic_handler(struct notifier_block *nb,
1697 				     unsigned long action, void *data)
1698 {
1699 	struct ath12k_base *ab = container_of(nb, struct ath12k_base,
1700 					      panic_nb);
1701 
1702 	return ath12k_hif_panic_handler(ab);
1703 }
1704 
1705 static int ath12k_core_panic_notifier_register(struct ath12k_base *ab)
1706 {
1707 	ab->panic_nb.notifier_call = ath12k_core_panic_handler;
1708 
1709 	return atomic_notifier_chain_register(&panic_notifier_list,
1710 					      &ab->panic_nb);
1711 }
1712 
1713 static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
1714 {
1715 	atomic_notifier_chain_unregister(&panic_notifier_list,
1716 					 &ab->panic_nb);
1717 }
1718 
1719 static inline
1720 bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
1721 {
1722 	lockdep_assert_held(&ag->mutex);
1723 
1724 	return (ag->num_probed == ag->num_devices);
1725 }
1726 
1727 static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab)
1728 {
1729 	struct ath12k_hw_group *ag;
1730 	int count = 0;
1731 
1732 	lockdep_assert_held(&ath12k_hw_group_mutex);
1733 
1734 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1735 		count++;
1736 
1737 	ag = kzalloc(sizeof(*ag), GFP_KERNEL);
1738 	if (!ag)
1739 		return NULL;
1740 
1741 	ag->id = count;
1742 	list_add(&ag->list, &ath12k_hw_group_list);
1743 	mutex_init(&ag->mutex);
1744 	ag->mlo_capable = false;
1745 
1746 	return ag;
1747 }
1748 
/* Unlink @ag from the global group list and free it. Takes the global
 * group mutex itself, so the caller must not hold it.
 */
static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
{
	mutex_lock(&ath12k_hw_group_mutex);

	list_del(&ag->list);
	kfree(ag);

	mutex_unlock(&ath12k_hw_group_mutex);
}
1758 
1759 static struct ath12k_hw_group *ath12k_core_hw_group_find_by_dt(struct ath12k_base *ab)
1760 {
1761 	struct ath12k_hw_group *ag;
1762 	int i;
1763 
1764 	if (!ab->dev->of_node)
1765 		return NULL;
1766 
1767 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1768 		for (i = 0; i < ag->num_devices; i++)
1769 			if (ag->wsi_node[i] == ab->dev->of_node)
1770 				return ag;
1771 
1772 	return NULL;
1773 }
1774 
/* Walk the WSI ring described in the device tree starting from this
 * device's node: follow each node's tx endpoint to the remote rx side
 * until the walk arrives back at the starting node, recording every
 * visited node in ag->wsi_node[] and the ring size in ag->num_devices.
 *
 * Returns 0 on success, -ENODEV if the graph is broken, -EINVAL if the
 * ring exceeds ATH12K_MAX_SOCS. Every of_graph_* reference taken here
 * is dropped with of_node_put() on all paths.
 */
static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
				    struct ath12k_base *ab)
{
	struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev;
	struct device_node *tx_endpoint, *next_rx_endpoint;
	int device_count = 0;

	next_wsi_dev = wsi_dev;

	if (!next_wsi_dev)
		return -ENODEV;

	do {
		ag->wsi_node[device_count] = next_wsi_dev;

		/* Port 0 carries this node's tx endpoint */
		tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1);
		if (!tx_endpoint) {
			of_node_put(next_wsi_dev);
			return -ENODEV;
		}

		next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint);
		if (!next_rx_endpoint) {
			of_node_put(next_wsi_dev);
			of_node_put(tx_endpoint);
			return -ENODEV;
		}

		of_node_put(tx_endpoint);
		of_node_put(next_wsi_dev);

		/* Hop to the device owning the remote rx endpoint */
		next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint);
		if (!next_wsi_dev) {
			of_node_put(next_rx_endpoint);
			return -ENODEV;
		}

		of_node_put(next_rx_endpoint);

		device_count++;
		if (device_count > ATH12K_MAX_SOCS) {
			ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
				    device_count, ATH12K_MAX_SOCS);
			of_node_put(next_wsi_dev);
			return -EINVAL;
		}
	} while (wsi_dev != next_wsi_dev);

	of_node_put(next_wsi_dev);
	ag->num_devices = device_count;

	return 0;
}
1828 
1829 static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag,
1830 				     struct ath12k_base *ab)
1831 {
1832 	int i, wsi_controller_index = -1, node_index = -1;
1833 	bool control;
1834 
1835 	for (i = 0; i < ag->num_devices; i++) {
1836 		control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller");
1837 		if (control)
1838 			wsi_controller_index = i;
1839 
1840 		if (ag->wsi_node[i] == ab->dev->of_node)
1841 			node_index = i;
1842 	}
1843 
1844 	if (wsi_controller_index == -1) {
1845 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt");
1846 		return -EINVAL;
1847 	}
1848 
1849 	if (node_index == -1) {
1850 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index");
1851 		return -EINVAL;
1852 	}
1853 
1854 	ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) %
1855 		ag->num_devices;
1856 
1857 	return 0;
1858 }
1859 
/* Attach a newly probed device to a hardware group: reuse the DT-matched
 * group when one exists and is usable, otherwise create one (falling
 * back to a single-device "invalid" group when DT gives no WSI info or
 * FTM mode is active). On success the device gets a slot and device_id
 * in the group; returns NULL if no group could be created.
 * Caller holds ath12k_hw_group_mutex.
 */
static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
{
	struct ath12k_wsi_info *wsi = &ab->wsi_info;
	struct ath12k_hw_group *ag;

	lockdep_assert_held(&ath12k_hw_group_mutex);

	if (ath12k_ftm_mode)
		goto invalid_group;

	/* The grouping of multiple devices will be done based on device tree file.
	 * The platforms that do not have any valid group information would have
	 * each device to be part of its own invalid group.
	 *
	 * We use group id ATH12K_INVALID_GROUP_ID for single device group
	 * which didn't have dt entry or wrong dt entry, there could be many
	 * groups with same group id, i.e ATH12K_INVALID_GROUP_ID. So
	 * default group id of ATH12K_INVALID_GROUP_ID combined with
	 * num devices in ath12k_hw_group determines if the group is
	 * multi device or single device group
	 */

	ag = ath12k_core_hw_group_find_by_dt(ab);
	if (!ag) {
		ag = ath12k_core_hw_group_alloc(ab);
		if (!ag) {
			ath12k_warn(ab, "unable to create new hw group\n");
			return NULL;
		}

		/* No (or bad) WSI data in DT: demote to single-device group */
		if (ath12k_core_get_wsi_info(ag, ab) ||
		    ath12k_core_get_wsi_index(ag, ab)) {
			ath12k_dbg(ab, ATH12K_DBG_BOOT,
				   "unable to get wsi info from dt, grouping single device");
			ag->id = ATH12K_INVALID_GROUP_ID;
			ag->num_devices = 1;
			memset(ag->wsi_node, 0, sizeof(ag->wsi_node));
			wsi->index = 0;
		}

		goto exit;
	} else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
			   ag->id);
		goto invalid_group;
	} else {
		if (ath12k_core_get_wsi_index(ag, ab))
			goto invalid_group;
		goto exit;
	}

invalid_group:
	ag = ath12k_core_hw_group_alloc(ab);
	if (!ag) {
		ath12k_warn(ab, "unable to create new hw group\n");
		return NULL;
	}

	ag->id = ATH12K_INVALID_GROUP_ID;
	ag->num_devices = 1;
	wsi->index = 0;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");

exit:
	if (ag->num_probed >= ag->num_devices) {
		ath12k_warn(ab, "unable to add new device to group, max limit reached\n");
		goto invalid_group;
	}

	ab->device_id = ag->num_probed++;
	ag->ab[ab->device_id] = ab;
	ab->ag = ag;

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d",
		   ag->id, ag->num_devices, wsi->index);

	return ag;
}
1939 
/* Detach @ab from its hardware group, clearing the slot and device id.
 * The group itself is freed once its last probed device is removed;
 * the free happens outside the group mutex since it takes the global
 * group mutex.
 */
void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
	u8 device_id = ab->device_id;
	int num_probed;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	if (WARN_ON(device_id >= ag->num_devices)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	if (WARN_ON(ag->ab[device_id] != ab)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	ag->ab[device_id] = NULL;
	ab->ag = NULL;
	ab->device_id = ATH12K_INVALID_DEVICE_ID;

	if (ag->num_probed)
		ag->num_probed--;

	num_probed = ag->num_probed;

	mutex_unlock(&ag->mutex);

	if (!num_probed)
		ath12k_core_hw_group_free(ag);
}
1975 
1976 static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
1977 {
1978 	struct ath12k_base *ab;
1979 	int i;
1980 
1981 	if (WARN_ON(!ag))
1982 		return;
1983 
1984 	for (i = 0; i < ag->num_devices; i++) {
1985 		ab = ag->ab[i];
1986 		if (!ab)
1987 			continue;
1988 
1989 		ath12k_core_soc_destroy(ab);
1990 	}
1991 }
1992 
/* Stop and unregister the whole group exactly once: the UNREGISTER flag
 * makes repeated calls (e.g. from multiple device removals) a no-op.
 * Stops the group first, then each individual device core.
 */
static void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
		mutex_unlock(&ag->mutex);
		return;
	}

	set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags);

	ath12k_core_hw_group_stop(ag);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		mutex_lock(&ab->core_lock);
		ath12k_core_stop(ab);
		mutex_unlock(&ab->core_lock);
	}

	mutex_unlock(&ag->mutex);
}
2024 
2025 static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
2026 {
2027 	struct ath12k_base *ab;
2028 	int i, ret;
2029 
2030 	lockdep_assert_held(&ag->mutex);
2031 
2032 	for (i = 0; i < ag->num_devices; i++) {
2033 		ab = ag->ab[i];
2034 		if (!ab)
2035 			continue;
2036 
2037 		mutex_lock(&ab->core_lock);
2038 
2039 		ret = ath12k_core_soc_create(ab);
2040 		if (ret) {
2041 			mutex_unlock(&ab->core_lock);
2042 			ath12k_err(ab, "failed to create soc core: %d\n", ret);
2043 			return ret;
2044 		}
2045 
2046 		mutex_unlock(&ab->core_lock);
2047 	}
2048 
2049 	return 0;
2050 }
2051 
/* Decide whether the group as a whole supports MLO. For a single-device
 * group the answer comes from the device's own advertisement; for a
 * multi-device group every member must advertise MLO support.
 * No-op in FTM mode. Caller holds the group mutex.
 */
void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	if (ath12k_ftm_mode)
		return;

	lockdep_assert_held(&ag->mutex);

	if (ag->num_devices == 1) {
		ab = ag->ab[0];
		/* QCN9274 firmware uses firmware IE for MLO advertisement */
		if (ab->fw.fw_features_valid) {
			ag->mlo_capable =
				ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO);
			return;
		}

		/* while WCN7850 firmware uses QMI single_chip_mlo_support bit */
		ag->mlo_capable = ab->single_chip_mlo_support;
		return;
	}

	ag->mlo_capable = true;

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		/* even if 1 device's firmware feature indicates MLO
		 * unsupported, make MLO unsupported for the whole group
		 */
		if (!ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MLO)) {
			ag->mlo_capable = false;
			return;
		}
	}
}
2092 
/* Probe-time core initialization: register the panic notifier, assign
 * the device to a hardware group and, once the last expected device of
 * the group has been probed, create the group's SoC state.
 *
 * Returns 0 on success or a negative errno; a panic-notifier failure is
 * only warned about, it does not abort the probe.
 */
int ath12k_core_init(struct ath12k_base *ab)
{
	struct ath12k_hw_group *ag;
	int ret;

	ret = ath12k_core_panic_notifier_register(ab);
	if (ret)
		ath12k_warn(ab, "failed to register panic handler: %d\n", ret);

	mutex_lock(&ath12k_hw_group_mutex);

	ag = ath12k_core_hw_group_assign(ab);
	if (!ag) {
		mutex_unlock(&ath12k_hw_group_mutex);
		ath12k_warn(ab, "unable to get hw group\n");
		return -ENODEV;
	}

	mutex_unlock(&ath12k_hw_group_mutex);

	mutex_lock(&ag->mutex);

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
		   ag->num_devices, ag->num_probed);

	/* Only the last device probed in the group creates it */
	if (ath12k_core_hw_group_create_ready(ag)) {
		ret = ath12k_core_hw_group_create(ag);
		if (ret) {
			mutex_unlock(&ag->mutex);
			ath12k_warn(ab, "unable to create hw group\n");
			goto err;
		}
	}

	mutex_unlock(&ag->mutex);

	return 0;

err:
	ath12k_core_hw_group_destroy(ab->ag);
	ath12k_core_hw_group_unassign(ab);
	return ret;
}
2136 
/* Remove-time core teardown, the inverse of ath12k_core_init():
 * unregister the panic notifier, then clean up, destroy and leave the
 * hardware group (group is freed when the last member unassigns).
 */
void ath12k_core_deinit(struct ath12k_base *ab)
{
	ath12k_core_panic_notifier_unregister(ab);
	ath12k_core_hw_group_cleanup(ab->ag);
	ath12k_core_hw_group_destroy(ab->ag);
	ath12k_core_hw_group_unassign(ab);
}
2144 
/* Final free of an ath12k_base allocated by ath12k_core_alloc():
 * the retry timer and both workqueues are stopped before the struct
 * that embeds them is released.
 */
void ath12k_core_free(struct ath12k_base *ab)
{
	timer_delete_sync(&ab->rx_replenish_retry);
	destroy_workqueue(ab->workqueue_aux);
	destroy_workqueue(ab->workqueue);
	kfree(ab);
}
2152 
/* Allocate and initialize an ath12k_base with @priv_size bytes of
 * bus-private data appended: create the two driver workqueues, and set
 * up all locks, lists, waitqueues, work items, timers and completions
 * used by the core. Returns NULL on allocation failure (already created
 * resources are unwound). Freed with ath12k_core_free().
 */
struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
				      enum ath12k_bus bus)
{
	struct ath12k_base *ab;

	ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
	if (!ab)
		return NULL;

	init_completion(&ab->driver_recovery);

	ab->workqueue = create_singlethread_workqueue("ath12k_wq");
	if (!ab->workqueue)
		goto err_sc_free;

	ab->workqueue_aux = create_singlethread_workqueue("ath12k_aux_wq");
	if (!ab->workqueue_aux)
		goto err_free_wq;

	mutex_init(&ab->core_lock);
	spin_lock_init(&ab->base_lock);
	init_completion(&ab->reset_complete);

	INIT_LIST_HEAD(&ab->peers);
	init_waitqueue_head(&ab->peer_mapping_wq);
	init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
	INIT_WORK(&ab->restart_work, ath12k_core_restart);
	INIT_WORK(&ab->reset_work, ath12k_core_reset);
	INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
	INIT_WORK(&ab->dump_work, ath12k_coredump_upload);
	INIT_WORK(&ab->update_11d_work, ath12k_update_11d);

	timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
	init_completion(&ab->htc_suspend);
	init_completion(&ab->restart_completed);
	init_completion(&ab->wow.wakeup_completed);

	ab->dev = dev;
	ab->hif.bus = bus;
	/* U8_MAX means "not yet reported by firmware via QMI" */
	ab->qmi.num_radios = U8_MAX;
	ab->single_chip_mlo_support = false;

	/* Device index used to identify the devices in a group.
	 *
	 * In Intra-device MLO, only one device present in a group,
	 * so it is always zero.
	 *
	 * In Inter-device MLO, Multiple device present in a group,
	 * expect non-zero value.
	 */
	ab->device_id = 0;

	return ab;

err_free_wq:
	destroy_workqueue(ab->workqueue);
err_sc_free:
	kfree(ab);
	return NULL;
}
2213 
2214 static int ath12k_init(void)
2215 {
2216 	ahb_err = ath12k_ahb_init();
2217 	if (ahb_err)
2218 		pr_warn("Failed to initialize ath12k AHB device: %d\n", ahb_err);
2219 
2220 	pci_err = ath12k_pci_init();
2221 	if (pci_err)
2222 		pr_warn("Failed to initialize ath12k PCI device: %d\n", pci_err);
2223 
2224 	/* If both failed, return one of the failures (arbitrary) */
2225 	return ahb_err && pci_err ? ahb_err : 0;
2226 }
2227 
2228 static void ath12k_exit(void)
2229 {
2230 	if (!pci_err)
2231 		ath12k_pci_exit();
2232 
2233 	if (!ahb_err)
2234 		ath12k_ahb_exit();
2235 }
2236 
/* Module registration and metadata */
module_init(ath12k_init);
module_exit(ath12k_exit);

MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN devices");
MODULE_LICENSE("Dual BSD/GPL");
2242