1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <linux/module.h>
8 #include <linux/slab.h>
9 #include <linux/remoteproc.h>
10 #include <linux/firmware.h>
11 #include <linux/of.h>
12 #include <linux/of_graph.h>
13 #include "core.h"
14 #include "dp_tx.h"
15 #include "dp_rx.h"
16 #include "debug.h"
17 #include "hif.h"
18 #include "fw.h"
19 #include "debugfs.h"
20 #include "wow.h"
21 
22 unsigned int ath12k_debug_mask;
23 module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
24 MODULE_PARM_DESC(debug_mask, "Debugging mask");
25 
26 bool ath12k_ftm_mode;
27 module_param_named(ftm_mode, ath12k_ftm_mode, bool, 0444);
28 MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");
29 
/* protected by ath12k_hw_group_mutex */
31 static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);
32 
33 static DEFINE_MUTEX(ath12k_hw_group_mutex);
34 
static int ath12k_core_rfkill_config(struct ath12k_base *ab)
36 {
37 	struct ath12k *ar;
38 	int ret = 0, i;
39 
40 	if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
41 		return 0;
42 
43 	if (ath12k_acpi_get_disable_rfkill(ab))
44 		return 0;
45 
46 	for (i = 0; i < ab->num_radios; i++) {
47 		ar = ab->pdevs[i].ar;
48 
49 		ret = ath12k_mac_rfkill_config(ar);
50 		if (ret && ret != -EOPNOTSUPP) {
51 			ath12k_warn(ab, "failed to configure rfkill: %d", ret);
52 			return ret;
53 		}
54 	}
55 
56 	return ret;
57 }
58 
/* Check whether we need to continue with the suspend/resume operation.
 * Return:
 *	a negative value: an error occurred, do not continue.
 *	0: no error, but do not continue.
 *	a positive value: no error, continue.
 */
static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
66 {
67 	struct ath12k *ar;
68 
69 	if (!ab->hw_params->supports_suspend)
70 		return -EOPNOTSUPP;
71 
	/* So far only single_pdev_only chips set supports_suspend to true,
	 * so pass 0 as a dummy pdev_id here.
	 */
75 	ar = ab->pdevs[0].ar;
76 	if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
77 		return 0;
78 
79 	return 1;
80 }
81 
int ath12k_core_suspend(struct ath12k_base *ab)
83 {
84 	struct ath12k *ar;
85 	int ret, i;
86 
87 	ret = ath12k_core_continue_suspend_resume(ab);
88 	if (ret <= 0)
89 		return ret;
90 
91 	for (i = 0; i < ab->num_radios; i++) {
92 		ar = ab->pdevs[i].ar;
93 		if (!ar)
94 			continue;
95 
96 		wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
97 
98 		ret = ath12k_mac_wait_tx_complete(ar);
99 		if (ret) {
100 			wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
101 			ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
102 			return ret;
103 		}
104 
105 		wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
106 	}
107 
	/* The PM framework skips the suspend_late/resume_early callbacks
	 * if another device reports an error from its suspend callback.
	 * However, ath12k_core_resume() would still be called because we
	 * return success here, which puts us on the dpm_suspended_list.
	 * Since we would not go through a power down/up cycle in that case,
	 * complete(&ab->restart_completed) is never called from
	 * ath12k_core_restart(), making ath12k_core_resume() time out.
	 * Complete it here to avoid that. This is also safe when no error
	 * occurs and suspend_late/resume_early do run, because the
	 * completion is reinitialized in ath12k_core_resume_early().
	 */
119 	complete(&ab->restart_completed);
120 
121 	return 0;
122 }
123 EXPORT_SYMBOL(ath12k_core_suspend);
124 
int ath12k_core_suspend_late(struct ath12k_base *ab)
126 {
127 	int ret;
128 
129 	ret = ath12k_core_continue_suspend_resume(ab);
130 	if (ret <= 0)
131 		return ret;
132 
133 	ath12k_acpi_stop(ab);
134 
135 	ath12k_hif_irq_disable(ab);
136 	ath12k_hif_ce_irq_disable(ab);
137 
138 	ath12k_hif_power_down(ab, true);
139 
140 	return 0;
141 }
142 EXPORT_SYMBOL(ath12k_core_suspend_late);
143 
int ath12k_core_resume_early(struct ath12k_base *ab)
145 {
146 	int ret;
147 
148 	ret = ath12k_core_continue_suspend_resume(ab);
149 	if (ret <= 0)
150 		return ret;
151 
152 	reinit_completion(&ab->restart_completed);
153 	ret = ath12k_hif_power_up(ab);
154 	if (ret)
155 		ath12k_warn(ab, "failed to power up hif during resume: %d\n", ret);
156 
157 	return ret;
158 }
159 EXPORT_SYMBOL(ath12k_core_resume_early);
160 
int ath12k_core_resume(struct ath12k_base *ab)
162 {
163 	long time_left;
164 	int ret;
165 
166 	ret = ath12k_core_continue_suspend_resume(ab);
167 	if (ret <= 0)
168 		return ret;
169 
170 	time_left = wait_for_completion_timeout(&ab->restart_completed,
171 						ATH12K_RESET_TIMEOUT_HZ);
172 	if (time_left == 0) {
173 		ath12k_warn(ab, "timeout while waiting for restart complete");
174 		return -ETIMEDOUT;
175 	}
176 
177 	return 0;
178 }
179 EXPORT_SYMBOL(ath12k_core_resume);
180 
static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
182 					   size_t name_len, bool with_variant,
183 					   bool bus_type_mode, bool with_default)
184 {
185 	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
186 	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
187 
188 	if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
189 		scnprintf(variant, sizeof(variant), ",variant=%s",
190 			  ab->qmi.target.bdf_ext);
191 
192 	switch (ab->id.bdf_search) {
193 	case ATH12K_BDF_SEARCH_BUS_AND_BOARD:
194 		if (bus_type_mode)
195 			scnprintf(name, name_len,
196 				  "bus=%s",
197 				  ath12k_bus_str(ab->hif.bus));
198 		else
199 			scnprintf(name, name_len,
200 				  "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
201 				  ath12k_bus_str(ab->hif.bus),
202 				  ab->id.vendor, ab->id.device,
203 				  ab->id.subsystem_vendor,
204 				  ab->id.subsystem_device,
205 				  ab->qmi.target.chip_id,
206 				  ab->qmi.target.board_id,
207 				  variant);
208 		break;
209 	default:
210 		scnprintf(name, name_len,
211 			  "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
212 			  ath12k_bus_str(ab->hif.bus),
213 			  ab->qmi.target.chip_id,
214 			  with_default ?
215 			  ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id,
216 			  variant);
217 		break;
218 	}
219 
220 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);
221 
222 	return 0;
223 }
224 
static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
226 					 size_t name_len)
227 {
228 	return __ath12k_core_create_board_name(ab, name, name_len, true, false, false);
229 }
230 
static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
232 						  size_t name_len)
233 {
234 	return __ath12k_core_create_board_name(ab, name, name_len, false, false, true);
235 }
236 
static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
238 						  size_t name_len)
239 {
240 	return __ath12k_core_create_board_name(ab, name, name_len, false, true, true);
241 }
242 
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
244 						    const char *file)
245 {
246 	const struct firmware *fw;
247 	char path[100];
248 	int ret;
249 
250 	if (!file)
251 		return ERR_PTR(-ENOENT);
252 
253 	ath12k_core_create_firmware_path(ab, file, path, sizeof(path));
254 
255 	ret = firmware_request_nowarn(&fw, path, ab->dev);
256 	if (ret)
257 		return ERR_PTR(ret);
258 
259 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot firmware request %s size %zu\n",
260 		   path, fw->size);
261 
262 	return fw;
263 }
264 
void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
266 {
267 	if (!IS_ERR(bd->fw))
268 		release_firmware(bd->fw);
269 
270 	memset(bd, 0, sizeof(*bd));
271 }
272 
static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
274 					 struct ath12k_board_data *bd,
275 					 const void *buf, size_t buf_len,
276 					 const char *boardname,
277 					 int ie_id,
278 					 int name_id,
279 					 int data_id)
280 {
281 	const struct ath12k_fw_ie *hdr;
282 	bool name_match_found;
283 	int ret, board_ie_id;
284 	size_t board_ie_len;
285 	const void *board_ie_data;
286 
287 	name_match_found = false;
288 
289 	/* go through ATH12K_BD_IE_BOARD_/ATH12K_BD_IE_REGDB_ elements */
290 	while (buf_len > sizeof(struct ath12k_fw_ie)) {
291 		hdr = buf;
292 		board_ie_id = le32_to_cpu(hdr->id);
293 		board_ie_len = le32_to_cpu(hdr->len);
294 		board_ie_data = hdr->data;
295 
296 		buf_len -= sizeof(*hdr);
297 		buf += sizeof(*hdr);
298 
299 		if (buf_len < ALIGN(board_ie_len, 4)) {
300 			ath12k_err(ab, "invalid %s length: %zu < %zu\n",
301 				   ath12k_bd_ie_type_str(ie_id),
302 				   buf_len, ALIGN(board_ie_len, 4));
303 			ret = -EINVAL;
304 			goto out;
305 		}
306 
307 		if (board_ie_id == name_id) {
308 			ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
309 					board_ie_data, board_ie_len);
310 
311 			if (board_ie_len != strlen(boardname))
312 				goto next;
313 
314 			ret = memcmp(board_ie_data, boardname, strlen(boardname));
315 			if (ret)
316 				goto next;
317 
318 			name_match_found = true;
319 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
320 				   "boot found match %s for name '%s'",
321 				   ath12k_bd_ie_type_str(ie_id),
322 				   boardname);
323 		} else if (board_ie_id == data_id) {
324 			if (!name_match_found)
325 				/* no match found */
326 				goto next;
327 
328 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
329 				   "boot found %s for '%s'",
330 				   ath12k_bd_ie_type_str(ie_id),
331 				   boardname);
332 
333 			bd->data = board_ie_data;
334 			bd->len = board_ie_len;
335 
336 			ret = 0;
337 			goto out;
338 		} else {
339 			ath12k_warn(ab, "unknown %s id found: %d\n",
340 				    ath12k_bd_ie_type_str(ie_id),
341 				    board_ie_id);
342 		}
343 next:
344 		/* jump over the padding */
345 		board_ie_len = ALIGN(board_ie_len, 4);
346 
347 		buf_len -= board_ie_len;
348 		buf += board_ie_len;
349 	}
350 
351 	/* no match found */
352 	ret = -ENOENT;
353 
354 out:
355 	return ret;
356 }
357 
static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
359 					      struct ath12k_board_data *bd,
360 					      const char *boardname,
361 					      int ie_id_match,
362 					      int name_id,
363 					      int data_id)
364 {
365 	size_t len, magic_len;
366 	const u8 *data;
367 	char *filename, filepath[100];
368 	size_t ie_len;
369 	struct ath12k_fw_ie *hdr;
370 	int ret, ie_id;
371 
372 	filename = ATH12K_BOARD_API2_FILE;
373 
374 	if (!bd->fw)
375 		bd->fw = ath12k_core_firmware_request(ab, filename);
376 
377 	if (IS_ERR(bd->fw))
378 		return PTR_ERR(bd->fw);
379 
380 	data = bd->fw->data;
381 	len = bd->fw->size;
382 
383 	ath12k_core_create_firmware_path(ab, filename,
384 					 filepath, sizeof(filepath));
385 
386 	/* magic has extra null byte padded */
387 	magic_len = strlen(ATH12K_BOARD_MAGIC) + 1;
388 	if (len < magic_len) {
389 		ath12k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
390 			   filepath, len);
391 		ret = -EINVAL;
392 		goto err;
393 	}
394 
395 	if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
396 		ath12k_err(ab, "found invalid board magic\n");
397 		ret = -EINVAL;
398 		goto err;
399 	}
400 
401 	/* magic is padded to 4 bytes */
402 	magic_len = ALIGN(magic_len, 4);
403 	if (len < magic_len) {
404 		ath12k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
405 			   filepath, len);
406 		ret = -EINVAL;
407 		goto err;
408 	}
409 
410 	data += magic_len;
411 	len -= magic_len;
412 
413 	while (len > sizeof(struct ath12k_fw_ie)) {
414 		hdr = (struct ath12k_fw_ie *)data;
415 		ie_id = le32_to_cpu(hdr->id);
416 		ie_len = le32_to_cpu(hdr->len);
417 
418 		len -= sizeof(*hdr);
419 		data = hdr->data;
420 
421 		if (len < ALIGN(ie_len, 4)) {
422 			ath12k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
423 				   ie_id, ie_len, len);
424 			ret = -EINVAL;
425 			goto err;
426 		}
427 
428 		if (ie_id == ie_id_match) {
429 			ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
430 							    ie_len,
431 							    boardname,
432 							    ie_id_match,
433 							    name_id,
434 							    data_id);
435 			if (ret == -ENOENT)
436 				/* no match found, continue */
437 				goto next;
438 			else if (ret)
439 				/* there was an error, bail out */
440 				goto err;
441 			/* either found or error, so stop searching */
442 			goto out;
443 		}
444 next:
445 		/* jump over the padding */
446 		ie_len = ALIGN(ie_len, 4);
447 
448 		len -= ie_len;
449 		data += ie_len;
450 	}
451 
452 out:
453 	if (!bd->data || !bd->len) {
454 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
455 			   "failed to fetch %s for %s from %s\n",
456 			   ath12k_bd_ie_type_str(ie_id_match),
457 			   boardname, filepath);
458 		ret = -ENODATA;
459 		goto err;
460 	}
461 
462 	return 0;
463 
464 err:
465 	ath12k_core_free_bdf(ab, bd);
466 	return ret;
467 }
468 
int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
470 				       struct ath12k_board_data *bd,
471 				       char *filename)
472 {
473 	bd->fw = ath12k_core_firmware_request(ab, filename);
474 	if (IS_ERR(bd->fw))
475 		return PTR_ERR(bd->fw);
476 
477 	bd->data = bd->fw->data;
478 	bd->len = bd->fw->size;
479 
480 	return 0;
481 }
482 
483 #define BOARD_NAME_SIZE 200
int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
485 {
486 	char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
487 	char *filename, filepath[100];
488 	int bd_api;
489 	int ret;
490 
491 	filename = ATH12K_BOARD_API2_FILE;
492 
493 	ret = ath12k_core_create_board_name(ab, boardname, sizeof(boardname));
494 	if (ret) {
495 		ath12k_err(ab, "failed to create board name: %d", ret);
496 		return ret;
497 	}
498 
499 	bd_api = 2;
500 	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
501 						 ATH12K_BD_IE_BOARD,
502 						 ATH12K_BD_IE_BOARD_NAME,
503 						 ATH12K_BD_IE_BOARD_DATA);
504 	if (!ret)
505 		goto success;
506 
507 	ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
508 						     sizeof(fallback_boardname));
509 	if (ret) {
510 		ath12k_err(ab, "failed to create fallback board name: %d", ret);
511 		return ret;
512 	}
513 
514 	ret = ath12k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
515 						 ATH12K_BD_IE_BOARD,
516 						 ATH12K_BD_IE_BOARD_NAME,
517 						 ATH12K_BD_IE_BOARD_DATA);
518 	if (!ret)
519 		goto success;
520 
521 	bd_api = 1;
522 	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
523 	if (ret) {
524 		ath12k_core_create_firmware_path(ab, filename,
525 						 filepath, sizeof(filepath));
526 		ath12k_err(ab, "failed to fetch board data for %s from %s\n",
527 			   boardname, filepath);
528 		if (memcmp(boardname, fallback_boardname, strlen(boardname)))
529 			ath12k_err(ab, "failed to fetch board data for %s from %s\n",
530 				   fallback_boardname, filepath);
531 
532 		ath12k_err(ab, "failed to fetch board.bin from %s\n",
533 			   ab->hw_params->fw.dir);
534 		return ret;
535 	}
536 
537 success:
538 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", bd_api);
539 	return 0;
540 }
541 
int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd)
543 {
544 	char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
545 	int ret;
546 
547 	ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
548 	if (ret) {
549 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
550 			   "failed to create board name for regdb: %d", ret);
551 		goto exit;
552 	}
553 
554 	ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname,
555 						 ATH12K_BD_IE_REGDB,
556 						 ATH12K_BD_IE_REGDB_NAME,
557 						 ATH12K_BD_IE_REGDB_DATA);
558 	if (!ret)
559 		goto exit;
560 
561 	ret = ath12k_core_create_bus_type_board_name(ab, default_boardname,
562 						     BOARD_NAME_SIZE);
563 	if (ret) {
564 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
565 			   "failed to create default board name for regdb: %d", ret);
566 		goto exit;
567 	}
568 
569 	ret = ath12k_core_fetch_board_data_api_n(ab, bd, default_boardname,
570 						 ATH12K_BD_IE_REGDB,
571 						 ATH12K_BD_IE_REGDB_NAME,
572 						 ATH12K_BD_IE_REGDB_DATA);
573 	if (!ret)
574 		goto exit;
575 
576 	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_REGDB_FILE_NAME);
577 	if (ret)
578 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to fetch %s from %s\n",
579 			   ATH12K_REGDB_FILE_NAME, ab->hw_params->fw.dir);
580 
581 exit:
582 	if (!ret)
583 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "fetched regdb\n");
584 
585 	return ret;
586 }
587 
u32 ath12k_core_get_max_station_per_radio(struct ath12k_base *ab)
589 {
590 	if (ab->num_radios == 2)
591 		return TARGET_NUM_STATIONS_DBS;
592 	else if (ab->num_radios == 3)
593 		return TARGET_NUM_PEERS_PDEV_DBS_SBS;
594 	return TARGET_NUM_STATIONS_SINGLE;
595 }
596 
u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab)
598 {
599 	if (ab->num_radios == 2)
600 		return TARGET_NUM_PEERS_PDEV_DBS;
601 	else if (ab->num_radios == 3)
602 		return TARGET_NUM_PEERS_PDEV_DBS_SBS;
603 	return TARGET_NUM_PEERS_PDEV_SINGLE;
604 }
605 
u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab)
607 {
608 	if (ab->num_radios == 2)
609 		return TARGET_NUM_TIDS(DBS);
610 	else if (ab->num_radios == 3)
611 		return TARGET_NUM_TIDS(DBS_SBS);
612 	return TARGET_NUM_TIDS(SINGLE);
613 }
614 
static void ath12k_core_stop(struct ath12k_base *ab)
616 {
617 	ath12k_core_stopped(ab);
618 
619 	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
620 		ath12k_qmi_firmware_stop(ab);
621 
622 	ath12k_acpi_stop(ab);
623 
624 	ath12k_dp_rx_pdev_reo_cleanup(ab);
625 	ath12k_hif_stop(ab);
626 	ath12k_wmi_detach(ab);
627 	ath12k_dp_free(ab);
628 
629 	/* De-Init of components as needed */
630 }
631 
static void ath12k_core_check_bdfext(const struct dmi_header *hdr, void *data)
633 {
634 	struct ath12k_base *ab = data;
635 	const char *magic = ATH12K_SMBIOS_BDF_EXT_MAGIC;
636 	struct ath12k_smbios_bdf *smbios = (struct ath12k_smbios_bdf *)hdr;
637 	ssize_t copied;
638 	size_t len;
639 	int i;
640 
641 	if (ab->qmi.target.bdf_ext[0] != '\0')
642 		return;
643 
644 	if (hdr->type != ATH12K_SMBIOS_BDF_EXT_TYPE)
645 		return;
646 
647 	if (hdr->length != ATH12K_SMBIOS_BDF_EXT_LENGTH) {
648 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
649 			   "wrong smbios bdf ext type length (%d).\n",
650 			   hdr->length);
651 		return;
652 	}
653 
654 	if (!smbios->bdf_enabled) {
655 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "bdf variant name not found.\n");
656 		return;
657 	}
658 
659 	/* Only one string exists (per spec) */
660 	if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
661 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
662 			   "bdf variant magic does not match.\n");
663 		return;
664 	}
665 
666 	len = min_t(size_t,
667 		    strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
668 	for (i = 0; i < len; i++) {
669 		if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
670 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
671 				   "bdf variant name contains non ascii chars.\n");
672 			return;
673 		}
674 	}
675 
676 	/* Copy extension name without magic prefix */
677 	copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
678 			 sizeof(ab->qmi.target.bdf_ext));
679 	if (copied < 0) {
680 		ath12k_dbg(ab, ATH12K_DBG_BOOT,
681 			   "bdf variant string is longer than the buffer can accommodate\n");
682 		return;
683 	}
684 
685 	ath12k_dbg(ab, ATH12K_DBG_BOOT,
686 		   "found and validated bdf variant smbios_type 0x%x bdf %s\n",
687 		   ATH12K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
688 }
689 
int ath12k_core_check_smbios(struct ath12k_base *ab)
691 {
692 	ab->qmi.target.bdf_ext[0] = '\0';
693 	dmi_walk(ath12k_core_check_bdfext, ab);
694 
695 	if (ab->qmi.target.bdf_ext[0] == '\0')
696 		return -ENODATA;
697 
698 	return 0;
699 }
700 
static int ath12k_core_soc_create(struct ath12k_base *ab)
702 {
703 	int ret;
704 
705 	if (ath12k_ftm_mode) {
706 		ab->fw_mode = ATH12K_FIRMWARE_MODE_FTM;
707 		ath12k_info(ab, "Booting in ftm mode\n");
708 	}
709 
710 	ret = ath12k_qmi_init_service(ab);
711 	if (ret) {
712 		ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
713 		return ret;
714 	}
715 
716 	ath12k_debugfs_soc_create(ab);
717 
718 	ret = ath12k_hif_power_up(ab);
719 	if (ret) {
720 		ath12k_err(ab, "failed to power up :%d\n", ret);
721 		goto err_qmi_deinit;
722 	}
723 
724 	return 0;
725 
726 err_qmi_deinit:
727 	ath12k_debugfs_soc_destroy(ab);
728 	ath12k_qmi_deinit_service(ab);
729 	return ret;
730 }
731 
static void ath12k_core_soc_destroy(struct ath12k_base *ab)
733 {
734 	ath12k_hif_power_down(ab, false);
735 	ath12k_reg_free(ab);
736 	ath12k_debugfs_soc_destroy(ab);
737 	ath12k_qmi_deinit_service(ab);
738 }
739 
static int ath12k_core_pdev_create(struct ath12k_base *ab)
741 {
742 	int ret;
743 
744 	ret = ath12k_dp_pdev_alloc(ab);
745 	if (ret) {
746 		ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);
747 		return ret;
748 	}
749 
750 	return 0;
751 }
752 
static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
754 {
755 	ath12k_dp_pdev_free(ab);
756 }
757 
static int ath12k_core_start(struct ath12k_base *ab)
759 {
760 	int ret;
761 
762 	lockdep_assert_held(&ab->core_lock);
763 
764 	ret = ath12k_wmi_attach(ab);
765 	if (ret) {
766 		ath12k_err(ab, "failed to attach wmi: %d\n", ret);
767 		return ret;
768 	}
769 
770 	ret = ath12k_htc_init(ab);
771 	if (ret) {
772 		ath12k_err(ab, "failed to init htc: %d\n", ret);
773 		goto err_wmi_detach;
774 	}
775 
776 	ret = ath12k_hif_start(ab);
777 	if (ret) {
778 		ath12k_err(ab, "failed to start HIF: %d\n", ret);
779 		goto err_wmi_detach;
780 	}
781 
782 	ret = ath12k_htc_wait_target(&ab->htc);
783 	if (ret) {
784 		ath12k_err(ab, "failed to connect to HTC: %d\n", ret);
785 		goto err_hif_stop;
786 	}
787 
788 	ret = ath12k_dp_htt_connect(&ab->dp);
789 	if (ret) {
790 		ath12k_err(ab, "failed to connect to HTT: %d\n", ret);
791 		goto err_hif_stop;
792 	}
793 
794 	ret = ath12k_wmi_connect(ab);
795 	if (ret) {
796 		ath12k_err(ab, "failed to connect wmi: %d\n", ret);
797 		goto err_hif_stop;
798 	}
799 
800 	ret = ath12k_htc_start(&ab->htc);
801 	if (ret) {
802 		ath12k_err(ab, "failed to start HTC: %d\n", ret);
803 		goto err_hif_stop;
804 	}
805 
806 	ret = ath12k_wmi_wait_for_service_ready(ab);
807 	if (ret) {
808 		ath12k_err(ab, "failed to receive wmi service ready event: %d\n",
809 			   ret);
810 		goto err_hif_stop;
811 	}
812 
813 	ath12k_dp_cc_config(ab);
814 
815 	ret = ath12k_dp_rx_pdev_reo_setup(ab);
816 	if (ret) {
817 		ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
818 		goto err_hif_stop;
819 	}
820 
821 	ath12k_dp_hal_rx_desc_init(ab);
822 
823 	ret = ath12k_wmi_cmd_init(ab);
824 	if (ret) {
825 		ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
826 		goto err_reo_cleanup;
827 	}
828 
829 	ret = ath12k_wmi_wait_for_unified_ready(ab);
830 	if (ret) {
831 		ath12k_err(ab, "failed to receive wmi unified ready event: %d\n",
832 			   ret);
833 		goto err_reo_cleanup;
834 	}
835 
836 	/* put hardware to DBS mode */
837 	if (ab->hw_params->single_pdev_only) {
838 		ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
839 		if (ret) {
840 			ath12k_err(ab, "failed to send dbs mode: %d\n", ret);
841 			goto err_reo_cleanup;
842 		}
843 	}
844 
845 	ret = ath12k_dp_tx_htt_h2t_ver_req_msg(ab);
846 	if (ret) {
847 		ath12k_err(ab, "failed to send htt version request message: %d\n",
848 			   ret);
849 		goto err_reo_cleanup;
850 	}
851 
852 	ath12k_acpi_set_dsm_func(ab);
853 
854 	if (!test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
855 		/* Indicate the core start in the appropriate group */
856 		ath12k_core_started(ab);
857 
858 	return 0;
859 
860 err_reo_cleanup:
861 	ath12k_dp_rx_pdev_reo_cleanup(ab);
862 err_hif_stop:
863 	ath12k_hif_stop(ab);
864 err_wmi_detach:
865 	ath12k_wmi_detach(ab);
866 	return ret;
867 }
868 
static void ath12k_core_device_cleanup(struct ath12k_base *ab)
870 {
871 	mutex_lock(&ab->core_lock);
872 
873 	ath12k_hif_irq_disable(ab);
874 	ath12k_core_pdev_destroy(ab);
875 
876 	mutex_unlock(&ab->core_lock);
877 }
878 
static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
880 {
881 	struct ath12k_base *ab;
882 	int i;
883 
884 	lockdep_assert_held(&ag->mutex);
885 
886 	clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
887 
888 	ath12k_mac_unregister(ag);
889 
890 	for (i = ag->num_devices - 1; i >= 0; i--) {
891 		ab = ag->ab[i];
892 		if (!ab)
893 			continue;
894 		ath12k_core_device_cleanup(ab);
895 	}
896 
897 	ath12k_mac_destroy(ag);
898 }
899 
u8 ath12k_get_num_partner_link(struct ath12k *ar)
901 {
902 	struct ath12k_base *partner_ab, *ab = ar->ab;
903 	struct ath12k_hw_group *ag = ab->ag;
904 	struct ath12k_pdev *pdev;
905 	u8 num_link = 0;
906 	int i, j;
907 
908 	lockdep_assert_held(&ag->mutex);
909 
910 	for (i = 0; i < ag->num_devices; i++) {
911 		partner_ab = ag->ab[i];
912 
913 		for (j = 0; j < partner_ab->num_radios; j++) {
914 			pdev = &partner_ab->pdevs[j];
915 
916 			/* Avoid the self link */
917 			if (ar == pdev->ar)
918 				continue;
919 
920 			num_link++;
921 		}
922 	}
923 
924 	return num_link;
925 }
926 
static int __ath12k_mac_mlo_ready(struct ath12k *ar)
928 {
929 	u8 num_link = ath12k_get_num_partner_link(ar);
930 	int ret;
931 
932 	if (num_link == 0)
933 		return 0;
934 
935 	ret = ath12k_wmi_mlo_ready(ar);
936 	if (ret) {
937 		ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
938 			   ar->pdev_idx, ret);
939 		return ret;
940 	}
941 
942 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n",
943 		   ar->pdev_idx);
944 
945 	return 0;
946 }
947 
int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
949 {
950 	struct ath12k_hw *ah;
951 	struct ath12k *ar;
952 	int ret;
953 	int i, j;
954 
955 	for (i = 0; i < ag->num_hw; i++) {
956 		ah = ag->ah[i];
957 		if (!ah)
958 			continue;
959 
960 		for_each_ar(ah, ar, j) {
961 			ar = &ah->radio[j];
962 			ret = __ath12k_mac_mlo_ready(ar);
963 			if (ret)
964 				return ret;
965 		}
966 	}
967 
968 	return 0;
969 }
970 
static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
972 {
973 	int ret, i;
974 
975 	if (!ag->mlo_capable)
976 		return 0;
977 
978 	ret = ath12k_mac_mlo_setup(ag);
979 	if (ret)
980 		return ret;
981 
982 	for (i = 0; i < ag->num_devices; i++)
983 		ath12k_dp_partner_cc_init(ag->ab[i]);
984 
985 	ret = ath12k_mac_mlo_ready(ag);
986 	if (ret)
987 		goto err_mlo_teardown;
988 
989 	return 0;
990 
991 err_mlo_teardown:
992 	ath12k_mac_mlo_teardown(ag);
993 
994 	return ret;
995 }
996 
static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
998 {
999 	struct ath12k_base *ab;
1000 	int ret, i;
1001 
1002 	lockdep_assert_held(&ag->mutex);
1003 
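	/* If the group is already registered (e.g. when restarting after a
	 * firmware crash), skip mac allocation/registration and go straight
	 * to per-device pdev creation below.
	 */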
1004 	if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
1005 		goto core_pdev_create;
1006 
1007 	ret = ath12k_mac_allocate(ag);
1008 	if (WARN_ON(ret))
1009 		return ret;
1010 
1011 	ret = ath12k_core_mlo_setup(ag);
1012 	if (WARN_ON(ret))
1013 		goto err_mac_destroy;
1014 
1015 	ret = ath12k_mac_register(ag);
1016 	if (WARN_ON(ret))
1017 		goto err_mlo_teardown;
1018 
1019 	set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
1020 
1021 core_pdev_create:
1022 	for (i = 0; i < ag->num_devices; i++) {
1023 		ab = ag->ab[i];
1024 		if (!ab)
1025 			continue;
1026 
1027 		mutex_lock(&ab->core_lock);
1028 
1029 		ret = ath12k_core_pdev_create(ab);
1030 		if (ret) {
1031 			ath12k_err(ab, "failed to create pdev core %d\n", ret);
1032 			mutex_unlock(&ab->core_lock);
1033 			goto err;
1034 		}
1035 
1036 		ath12k_hif_irq_enable(ab);
1037 
1038 		ret = ath12k_core_rfkill_config(ab);
1039 		if (ret && ret != -EOPNOTSUPP) {
1040 			mutex_unlock(&ab->core_lock);
1041 			goto err;
1042 		}
1043 
1044 		mutex_unlock(&ab->core_lock);
1045 	}
1046 
1047 	return 0;
1048 
1049 err:
1050 	ath12k_core_hw_group_stop(ag);
1051 	return ret;
1052 
1053 err_mlo_teardown:
1054 	ath12k_mac_mlo_teardown(ag);
1055 
1056 err_mac_destroy:
1057 	ath12k_mac_destroy(ag);
1058 
1059 	return ret;
1060 }
1061 
static int ath12k_core_start_firmware(struct ath12k_base *ab,
1063 				      enum ath12k_firmware_mode mode)
1064 {
1065 	int ret;
1066 
1067 	ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
1068 				    &ab->qmi.ce_cfg.shadow_reg_v3_len);
1069 
1070 	ret = ath12k_qmi_firmware_start(ab, mode);
1071 	if (ret) {
1072 		ath12k_err(ab, "failed to send firmware start: %d\n", ret);
1073 		return ret;
1074 	}
1075 
1076 	return ret;
1077 }
1078 
1079 static inline
bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
1081 {
1082 	lockdep_assert_held(&ag->mutex);
1083 
1084 	return (ag->num_started == ag->num_devices);
1085 }
1086 
static void ath12k_core_trigger_partner(struct ath12k_base *ab)
1088 {
1089 	struct ath12k_hw_group *ag = ab->ag;
1090 	struct ath12k_base *partner_ab;
1091 	bool found = false;
1092 	int i;
1093 
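	/* Trigger the QMI host capability exchange on every partner device
	 * listed after this one in the group so that those devices can
	 * proceed with their own setup.
	 */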
1094 	for (i = 0; i < ag->num_devices; i++) {
1095 		partner_ab = ag->ab[i];
1096 		if (!partner_ab)
1097 			continue;
1098 
1099 		if (found)
1100 			ath12k_qmi_trigger_host_cap(partner_ab);
1101 
1102 		found = (partner_ab == ab);
1103 	}
1104 }
1105 
int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
1107 {
1108 	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
1109 	int ret, i;
1110 
1111 	ret = ath12k_core_start_firmware(ab, ab->fw_mode);
1112 	if (ret) {
1113 		ath12k_err(ab, "failed to start firmware: %d\n", ret);
1114 		return ret;
1115 	}
1116 
1117 	ret = ath12k_ce_init_pipes(ab);
1118 	if (ret) {
1119 		ath12k_err(ab, "failed to initialize CE: %d\n", ret);
1120 		goto err_firmware_stop;
1121 	}
1122 
1123 	ret = ath12k_dp_alloc(ab);
1124 	if (ret) {
1125 		ath12k_err(ab, "failed to init DP: %d\n", ret);
1126 		goto err_firmware_stop;
1127 	}
1128 
1129 	mutex_lock(&ag->mutex);
1130 	mutex_lock(&ab->core_lock);
1131 
1132 	ret = ath12k_core_start(ab);
1133 	if (ret) {
1134 		ath12k_err(ab, "failed to start core: %d\n", ret);
1135 		goto err_dp_free;
1136 	}
1137 
1138 	mutex_unlock(&ab->core_lock);
1139 
1140 	if (ath12k_core_hw_group_start_ready(ag)) {
1141 		ret = ath12k_core_hw_group_start(ag);
1142 		if (ret) {
1143 			ath12k_warn(ab, "unable to start hw group\n");
1144 			goto err_core_stop;
1145 		}
1146 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
1147 	} else {
1148 		ath12k_core_trigger_partner(ab);
1149 	}
1150 
1151 	mutex_unlock(&ag->mutex);
1152 
1153 	return 0;
1154 
1155 err_core_stop:
1156 	for (i = ag->num_devices - 1; i >= 0; i--) {
1157 		ab = ag->ab[i];
1158 		if (!ab)
1159 			continue;
1160 
1161 		mutex_lock(&ab->core_lock);
1162 		ath12k_core_stop(ab);
1163 		mutex_unlock(&ab->core_lock);
1164 	}
1165 	mutex_unlock(&ag->mutex);
1166 	goto exit;
1167 
1168 err_dp_free:
1169 	ath12k_dp_free(ab);
1170 	mutex_unlock(&ab->core_lock);
1171 	mutex_unlock(&ag->mutex);
1172 
1173 err_firmware_stop:
1174 	ath12k_qmi_firmware_stop(ab);
1175 
1176 exit:
1177 	return ret;
1178 }
1179 
static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
1181 {
1182 	int ret;
1183 
1184 	mutex_lock(&ab->core_lock);
1185 	ath12k_dp_pdev_free(ab);
1186 	ath12k_ce_cleanup_pipes(ab);
1187 	ath12k_wmi_detach(ab);
1188 	ath12k_dp_rx_pdev_reo_cleanup(ab);
1189 	mutex_unlock(&ab->core_lock);
1190 
1191 	ath12k_dp_free(ab);
1192 	ath12k_hal_srng_deinit(ab);
1193 
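	/* All vdevs are gone after the firmware restart, so mark every
	 * possible vdev id as free again.
	 */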
1194 	ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
1195 
1196 	ret = ath12k_hal_srng_init(ab);
1197 	if (ret)
1198 		return ret;
1199 
1200 	clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
1201 
1202 	ret = ath12k_core_qmi_firmware_ready(ab);
1203 	if (ret)
1204 		goto err_hal_srng_deinit;
1205 
1206 	clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
1207 
1208 	return 0;
1209 
1210 err_hal_srng_deinit:
1211 	ath12k_hal_srng_deinit(ab);
1212 	return ret;
1213 }
1214 
static void ath12k_rfkill_work(struct work_struct *work)
1216 {
1217 	struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
1218 	struct ath12k_hw_group *ag = ab->ag;
1219 	struct ath12k *ar;
1220 	struct ath12k_hw *ah;
1221 	struct ieee80211_hw *hw;
1222 	bool rfkill_radio_on;
1223 	int i, j;
1224 
1225 	spin_lock_bh(&ab->base_lock);
1226 	rfkill_radio_on = ab->rfkill_radio_on;
1227 	spin_unlock_bh(&ab->base_lock);
1228 
1229 	for (i = 0; i < ag->num_hw; i++) {
1230 		ah = ath12k_ag_to_ah(ag, i);
1231 		if (!ah)
1232 			continue;
1233 
1234 		for (j = 0; j < ah->num_radio; j++) {
1235 			ar = &ah->radio[j];
1236 			if (!ar)
1237 				continue;
1238 
1239 			ath12k_mac_rfkill_enable_radio(ar, rfkill_radio_on);
1240 		}
1241 
1242 		hw = ah->hw;
1243 		wiphy_rfkill_set_hw_state(hw->wiphy, !rfkill_radio_on);
1244 	}
1245 }
1246 
void ath12k_core_halt(struct ath12k *ar)
1248 {
1249 	struct ath12k_base *ab = ar->ab;
1250 
1251 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
1252 
1253 	ar->num_created_vdevs = 0;
1254 	ar->allocated_vdev_map = 0;
1255 
1256 	ath12k_mac_scan_finish(ar);
1257 	ath12k_mac_peer_cleanup_all(ar);
1258 	cancel_delayed_work_sync(&ar->scan.timeout);
1259 	cancel_work_sync(&ar->regd_update_work);
1260 	cancel_work_sync(&ab->rfkill_work);
1261 
1262 	rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
1263 	synchronize_rcu();
1264 	INIT_LIST_HEAD(&ar->arvifs);
1265 	idr_init(&ar->txmgmt_idr);
1266 }
1267 
static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
1269 {
1270 	struct ath12k_hw_group *ag = ab->ag;
1271 	struct ath12k *ar;
1272 	struct ath12k_hw *ah;
1273 	int i, j;
1274 
1275 	spin_lock_bh(&ab->base_lock);
1276 	ab->stats.fw_crash_counter++;
1277 	spin_unlock_bh(&ab->base_lock);
1278 
1279 	if (ab->is_reset)
1280 		set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
1281 
1282 	for (i = 0; i < ag->num_hw; i++) {
1283 		ah = ath12k_ag_to_ah(ag, i);
1284 		if (!ah || ah->state == ATH12K_HW_STATE_OFF ||
1285 		    ah->state == ATH12K_HW_STATE_TM)
1286 			continue;
1287 
1288 		ieee80211_stop_queues(ah->hw);
1289 
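		/* Drain pending tx and complete any outstanding operations so
		 * that waiters are not left blocked on events the crashed
		 * firmware will never deliver.
		 */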
1290 		for (j = 0; j < ah->num_radio; j++) {
1291 			ar = &ah->radio[j];
1292 
1293 			ath12k_mac_drain_tx(ar);
1294 			complete(&ar->scan.started);
1295 			complete(&ar->scan.completed);
1296 			complete(&ar->scan.on_channel);
1297 			complete(&ar->peer_assoc_done);
1298 			complete(&ar->peer_delete_done);
1299 			complete(&ar->install_key_done);
1300 			complete(&ar->vdev_setup_done);
1301 			complete(&ar->vdev_delete_done);
1302 			complete(&ar->bss_survey_done);
1303 
1304 			wake_up(&ar->dp.tx_empty_waitq);
1305 			idr_for_each(&ar->txmgmt_idr,
1306 				     ath12k_mac_tx_mgmt_pending_free, ar);
1307 			idr_destroy(&ar->txmgmt_idr);
1308 			wake_up(&ar->txmgmt_empty_waitq);
1309 		}
1310 	}
1311 
1312 	wake_up(&ab->wmi_ab.tx_credits_wq);
1313 	wake_up(&ab->peer_mapping_wq);
1314 }
1315 
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
1317 {
1318 	struct ath12k_hw_group *ag = ab->ag;
1319 	struct ath12k_hw *ah;
1320 	struct ath12k *ar;
1321 	int i, j;
1322 
1323 	for (i = 0; i < ag->num_hw; i++) {
1324 		ah = ath12k_ag_to_ah(ag, i);
1325 		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
1326 			continue;
1327 
1328 		wiphy_lock(ah->hw->wiphy);
1329 		mutex_lock(&ah->hw_mutex);
1330 
1331 		switch (ah->state) {
1332 		case ATH12K_HW_STATE_ON:
1333 			ah->state = ATH12K_HW_STATE_RESTARTING;
1334 
1335 			for (j = 0; j < ah->num_radio; j++) {
1336 				ar = &ah->radio[j];
1337 				ath12k_core_halt(ar);
1338 			}
1339 
1340 			break;
1341 		case ATH12K_HW_STATE_OFF:
1342 			ath12k_warn(ab,
1343 				    "cannot restart hw %d that hasn't been started\n",
1344 				    i);
1345 			break;
1346 		case ATH12K_HW_STATE_RESTARTING:
1347 			break;
1348 		case ATH12K_HW_STATE_RESTARTED:
1349 			ah->state = ATH12K_HW_STATE_WEDGED;
1350 			fallthrough;
1351 		case ATH12K_HW_STATE_WEDGED:
1352 			ath12k_warn(ab,
1353 				    "device is wedged, will not restart hw %d\n", i);
1354 			break;
1355 		case ATH12K_HW_STATE_TM:
1356 			ath12k_warn(ab, "fw mode reset done radio %d\n", i);
1357 			break;
1358 		}
1359 
1360 		mutex_unlock(&ah->hw_mutex);
1361 		wiphy_unlock(ah->hw->wiphy);
1362 	}
1363 
1364 	complete(&ab->driver_recovery);
1365 }
1366 
static void ath12k_core_restart(struct work_struct *work)
1368 {
1369 	struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
1370 	struct ath12k_hw_group *ag = ab->ag;
1371 	struct ath12k_hw *ah;
1372 	int ret, i;
1373 
1374 	ret = ath12k_core_reconfigure_on_crash(ab);
1375 	if (ret) {
1376 		ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
1377 		return;
1378 	}
1379 
1380 	if (ab->is_reset) {
1381 		if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
1382 			atomic_dec(&ab->reset_count);
1383 			complete(&ab->reset_complete);
1384 			ab->is_reset = false;
1385 			atomic_set(&ab->fail_cont_count, 0);
1386 			ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
1387 		}
1388 
1389 		for (i = 0; i < ag->num_hw; i++) {
1390 			ah = ath12k_ag_to_ah(ab->ag, i);
1391 			ieee80211_restart_hw(ah->hw);
1392 		}
1393 	}
1394 
1395 	complete(&ab->restart_completed);
1396 }
1397 
static void ath12k_core_reset(struct work_struct *work)
1399 {
1400 	struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
1401 	int reset_count, fail_cont_count;
1402 	long time_left;
1403 
1404 	if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
1405 		ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
1406 		return;
1407 	}
1408 
	/* Sometimes recovery fails and every subsequent attempt fails as well,
	 * so bail out here to avoid an endless recovery loop that can never
	 * succeed.
	 */
1412 	fail_cont_count = atomic_read(&ab->fail_cont_count);
1413 
1414 	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL)
1415 		return;
1416 
1417 	if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
1418 	    time_before(jiffies, ab->reset_fail_timeout))
1419 		return;
1420 
1421 	reset_count = atomic_inc_return(&ab->reset_count);
1422 
1423 	if (reset_count > 1) {
		/* Sometimes another reset worker is scheduled before the previous
		 * one has completed; the second worker would then tear down the
		 * state of the first, so wait for the previous reset instead.
		 */
1428 		ath12k_warn(ab, "already resetting count %d\n", reset_count);
1429 
1430 		reinit_completion(&ab->reset_complete);
1431 		time_left = wait_for_completion_timeout(&ab->reset_complete,
1432 							ATH12K_RESET_TIMEOUT_HZ);
1433 		if (time_left) {
1434 			ath12k_dbg(ab, ATH12K_DBG_BOOT, "to skip reset\n");
1435 			atomic_dec(&ab->reset_count);
1436 			return;
1437 		}
1438 
1439 		ab->reset_fail_timeout = jiffies + ATH12K_RESET_FAIL_TIMEOUT_HZ;
		/* Record the continuous recovery failure count when recovery fails */
1441 		fail_cont_count = atomic_inc_return(&ab->fail_cont_count);
1442 	}
1443 
1444 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");
1445 
1446 	ab->is_reset = true;
1447 	atomic_set(&ab->recovery_count, 0);
1448 
1449 	ath12k_coredump_collect(ab);
1450 	ath12k_core_pre_reconfigure_recovery(ab);
1451 
1452 	ath12k_core_post_reconfigure_recovery(ab);
1453 
1454 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");
1455 
1456 	ath12k_hif_irq_disable(ab);
1457 	ath12k_hif_ce_irq_disable(ab);
1458 
1459 	ath12k_hif_power_down(ab, false);
1460 	ath12k_hif_power_up(ab);
1461 
1462 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
1463 }
1464 
int ath12k_core_pre_init(struct ath12k_base *ab)
1466 {
1467 	int ret;
1468 
1469 	ret = ath12k_hw_init(ab);
1470 	if (ret) {
1471 		ath12k_err(ab, "failed to init hw params: %d\n", ret);
1472 		return ret;
1473 	}
1474 
1475 	ath12k_fw_map(ab);
1476 
1477 	return 0;
1478 }
1479 
static int ath12k_core_panic_handler(struct notifier_block *nb,
1481 				     unsigned long action, void *data)
1482 {
1483 	struct ath12k_base *ab = container_of(nb, struct ath12k_base,
1484 					      panic_nb);
1485 
1486 	return ath12k_hif_panic_handler(ab);
1487 }
1488 
static int ath12k_core_panic_notifier_register(struct ath12k_base *ab)
1490 {
1491 	ab->panic_nb.notifier_call = ath12k_core_panic_handler;
1492 
1493 	return atomic_notifier_chain_register(&panic_notifier_list,
1494 					      &ab->panic_nb);
1495 }
1496 
static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
1498 {
1499 	atomic_notifier_chain_unregister(&panic_notifier_list,
1500 					 &ab->panic_nb);
1501 }
1502 
1503 static inline
bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
1505 {
1506 	lockdep_assert_held(&ag->mutex);
1507 
1508 	return (ag->num_probed == ag->num_devices);
1509 }
1510 
static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab)
1512 {
1513 	struct ath12k_hw_group *ag;
1514 	int count = 0;
1515 
1516 	lockdep_assert_held(&ath12k_hw_group_mutex);
1517 
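	/* The id of the new group is simply the number of groups created
	 * so far.
	 */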
1518 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1519 		count++;
1520 
1521 	ag = kzalloc(sizeof(*ag), GFP_KERNEL);
1522 	if (!ag)
1523 		return NULL;
1524 
1525 	ag->id = count;
1526 	list_add(&ag->list, &ath12k_hw_group_list);
1527 	mutex_init(&ag->mutex);
1528 	ag->mlo_capable = false;
1529 
1530 	return ag;
1531 }
1532 
static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
1534 {
1535 	mutex_lock(&ath12k_hw_group_mutex);
1536 
1537 	list_del(&ag->list);
1538 	kfree(ag);
1539 
1540 	mutex_unlock(&ath12k_hw_group_mutex);
1541 }
1542 
static struct ath12k_hw_group *ath12k_core_hw_group_find_by_dt(struct ath12k_base *ab)
1544 {
1545 	struct ath12k_hw_group *ag;
1546 	int i;
1547 
1548 	if (!ab->dev->of_node)
1549 		return NULL;
1550 
1551 	list_for_each_entry(ag, &ath12k_hw_group_list, list)
1552 		for (i = 0; i < ag->num_devices; i++)
1553 			if (ag->wsi_node[i] == ab->dev->of_node)
1554 				return ag;
1555 
1556 	return NULL;
1557 }
1558 
static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
1560 				    struct ath12k_base *ab)
1561 {
1562 	struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev;
1563 	struct device_node *tx_endpoint, *next_rx_endpoint;
1564 	int device_count = 0;
1565 
1566 	next_wsi_dev = wsi_dev;
1567 
1568 	if (!next_wsi_dev)
1569 		return -ENODEV;
1570 
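	/* Walk the WSI connection graph: follow each node's TX endpoint to
	 * the RX endpoint of the next device and record every device node
	 * visited, until the walk loops back to the starting node.
	 */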
1571 	do {
1572 		ag->wsi_node[device_count] = next_wsi_dev;
1573 
1574 		tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1);
1575 		if (!tx_endpoint) {
1576 			of_node_put(next_wsi_dev);
1577 			return -ENODEV;
1578 		}
1579 
1580 		next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint);
1581 		if (!next_rx_endpoint) {
1582 			of_node_put(next_wsi_dev);
1583 			of_node_put(tx_endpoint);
1584 			return -ENODEV;
1585 		}
1586 
1587 		of_node_put(tx_endpoint);
1588 		of_node_put(next_wsi_dev);
1589 
1590 		next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint);
1591 		if (!next_wsi_dev) {
1592 			of_node_put(next_rx_endpoint);
1593 			return -ENODEV;
1594 		}
1595 
1596 		of_node_put(next_rx_endpoint);
1597 
1598 		device_count++;
1599 		if (device_count > ATH12K_MAX_SOCS) {
1600 			ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
1601 				    device_count, ATH12K_MAX_SOCS);
1602 			of_node_put(next_wsi_dev);
1603 			return -EINVAL;
1604 		}
1605 	} while (wsi_dev != next_wsi_dev);
1606 
1607 	of_node_put(next_wsi_dev);
1608 	ag->num_devices = device_count;
1609 
1610 	return 0;
1611 }
1612 
static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag,
1614 				     struct ath12k_base *ab)
1615 {
1616 	int i, wsi_controller_index = -1, node_index = -1;
1617 	bool control;
1618 
1619 	for (i = 0; i < ag->num_devices; i++) {
1620 		control = of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller");
1621 		if (control)
1622 			wsi_controller_index = i;
1623 
1624 		if (ag->wsi_node[i] == ab->dev->of_node)
1625 			node_index = i;
1626 	}
1627 
1628 	if (wsi_controller_index == -1) {
1629 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt");
1630 		return -EINVAL;
1631 	}
1632 
1633 	if (node_index == -1) {
1634 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index");
1635 		return -EINVAL;
1636 	}
1637 
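	/* The WSI index is this node's distance from the WSI controller
	 * along the ring, so the controller itself always gets index 0.
	 */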
1638 	ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) %
1639 		ag->num_devices;
1640 
1641 	return 0;
1642 }
1643 
static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
1645 {
1646 	struct ath12k_wsi_info *wsi = &ab->wsi_info;
1647 	struct ath12k_hw_group *ag;
1648 
1649 	lockdep_assert_held(&ath12k_hw_group_mutex);
1650 
1651 	if (ath12k_ftm_mode)
1652 		goto invalid_group;
1653 
	/* Grouping of multiple devices is derived from the device tree.
	 * On platforms without any valid group information, each device
	 * becomes part of its own invalid group.
	 *
	 * Group id ATH12K_INVALID_GROUP_ID is used for single device groups
	 * that have no DT entry or a malformed one, so many groups may share
	 * this same id. The default group id ATH12K_INVALID_GROUP_ID combined
	 * with the number of devices in the ath12k_hw_group therefore
	 * determines whether the group is a multi device or a single device
	 * group.
	 */
1665 
1666 	ag = ath12k_core_hw_group_find_by_dt(ab);
1667 	if (!ag) {
1668 		ag = ath12k_core_hw_group_alloc(ab);
1669 		if (!ag) {
1670 			ath12k_warn(ab, "unable to create new hw group\n");
1671 			return NULL;
1672 		}
1673 
1674 		if (ath12k_core_get_wsi_info(ag, ab) ||
1675 		    ath12k_core_get_wsi_index(ag, ab)) {
1676 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
1677 				   "unable to get wsi info from dt, grouping single device");
1678 			ag->id = ATH12K_INVALID_GROUP_ID;
1679 			ag->num_devices = 1;
1680 			memset(ag->wsi_node, 0, sizeof(ag->wsi_node));
1681 			wsi->index = 0;
1682 		}
1683 
1684 		goto exit;
1685 	} else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
1686 		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
1687 			   ag->id);
1688 		goto invalid_group;
1689 	} else {
1690 		if (ath12k_core_get_wsi_index(ag, ab))
1691 			goto invalid_group;
1692 		goto exit;
1693 	}
1694 
1695 invalid_group:
1696 	ag = ath12k_core_hw_group_alloc(ab);
1697 	if (!ag) {
1698 		ath12k_warn(ab, "unable to create new hw group\n");
1699 		return NULL;
1700 	}
1701 
1702 	ag->id = ATH12K_INVALID_GROUP_ID;
1703 	ag->num_devices = 1;
1704 	wsi->index = 0;
1705 
1706 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");
1707 
1708 exit:
1709 	if (ag->num_probed >= ag->num_devices) {
1710 		ath12k_warn(ab, "unable to add new device to group, max limit reached\n");
1711 		goto invalid_group;
1712 	}
1713 
1714 	ab->device_id = ag->num_probed++;
1715 	ag->ab[ab->device_id] = ab;
1716 	ab->ag = ag;
1717 
1718 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d",
1719 		   ag->id, ag->num_devices, wsi->index);
1720 
1721 	return ag;
1722 }
1723 
void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
1725 {
1726 	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
1727 	u8 device_id = ab->device_id;
1728 	int num_probed;
1729 
1730 	if (!ag)
1731 		return;
1732 
1733 	mutex_lock(&ag->mutex);
1734 
1735 	if (WARN_ON(device_id >= ag->num_devices)) {
1736 		mutex_unlock(&ag->mutex);
1737 		return;
1738 	}
1739 
1740 	if (WARN_ON(ag->ab[device_id] != ab)) {
1741 		mutex_unlock(&ag->mutex);
1742 		return;
1743 	}
1744 
1745 	ag->ab[device_id] = NULL;
1746 	ab->ag = NULL;
1747 	ab->device_id = ATH12K_INVALID_DEVICE_ID;
1748 
1749 	if (ag->num_probed)
1750 		ag->num_probed--;
1751 
1752 	num_probed = ag->num_probed;
1753 
1754 	mutex_unlock(&ag->mutex);
1755 
1756 	if (!num_probed)
1757 		ath12k_core_hw_group_free(ag);
1758 }
1759 
static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
1761 {
1762 	struct ath12k_base *ab;
1763 	int i;
1764 
1765 	if (WARN_ON(!ag))
1766 		return;
1767 
1768 	for (i = 0; i < ag->num_devices; i++) {
1769 		ab = ag->ab[i];
1770 		if (!ab)
1771 			continue;
1772 
1773 		ath12k_core_soc_destroy(ab);
1774 	}
1775 }
1776 
static void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
1778 {
1779 	struct ath12k_base *ab;
1780 	int i;
1781 
1782 	if (!ag)
1783 		return;
1784 
1785 	mutex_lock(&ag->mutex);
1786 
1787 	if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
1788 		mutex_unlock(&ag->mutex);
1789 		return;
1790 	}
1791 
1792 	set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags);
1793 
1794 	ath12k_core_hw_group_stop(ag);
1795 
1796 	for (i = 0; i < ag->num_devices; i++) {
1797 		ab = ag->ab[i];
1798 		if (!ab)
1799 			continue;
1800 
1801 		mutex_lock(&ab->core_lock);
1802 		ath12k_core_stop(ab);
1803 		mutex_unlock(&ab->core_lock);
1804 	}
1805 
1806 	mutex_unlock(&ag->mutex);
1807 }
1808 
static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
1810 {
1811 	struct ath12k_base *ab;
1812 	int i, ret;
1813 
1814 	lockdep_assert_held(&ag->mutex);
1815 
1816 	for (i = 0; i < ag->num_devices; i++) {
1817 		ab = ag->ab[i];
1818 		if (!ab)
1819 			continue;
1820 
1821 		mutex_lock(&ab->core_lock);
1822 
1823 		ret = ath12k_core_soc_create(ab);
1824 		if (ret) {
1825 			mutex_unlock(&ab->core_lock);
1826 			ath12k_err(ab, "failed to create soc core: %d\n", ret);
1827 			return ret;
1828 		}
1829 
1830 		mutex_unlock(&ab->core_lock);
1831 	}
1832 
1833 	return 0;
1834 }
1835 
void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
1837 {
1838 	struct ath12k_base *ab;
1839 	int i;
1840 
1841 	if (ath12k_ftm_mode)
1842 		return;
1843 
1844 	lockdep_assert_held(&ag->mutex);
1845 
	/* If more than one device is grouped, inter-device MLO can work
	 * regardless of whether each device internally supports
	 * single_chip_mlo or not.
	 * Only when there is a single device, disable MLO for WCN chipsets
	 * until the required driver implementation is in place.
	 */
1852 	if (ag->num_devices == 1) {
1853 		ab = ag->ab[0];
1854 
		/* WCN chipsets do not advertise this in their firmware
		 * features, hence skip the check.
		 */
1858 		if (ab->hw_params->def_num_link)
1859 			return;
1860 	}
1861 
1862 	ag->mlo_capable = true;
1863 
1864 	for (i = 0; i < ag->num_devices; i++) {
1865 		ab = ag->ab[i];
1866 		if (!ab)
1867 			continue;
1868 
		/* If even one device's firmware features indicate that MLO is
		 * unsupported, mark MLO as unsupported for the whole group.
		 */
1872 		if (!test_bit(ATH12K_FW_FEATURE_MLO, ab->fw.fw_features)) {
1873 			ag->mlo_capable = false;
1874 			return;
1875 		}
1876 	}
1877 }
1878 
int ath12k_core_init(struct ath12k_base *ab)
1880 {
1881 	struct ath12k_hw_group *ag;
1882 	int ret;
1883 
1884 	ret = ath12k_core_panic_notifier_register(ab);
1885 	if (ret)
1886 		ath12k_warn(ab, "failed to register panic handler: %d\n", ret);
1887 
1888 	mutex_lock(&ath12k_hw_group_mutex);
1889 
1890 	ag = ath12k_core_hw_group_assign(ab);
1891 	if (!ag) {
1892 		mutex_unlock(&ath12k_hw_group_mutex);
1893 		ath12k_warn(ab, "unable to get hw group\n");
1894 		return -ENODEV;
1895 	}
1896 
1897 	mutex_unlock(&ath12k_hw_group_mutex);
1898 
1899 	mutex_lock(&ag->mutex);
1900 
1901 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
1902 		   ag->num_devices, ag->num_probed);
1903 
1904 	if (ath12k_core_hw_group_create_ready(ag)) {
1905 		ret = ath12k_core_hw_group_create(ag);
1906 		if (ret) {
1907 			mutex_unlock(&ag->mutex);
1908 			ath12k_warn(ab, "unable to create hw group\n");
1909 			goto err;
1910 		}
1911 	}
1912 
1913 	mutex_unlock(&ag->mutex);
1914 
1915 	return 0;
1916 
1917 err:
1918 	ath12k_core_hw_group_destroy(ab->ag);
1919 	ath12k_core_hw_group_unassign(ab);
1920 	return ret;
1921 }
1922 
void ath12k_core_deinit(struct ath12k_base *ab)
1924 {
1925 	ath12k_core_panic_notifier_unregister(ab);
1926 	ath12k_core_hw_group_cleanup(ab->ag);
1927 	ath12k_core_hw_group_destroy(ab->ag);
1928 	ath12k_core_hw_group_unassign(ab);
1929 }
1930 
void ath12k_core_free(struct ath12k_base *ab)
1932 {
1933 	timer_delete_sync(&ab->rx_replenish_retry);
1934 	destroy_workqueue(ab->workqueue_aux);
1935 	destroy_workqueue(ab->workqueue);
1936 	kfree(ab);
1937 }
1938 
struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
1940 				      enum ath12k_bus bus)
1941 {
1942 	struct ath12k_base *ab;
1943 
1944 	ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
1945 	if (!ab)
1946 		return NULL;
1947 
1948 	init_completion(&ab->driver_recovery);
1949 
1950 	ab->workqueue = create_singlethread_workqueue("ath12k_wq");
1951 	if (!ab->workqueue)
1952 		goto err_sc_free;
1953 
1954 	ab->workqueue_aux = create_singlethread_workqueue("ath12k_aux_wq");
1955 	if (!ab->workqueue_aux)
1956 		goto err_free_wq;
1957 
1958 	mutex_init(&ab->core_lock);
1959 	spin_lock_init(&ab->base_lock);
1960 	init_completion(&ab->reset_complete);
1961 
1962 	INIT_LIST_HEAD(&ab->peers);
1963 	init_waitqueue_head(&ab->peer_mapping_wq);
1964 	init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
1965 	INIT_WORK(&ab->restart_work, ath12k_core_restart);
1966 	INIT_WORK(&ab->reset_work, ath12k_core_reset);
1967 	INIT_WORK(&ab->rfkill_work, ath12k_rfkill_work);
1968 	INIT_WORK(&ab->dump_work, ath12k_coredump_upload);
1969 
1970 	timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
1971 	init_completion(&ab->htc_suspend);
1972 	init_completion(&ab->restart_completed);
1973 	init_completion(&ab->wow.wakeup_completed);
1974 
1975 	ab->dev = dev;
1976 	ab->hif.bus = bus;
1977 	ab->qmi.num_radios = U8_MAX;
1978 
	/* Device index used to identify the devices within a group.
	 *
	 * In intra-device MLO only one device is present in a group,
	 * so the index is always zero.
	 *
	 * In inter-device MLO multiple devices are present in a group,
	 * so non-zero values are expected.
	 */
1987 	ab->device_id = 0;
1988 
1989 	return ab;
1990 
1991 err_free_wq:
1992 	destroy_workqueue(ab->workqueue);
1993 err_sc_free:
1994 	kfree(ab);
1995 	return NULL;
1996 }
1997 
1998 MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11be wireless LAN cards.");
1999 MODULE_LICENSE("Dual BSD/GPL");
2000