xref: /linux/drivers/net/wireless/ath/ath10k/bmi.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
1 // SPDX-License-Identifier: ISC
2 /*
3  * Copyright (c) 2005-2011 Atheros Communications Inc.
4  * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
5  * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
6  * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
7  */
8 
9 #include <linux/export.h>
10 #include "bmi.h"
11 #include "hif.h"
12 #include "debug.h"
13 #include "htc.h"
14 #include "hw.h"
15 
ath10k_bmi_start(struct ath10k * ar)16 void ath10k_bmi_start(struct ath10k *ar)
17 {
18 	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");
19 
20 	ar->bmi.done_sent = false;
21 }
22 EXPORT_SYMBOL(ath10k_bmi_start);
23 
ath10k_bmi_done(struct ath10k * ar)24 int ath10k_bmi_done(struct ath10k *ar)
25 {
26 	struct bmi_cmd cmd;
27 	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
28 	int ret;
29 
30 	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");
31 
32 	if (ar->bmi.done_sent) {
33 		ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
34 		return 0;
35 	}
36 
37 	ar->bmi.done_sent = true;
38 	cmd.id = __cpu_to_le32(BMI_DONE);
39 
40 	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
41 	if (ret) {
42 		ath10k_warn(ar, "unable to write to the device: %d\n", ret);
43 		return ret;
44 	}
45 
46 	return 0;
47 }
48 
ath10k_bmi_get_target_info(struct ath10k * ar,struct bmi_target_info * target_info)49 int ath10k_bmi_get_target_info(struct ath10k *ar,
50 			       struct bmi_target_info *target_info)
51 {
52 	struct bmi_cmd cmd;
53 	union bmi_resp resp;
54 	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
55 	u32 resplen = sizeof(resp.get_target_info);
56 	int ret;
57 
58 	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");
59 
60 	if (ar->bmi.done_sent) {
61 		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
62 		return -EBUSY;
63 	}
64 
65 	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);
66 
67 	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
68 	if (ret) {
69 		ath10k_warn(ar, "unable to get target info from device\n");
70 		return ret;
71 	}
72 
73 	if (resplen < sizeof(resp.get_target_info)) {
74 		ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
75 			    resplen);
76 		return -EIO;
77 	}
78 
79 	target_info->version = __le32_to_cpu(resp.get_target_info.version);
80 	target_info->type    = __le32_to_cpu(resp.get_target_info.type);
81 
82 	return 0;
83 }
84 
#define TARGET_VERSION_SENTINAL 0xffffffffu

/*
 * Fetch the target version/type over BMI on SDIO hardware.
 *
 * Unlike ath10k_bmi_get_target_info(), the response on SDIO may be
 * preceded by a sentinel word, so the reply is consumed in up to three
 * separate HIF exchanges instead of one.  The exchange order below is
 * load-bearing; do not reorder.
 *
 * Returns 0 on success, -EBUSY if BMI_DONE was already sent, -EINVAL on
 * an unexpected length word, or a negative errno from the HIF layer.
 */
int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
				    struct bmi_target_info *target_info)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
	u32 resplen, ver_len;
	__le32 tmp;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info SDIO\n");

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);

	/* Step 1: Read 4 bytes of the target info and check if it is
	 * the special sentinel version word or the first word in the
	 * version response.
	 */
	resplen = sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &tmp, &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	/* Some SDIO boards have a special sentinel byte before the real
	 * version response.
	 */
	if (__le32_to_cpu(tmp) == TARGET_VERSION_SENTINAL) {
		/* Step 1b: Read the version length (no command is sent;
		 * this just drains the next word of the pending response)
		 */
		resplen = sizeof(u32);
		ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0, &tmp,
						  &resplen);
		if (ret) {
			ath10k_warn(ar, "unable to read from device\n");
			return ret;
		}
	}

	ver_len = __le32_to_cpu(tmp);

	/* Step 2: Check the target info length */
	if (ver_len != sizeof(resp.get_target_info)) {
		ath10k_warn(ar, "Unexpected target info len: %u. Expected: %zu\n",
			    ver_len, sizeof(resp.get_target_info));
		return -EINVAL;
	}

	/* Step 3: Read the rest of the version response (the first word
	 * was already consumed above, hence the sizeof(u32) offset)
	 */
	resplen = sizeof(resp.get_target_info) - sizeof(u32);
	ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0,
					  &resp.get_target_info.version,
					  &resplen);
	if (ret) {
		ath10k_warn(ar, "unable to read from device\n");
		return ret;
	}

	target_info->version = __le32_to_cpu(resp.get_target_info.version);
	target_info->type    = __le32_to_cpu(resp.get_target_info.type);

	return 0;
}
155 
/*
 * Read @length bytes of target memory starting at @address into @buffer.
 *
 * The transfer is split into chunks of at most BMI_MAX_DATA_SIZE bytes,
 * one BMI_READ_MEMORY exchange per chunk.  Returns 0 on success or a
 * negative errno; -EBUSY once BMI_DONE has been sent.
 */
int ath10k_bmi_read_memory(struct ath10k *ar,
			   u32 address, void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 req_len = sizeof(cmd.id) + sizeof(cmd.read_mem);
	int err;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		u32 chunk = min_t(u32, length, BMI_MAX_DATA_SIZE);

		cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
		cmd.read_mem.addr = __cpu_to_le32(address);
		cmd.read_mem.len = __cpu_to_le32(chunk);

		/* chunk is updated in place with the bytes actually read */
		err = ath10k_hif_exchange_bmi_msg(ar, &cmd, req_len,
						  &resp, &chunk);
		if (err) {
			ath10k_warn(ar, "unable to read from the device (%d)\n",
				    err);
			return err;
		}

		memcpy(buffer, resp.read_mem.payload, chunk);
		address += chunk;
		buffer += chunk;
		length -= chunk;
	}

	return 0;
}
EXPORT_SYMBOL(ath10k_bmi_read_memory);
197 
/*
 * Write @reg_val to the SoC register at @address via BMI.
 * Returns 0 on success, -EBUSY after BMI_DONE, or a HIF errno.
 */
int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val)
{
	struct bmi_cmd cmd;
	u32 req_len = sizeof(cmd.id) + sizeof(cmd.write_soc_reg);
	int err;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi write soc register 0x%08x val 0x%08x\n",
		   address, reg_val);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi write soc register command in progress\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_WRITE_SOC_REGISTER);
	cmd.write_soc_reg.addr = __cpu_to_le32(address);
	cmd.write_soc_reg.value = __cpu_to_le32(reg_val);

	/* one-way command: no response payload is expected */
	err = ath10k_hif_exchange_bmi_msg(ar, &cmd, req_len, NULL, NULL);
	if (err)
		ath10k_warn(ar, "Unable to write soc register to device: %d\n",
			    err);

	return err;
}
226 
/*
 * Read the SoC register at @address into *@reg_val via BMI.
 * Returns 0 on success, -EBUSY after BMI_DONE, or a HIF errno.
 */
int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 req_len = sizeof(cmd.id) + sizeof(cmd.read_soc_reg);
	u32 resp_len = sizeof(resp.read_soc_reg);
	int err;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register 0x%08x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi read soc register command in progress\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_READ_SOC_REGISTER);
	cmd.read_soc_reg.addr = __cpu_to_le32(address);

	err = ath10k_hif_exchange_bmi_msg(ar, &cmd, req_len, &resp, &resp_len);
	if (err) {
		ath10k_warn(ar, "Unable to read soc register from device: %d\n",
			    err);
		return err;
	}

	*reg_val = __le32_to_cpu(resp.read_soc_reg.value);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register value 0x%08x\n",
		   *reg_val);

	return 0;
}
260 
/*
 * Write @length bytes from @buffer to target memory at @address.
 *
 * Data is sent in chunks; each chunk's wire length is rounded up to a
 * 4-byte multiple (the payload is copied before padding so the read
 * never runs past the caller's buffer).  Returns 0 on success,
 * -EBUSY after BMI_DONE, or a HIF errno.
 */
int ath10k_bmi_write_memory(struct ath10k *ar,
			    u32 address, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
	u32 payload_len, padded_len, advance;
	int err;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		payload_len = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		/* copy before roundup to avoid reading beyond buffer */
		memcpy(cmd.write_mem.payload, buffer, payload_len);
		padded_len = roundup(payload_len, 4);

		cmd.id = __cpu_to_le32(BMI_WRITE_MEMORY);
		cmd.write_mem.addr = __cpu_to_le32(address);
		cmd.write_mem.len = __cpu_to_le32(padded_len);

		err = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + padded_len,
						  NULL, NULL);
		if (err) {
			ath10k_warn(ar, "unable to write to the device (%d)\n",
				    err);
			return err;
		}

		/* clamp the padded size so `length` reaches exactly zero
		 * on the final chunk
		 */
		advance = min(padded_len, length);
		address += advance;
		buffer += advance;
		length -= advance;
	}

	return 0;
}
306 
/*
 * Ask the target to execute code at @address with @param; the target's
 * return value is stored in *@result.  Returns 0 on success, -EBUSY
 * after BMI_DONE, -EIO on a short response, or a HIF errno.
 */
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 req_len = sizeof(cmd.id) + sizeof(cmd.execute);
	u32 resp_len = sizeof(resp.execute);
	int err;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
		   address, param);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_EXECUTE);
	cmd.execute.addr = __cpu_to_le32(address);
	cmd.execute.param = __cpu_to_le32(param);

	err = ath10k_hif_exchange_bmi_msg(ar, &cmd, req_len, &resp, &resp_len);
	if (err) {
		ath10k_warn(ar, "unable to read from the device\n");
		return err;
	}

	/* resp_len now holds the byte count actually returned */
	if (resp_len < sizeof(resp.execute)) {
		ath10k_warn(ar, "invalid execute response length (%d)\n",
			    resp_len);
		return -EIO;
	}

	*result = __le32_to_cpu(resp.execute.result);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);

	return 0;
}
345 
/*
 * Stream LZ-compressed data to the target using the large transfer size.
 *
 * A heap buffer is required because BMI_MAX_LARGE_DATA_SIZE exceeds the
 * payload capacity of an on-stack struct bmi_cmd.  Returns 0 on success,
 * -EBUSY after BMI_DONE, -ENOMEM, or a HIF errno.
 */
static int ath10k_bmi_lz_data_large(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd *cmd;
	u32 hdrlen = sizeof(cmd->id) + sizeof(cmd->lz_data);
	size_t buf_len;
	u32 txlen;
	int err = 0;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "large bmi lz data buffer 0x%p length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	/* room for the command header plus the enlarged payload */
	buf_len = sizeof(*cmd) + BMI_MAX_LARGE_DATA_SIZE - BMI_MAX_DATA_SIZE;
	cmd = kzalloc(buf_len, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	while (length) {
		txlen = min(length, BMI_MAX_LARGE_DATA_SIZE - hdrlen);

		/* LZ stream chunks are expected to be 4-byte aligned */
		WARN_ON_ONCE(txlen & 3);

		cmd->id = __cpu_to_le32(BMI_LZ_DATA);
		cmd->lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd->lz_data.payload, buffer, txlen);

		err = ath10k_hif_exchange_bmi_msg(ar, cmd, hdrlen + txlen,
						  NULL, NULL);
		if (err) {
			ath10k_warn(ar, "unable to write to the device\n");
			break;
		}

		buffer += txlen;
		length -= txlen;
	}

	kfree(cmd);

	return err;
}
392 
/*
 * Stream LZ-compressed data to the target in standard-size chunks.
 * Returns 0 on success, -EBUSY after BMI_DONE, or a HIF errno.
 */
int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
	int err;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		u32 txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		/* LZ stream chunks are expected to be 4-byte aligned */
		WARN_ON_ONCE(txlen & 3);

		cmd.id = __cpu_to_le32(BMI_LZ_DATA);
		cmd.lz_data.len = __cpu_to_le32(txlen);
		memcpy(cmd.lz_data.payload, buffer, txlen);

		err = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
						  NULL, NULL);
		if (err) {
			ath10k_warn(ar, "unable to write to the device\n");
			return err;
		}

		buffer += txlen;
		length -= txlen;
	}

	return 0;
}
430 
/*
 * Open an LZ decompression stream on the target at @address.
 * Returns 0 on success, -EBUSY after BMI_DONE, or a HIF errno.
 */
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 req_len = sizeof(cmd.id) + sizeof(cmd.lz_start);
	int err;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
		   address);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_LZ_STREAM_START);
	cmd.lz_start.addr = __cpu_to_le32(address);

	err = ath10k_hif_exchange_bmi_msg(ar, &cmd, req_len, NULL, NULL);
	if (err)
		ath10k_warn(ar, "unable to Start LZ Stream to the device\n");

	return err;
}
456 
/*
 * Download @length bytes from @buffer to target address @address via the
 * LZ stream path.
 *
 * The bulk is sent in 4-byte multiples; any 1-3 byte remainder goes out
 * as one final zero-padded word.  Returns 0 on success or the first
 * failing sub-command's errno.
 */
int ath10k_bmi_fast_download(struct ath10k *ar,
			     u32 address, const void *buffer, u32 length)
{
	u8 trailer[4] = {};
	u32 head_len = rounddown(length, 4);
	u32 trailer_len = length - head_len;
	int err;

	ath10k_dbg(ar, ATH10K_DBG_BMI,
		   "bmi fast download address 0x%x buffer 0x%p length %d\n",
		   address, buffer, length);

	err = ath10k_bmi_lz_stream_start(ar, address);
	if (err)
		return err;

	/* copy the last word into a zero padded buffer */
	if (trailer_len > 0)
		memcpy(trailer, buffer + head_len, trailer_len);

	err = ar->hw_params.bmi_large_size_download ?
	      ath10k_bmi_lz_data_large(ar, buffer, head_len) :
	      ath10k_bmi_lz_data(ar, buffer, head_len);
	if (err)
		return err;

	if (trailer_len > 0) {
		err = ath10k_bmi_lz_data(ar, trailer, 4);
		if (err)
			return err;
	}

	/*
	 * Close compressed stream and open a new (fake) one.
	 * This serves mainly to flush Target caches.
	 */
	return ath10k_bmi_lz_stream_start(ar, 0x00);
}
499 
/*
 * Set the application start (entry point) address on the target.
 * Returns 0 on success, -EBUSY after BMI_DONE, or a HIF errno.
 */
int ath10k_bmi_set_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 req_len = sizeof(cmd.id) + sizeof(cmd.set_app_start);
	int err;

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "bmi set start command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_SET_APP_START);
	cmd.set_app_start.addr = __cpu_to_le32(address);

	err = ath10k_hif_exchange_bmi_msg(ar, &cmd, req_len, NULL, NULL);
	if (err)
		ath10k_warn(ar, "unable to set start to the device:%d\n", err);

	return err;
}
522