1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTX CPT driver
3  *
4  * Copyright (C) 2019 Marvell International Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 
11 #include <linux/ctype.h>
12 #include <linux/firmware.h>
13 #include <linux/string_choices.h>
14 #include "otx_cpt_common.h"
15 #include "otx_cptpf_ucode.h"
16 #include "otx_cptpf.h"
17 
18 #define CSR_DELAY 30
19 /* Tar archive defines */
20 #define TAR_MAGIC		"ustar"
21 #define TAR_MAGIC_LEN		6
22 #define TAR_BLOCK_LEN		512
23 #define REGTYPE			'0'
24 #define AREGTYPE		'\0'
25 
/* tar header as defined in POSIX 1003.1-1990. */
struct tar_hdr_t {
	char name[100];		/* member file name */
	char mode[8];		/* permissions (octal ASCII) */
	char uid[8];		/* owner user id (octal ASCII) */
	char gid[8];		/* owner group id (octal ASCII) */
	char size[12];		/* member size in bytes (octal ASCII) */
	char mtime[12];		/* modification time (octal ASCII) */
	char chksum[8];		/* header checksum (octal ASCII) */
	char typeflag;		/* member type, see REGTYPE/AREGTYPE */
	char linkname[100];	/* target of a link member */
	char magic[6];		/* "ustar" magic, see TAR_MAGIC */
	char version[2];	/* ustar version */
	char uname[32];		/* owner user name */
	char gname[32];		/* owner group name */
	char devmajor[8];	/* device major (octal ASCII) */
	char devminor[8];	/* device minor (octal ASCII) */
	char prefix[155];	/* path prefix for long names */
};
45 
/* One 512-byte tar block, viewable either as a header or as raw bytes. */
struct tar_blk_t {
	union {
		struct tar_hdr_t hdr;
		char block[TAR_BLOCK_LEN];
	};
};
52 
/* In-memory view of a loaded tar archive of microcode images. */
struct tar_arch_info_t {
	struct list_head ucodes;	/* list of tar_ucode_info_t entries */
	const struct firmware *fw;	/* backing firmware blob (owned) */
};
57 
get_cores_bmap(struct device * dev,struct otx_cpt_eng_grp_info * eng_grp)58 static struct otx_cpt_bitmap get_cores_bmap(struct device *dev,
59 					   struct otx_cpt_eng_grp_info *eng_grp)
60 {
61 	struct otx_cpt_bitmap bmap = { {0} };
62 	bool found = false;
63 	int i;
64 
65 	if (eng_grp->g->engs_num > OTX_CPT_MAX_ENGINES) {
66 		dev_err(dev, "unsupported number of engines %d on octeontx\n",
67 			eng_grp->g->engs_num);
68 		return bmap;
69 	}
70 
71 	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
72 		if (eng_grp->engs[i].type) {
73 			bitmap_or(bmap.bits, bmap.bits,
74 				  eng_grp->engs[i].bmap,
75 				  eng_grp->g->engs_num);
76 			bmap.size = eng_grp->g->engs_num;
77 			found = true;
78 		}
79 	}
80 
81 	if (!found)
82 		dev_err(dev, "No engines reserved for engine group %d\n",
83 			eng_grp->idx);
84 	return bmap;
85 }
86 
/* Test whether bit @eng_type is set in the type mask @val. */
static int is_eng_type(int val, int eng_type)
{
	const int type_bit = 1 << eng_type;

	return val & type_bit;
}
91 
dev_supports_eng_type(struct otx_cpt_eng_grps * eng_grps,int eng_type)92 static int dev_supports_eng_type(struct otx_cpt_eng_grps *eng_grps,
93 				 int eng_type)
94 {
95 	return is_eng_type(eng_grps->eng_types_supported, eng_type);
96 }
97 
/* Copy @filename into @ucode, truncated to OTX_CPT_UCODE_NAME_LENGTH. */
static void set_ucode_filename(struct otx_cpt_ucode *ucode,
			       const char *filename)
{
	strscpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH);
}
103 
get_eng_type_str(int eng_type)104 static char *get_eng_type_str(int eng_type)
105 {
106 	char *str = "unknown";
107 
108 	switch (eng_type) {
109 	case OTX_CPT_SE_TYPES:
110 		str = "SE";
111 		break;
112 
113 	case OTX_CPT_AE_TYPES:
114 		str = "AE";
115 		break;
116 	}
117 	return str;
118 }
119 
get_ucode_type_str(int ucode_type)120 static char *get_ucode_type_str(int ucode_type)
121 {
122 	char *str = "unknown";
123 
124 	switch (ucode_type) {
125 	case (1 << OTX_CPT_SE_TYPES):
126 		str = "SE";
127 		break;
128 
129 	case (1 << OTX_CPT_AE_TYPES):
130 		str = "AE";
131 		break;
132 	}
133 	return str;
134 }
135 
get_ucode_type(struct otx_cpt_ucode_hdr * ucode_hdr,int * ucode_type)136 static int get_ucode_type(struct otx_cpt_ucode_hdr *ucode_hdr, int *ucode_type)
137 {
138 	char tmp_ver_str[OTX_CPT_UCODE_VER_STR_SZ];
139 	u32 i, val = 0;
140 	u8 nn;
141 
142 	strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
143 	for (i = 0; i < strlen(tmp_ver_str); i++)
144 		tmp_ver_str[i] = tolower(tmp_ver_str[i]);
145 
146 	nn = ucode_hdr->ver_num.nn;
147 	if (strnstr(tmp_ver_str, "se-", OTX_CPT_UCODE_VER_STR_SZ) &&
148 	    (nn == OTX_CPT_SE_UC_TYPE1 || nn == OTX_CPT_SE_UC_TYPE2 ||
149 	     nn == OTX_CPT_SE_UC_TYPE3))
150 		val |= 1 << OTX_CPT_SE_TYPES;
151 	if (strnstr(tmp_ver_str, "ae", OTX_CPT_UCODE_VER_STR_SZ) &&
152 	    nn == OTX_CPT_AE_UC_TYPE)
153 		val |= 1 << OTX_CPT_AE_TYPES;
154 
155 	*ucode_type = val;
156 
157 	if (!val)
158 		return -EINVAL;
159 	if (is_eng_type(val, OTX_CPT_AE_TYPES) &&
160 	    is_eng_type(val, OTX_CPT_SE_TYPES))
161 		return -EINVAL;
162 	return 0;
163 }
164 
/* Return 1 if the @size bytes at @ptr are all zero, 0 otherwise. */
static int is_mem_zero(const char *ptr, int size)
{
	const char *end = ptr + size;

	while (ptr < end) {
		if (*ptr++)
			return 0;
	}
	return 1;
}
175 
/*
 * Program the microcode base address register for the engines used by
 * @eng_grp.  If the group mirrors another group, the mirrored group's
 * microcode DMA address is programmed instead of the group's own.
 * @obj is the owning struct otx_cpt_device.
 * Returns 0 on success, -EINVAL if the group has no reserved engines.
 */
static int cpt_set_ucode_base(struct otx_cpt_eng_grp_info *eng_grp, void *obj)
{
	struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
	dma_addr_t dma_addr;
	struct otx_cpt_bitmap bmap;
	int i;

	bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	if (eng_grp->mirror.is_ena)
		dma_addr =
		       eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].align_dma;
	else
		dma_addr = eng_grp->ucode[0].align_dma;

	/*
	 * Set UCODE_BASE only for the cores which are not used,
	 * other cores should have already valid UCODE_BASE set
	 */
	for_each_set_bit(i, bmap.bits, bmap.size)
		if (!eng_grp->g->eng_ref_cnt[i])
			writeq((u64) dma_addr, cpt->reg_base +
				OTX_CPT_PF_ENGX_UCODE_BASE(i));
	return 0;
}
203 
/*
 * Detach @eng_grp's cores from the group enable register, wait for them
 * to go idle, then power off any core no longer referenced by any group.
 * @obj is the owning struct otx_cpt_device.
 * Returns 0 on success, -EINVAL if the group has no reserved engines,
 * -EBUSY if the cores do not become idle within the polling window.
 */
static int cpt_detach_and_disable_cores(struct otx_cpt_eng_grp_info *eng_grp,
					void *obj)
{
	struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
	struct otx_cpt_bitmap bmap = { {0} };
	int timeout = 10;
	int i, busy;
	u64 reg;

	bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	/* Detach the cores from group */
	reg = readq(cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
	for_each_set_bit(i, bmap.bits, bmap.size) {
		if (reg & (1ull << i)) {
			/* Drop this group's reference on the core */
			eng_grp->g->eng_ref_cnt[i]--;
			reg &= ~(1ull << i);
		}
	}
	writeq(reg, cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));

	/* Wait for cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
		for_each_set_bit(i, bmap.bits, bmap.size)
			if (reg & (1ull << i)) {
				busy = 1;
				break;
			}
	} while (busy);

	/* Disable the cores only if they are not used anymore */
	reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
	for_each_set_bit(i, bmap.bits, bmap.size)
		if (!eng_grp->g->eng_ref_cnt[i])
			reg &= ~(1ull << i);
	writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);

	return 0;
}
251 
/*
 * Attach @eng_grp's cores to the group enable register (bumping the
 * per-core reference count for newly attached cores) and then enable
 * them in the execution control register.
 * @obj is the owning struct otx_cpt_device.
 * Returns 0 on success, -EINVAL if the group has no reserved engines.
 */
static int cpt_attach_and_enable_cores(struct otx_cpt_eng_grp_info *eng_grp,
				       void *obj)
{
	struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
	struct otx_cpt_bitmap bmap;
	u64 reg;
	int i;

	bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	/* Attach the cores to the group */
	reg = readq(cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
	for_each_set_bit(i, bmap.bits, bmap.size) {
		if (!(reg & (1ull << i))) {
			/* Core newly attached - take a reference on it */
			eng_grp->g->eng_ref_cnt[i]++;
			reg |= 1ull << i;
		}
	}
	writeq(reg, cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));

	/* Enable the cores */
	reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
	for_each_set_bit(i, bmap.bits, bmap.size)
		reg |= 1ull << i;
	writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);

	return 0;
}
282 
/*
 * Inspect one member of a tar archive and, if it looks like a valid
 * microcode image, record it on @tar_arch->ucodes.  Members that are
 * not microcode are silently skipped (return 0); malformed microcode
 * returns -EINVAL and allocation failure returns -ENOMEM.
 */
static int process_tar_file(struct device *dev,
			    struct tar_arch_info_t *tar_arch, char *filename,
			    const u8 *data, u32 size)
{
	struct otx_cpt_ucode_hdr *ucode_hdr;
	struct tar_ucode_info_t *tar_info;
	unsigned int code_length;
	int ucode_type;
	int ucode_size;

	/*
	 * Members smaller than a microcode header cannot hold microcode;
	 * skip them without raising an error and keep scanning the archive.
	 */
	if (size < sizeof(struct otx_cpt_ucode_hdr))
		return 0;

	ucode_hdr = (struct otx_cpt_ucode_hdr *) data;
	/*
	 * A member with no recognizable microcode version is likewise
	 * not an error - just move on to the next archive member.
	 */
	if (get_ucode_type(ucode_hdr, &ucode_type))
		return 0;

	/* Guard the doubling below against integer overflow */
	code_length = ntohl(ucode_hdr->code_length);
	if (code_length >= INT_MAX / 2) {
		dev_err(dev, "Invalid code_length %u\n", code_length);
		return -EINVAL;
	}

	ucode_size = code_length * 2;
	if (!ucode_size || (size < round_up(ucode_size, 16) +
	    sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
		dev_err(dev, "Ucode %s invalid size\n", filename);
		return -EINVAL;
	}

	tar_info = kzalloc(sizeof(*tar_info), GFP_KERNEL);
	if (!tar_info)
		return -ENOMEM;

	tar_info->ucode_ptr = data;
	set_ucode_filename(&tar_info->ucode, filename);
	memcpy(tar_info->ucode.ver_str, ucode_hdr->ver_str,
	       OTX_CPT_UCODE_VER_STR_SZ);
	tar_info->ucode.ver_num = ucode_hdr->ver_num;
	tar_info->ucode.type = ucode_type;
	tar_info->ucode.size = ucode_size;
	list_add_tail(&tar_info->list, &tar_arch->ucodes);

	return 0;
}
336 
release_tar_archive(struct tar_arch_info_t * tar_arch)337 static void release_tar_archive(struct tar_arch_info_t *tar_arch)
338 {
339 	struct tar_ucode_info_t *curr, *temp;
340 
341 	if (!tar_arch)
342 		return;
343 
344 	list_for_each_entry_safe(curr, temp, &tar_arch->ucodes, list) {
345 		list_del(&curr->list);
346 		kfree(curr);
347 	}
348 
349 	release_firmware(tar_arch->fw);
350 	kfree(tar_arch);
351 }
352 
get_uc_from_tar_archive(struct tar_arch_info_t * tar_arch,int ucode_type)353 static struct tar_ucode_info_t *get_uc_from_tar_archive(
354 					struct tar_arch_info_t *tar_arch,
355 					int ucode_type)
356 {
357 	struct tar_ucode_info_t *curr, *uc_found = NULL;
358 
359 	list_for_each_entry(curr, &tar_arch->ucodes, list) {
360 		if (!is_eng_type(curr->ucode.type, ucode_type))
361 			continue;
362 
363 		if (!uc_found) {
364 			uc_found = curr;
365 			continue;
366 		}
367 
368 		switch (ucode_type) {
369 		case OTX_CPT_AE_TYPES:
370 			break;
371 
372 		case OTX_CPT_SE_TYPES:
373 			if (uc_found->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE2 ||
374 			    (uc_found->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE3
375 			     && curr->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE1))
376 				uc_found = curr;
377 			break;
378 		}
379 	}
380 
381 	return uc_found;
382 }
383 
/* Dump the contents of a parsed tar archive via pr_debug. */
static void print_tar_dbg_info(struct tar_arch_info_t *tar_arch,
			       char *tar_filename)
{
	struct tar_ucode_info_t *curr;

	pr_debug("Tar archive filename %s\n", tar_filename);
	pr_debug("Tar archive pointer %p, size %ld\n", tar_arch->fw->data,
		 tar_arch->fw->size);
	list_for_each_entry(curr, &tar_arch->ucodes, list) {
		pr_debug("Ucode filename %s\n", curr->ucode.filename);
		pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
		pr_debug("Ucode version %d.%d.%d.%d\n",
			 curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
			 curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
		pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
			 get_ucode_type_str(curr->ucode.type));
		pr_debug("Ucode size %d\n", curr->ucode.size);
		pr_debug("Ucode ptr %p\n", curr->ucode_ptr);
	}
}
404 
/*
 * Load @tar_filename via request_firmware() and walk its ustar members,
 * collecting microcode images with process_tar_file().  Every offset is
 * bounds-checked against the blob size before use.  Returns the parsed
 * archive descriptor or NULL on any failure (the descriptor and the
 * firmware are released on the error path).
 */
static struct tar_arch_info_t *load_tar_archive(struct device *dev,
						char *tar_filename)
{
	struct tar_arch_info_t *tar_arch = NULL;
	struct tar_blk_t *tar_blk;
	unsigned int cur_size;
	size_t tar_offs = 0;
	size_t tar_size;
	int ret;

	tar_arch = kzalloc(sizeof(struct tar_arch_info_t), GFP_KERNEL);
	if (!tar_arch)
		return NULL;

	INIT_LIST_HEAD(&tar_arch->ucodes);

	/* Load tar archive */
	ret = request_firmware(&tar_arch->fw, tar_filename, dev);
	if (ret)
		goto release_tar_arch;

	/* An archive must hold at least one full header block */
	if (tar_arch->fw->size < TAR_BLOCK_LEN) {
		dev_err(dev, "Invalid tar archive %s\n", tar_filename);
		goto release_tar_arch;
	}

	tar_size = tar_arch->fw->size;
	tar_blk = (struct tar_blk_t *) tar_arch->fw->data;
	if (strncmp(tar_blk->hdr.magic, TAR_MAGIC, TAR_MAGIC_LEN - 1)) {
		dev_err(dev, "Unsupported format of tar archive %s\n",
			tar_filename);
		goto release_tar_arch;
	}

	while (1) {
		/* Read current file size (octal ASCII in the header) */
		ret = kstrtouint(tar_blk->hdr.size, 8, &cur_size);
		if (ret)
			goto release_tar_arch;

		/* Member data plus trailing blocks must fit in the blob */
		if (tar_offs + cur_size > tar_size ||
		    tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
			dev_err(dev, "Invalid tar archive %s\n", tar_filename);
			goto release_tar_arch;
		}

		tar_offs += TAR_BLOCK_LEN;
		if (tar_blk->hdr.typeflag == REGTYPE ||
		    tar_blk->hdr.typeflag == AREGTYPE) {
			ret = process_tar_file(dev, tar_arch,
					       tar_blk->hdr.name,
					       &tar_arch->fw->data[tar_offs],
					       cur_size);
			if (ret)
				goto release_tar_arch;
		}

		/* Advance past the member data, rounded up to block size */
		tar_offs += (cur_size/TAR_BLOCK_LEN) * TAR_BLOCK_LEN;
		if (cur_size % TAR_BLOCK_LEN)
			tar_offs += TAR_BLOCK_LEN;

		/* Check for the end of the archive */
		if (tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
			dev_err(dev, "Invalid tar archive %s\n", tar_filename);
			goto release_tar_arch;
		}

		/* Two all-zero blocks mark the end of a tar archive */
		if (is_mem_zero(&tar_arch->fw->data[tar_offs],
		    2*TAR_BLOCK_LEN))
			break;

		/* Read next block from tar archive */
		tar_blk = (struct tar_blk_t *) &tar_arch->fw->data[tar_offs];
	}

	print_tar_dbg_info(tar_arch, tar_filename);
	return tar_arch;
release_tar_arch:
	release_tar_archive(tar_arch);
	return NULL;
}
486 
find_engines_by_type(struct otx_cpt_eng_grp_info * eng_grp,int eng_type)487 static struct otx_cpt_engs_rsvd *find_engines_by_type(
488 					struct otx_cpt_eng_grp_info *eng_grp,
489 					int eng_type)
490 {
491 	int i;
492 
493 	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
494 		if (!eng_grp->engs[i].type)
495 			continue;
496 
497 		if (eng_grp->engs[i].type == eng_type)
498 			return &eng_grp->engs[i];
499 	}
500 	return NULL;
501 }
502 
/*
 * Check whether @ucode supports engines of @eng_type; returns nonzero
 * if the corresponding bit is set in the microcode's type mask.
 */
int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type)
{
	return is_eng_type(ucode->type, eng_type);
}
EXPORT_SYMBOL_GPL(otx_cpt_uc_supports_eng_type);
508 
print_ucode_info(struct otx_cpt_eng_grp_info * eng_grp,char * buf,int size)509 static void print_ucode_info(struct otx_cpt_eng_grp_info *eng_grp,
510 			     char *buf, int size)
511 {
512 	if (eng_grp->mirror.is_ena) {
513 		scnprintf(buf, size, "%s (shared with engine_group%d)",
514 			  eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].ver_str,
515 			  eng_grp->mirror.idx);
516 	} else {
517 		scnprintf(buf, size, "%s", eng_grp->ucode[0].ver_str);
518 	}
519 }
520 
/*
 * Format a human-readable summary of @eng_grp's engines into @buf.
 * @idx selects a single slot, or -1 for all slots (comma separated).
 * Counts include engines borrowed from a mirrored group, with a note
 * describing how many are shared.
 */
static void print_engs_info(struct otx_cpt_eng_grp_info *eng_grp,
			    char *buf, int size, int idx)
{
	struct otx_cpt_engs_rsvd *mirrored_engs = NULL;
	struct otx_cpt_engs_rsvd *engs;
	int len, i;

	buf[0] = '\0';
	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (idx != -1 && idx != i)
			continue;

		if (eng_grp->mirror.is_ena)
			mirrored_engs = find_engines_by_type(
					&eng_grp->g->grp[eng_grp->mirror.idx],
					engs->type);
		/* Separate entries with ", " when printing all slots */
		if (i > 0 && idx == -1) {
			len = strlen(buf);
			scnprintf(buf+len, size-len, ", ");
		}

		len = strlen(buf);
		scnprintf(buf+len, size-len, "%d %s ", mirrored_engs ?
			  engs->count + mirrored_engs->count : engs->count,
			  get_eng_type_str(engs->type));
		if (mirrored_engs) {
			len = strlen(buf);
			scnprintf(buf+len, size-len,
				  "(%d shared with engine_group%d) ",
				  engs->count <= 0 ? engs->count +
				  mirrored_engs->count : mirrored_engs->count,
				  eng_grp->mirror.idx);
		}
	}
}
559 
/* Dump a loaded microcode descriptor via pr_debug. */
static void print_ucode_dbg_info(struct otx_cpt_ucode *ucode)
{
	pr_debug("Ucode info\n");
	pr_debug("Ucode version string %s\n", ucode->ver_str);
	pr_debug("Ucode version %d.%d.%d.%d\n", ucode->ver_num.nn,
		 ucode->ver_num.xx, ucode->ver_num.yy, ucode->ver_num.zz);
	pr_debug("Ucode type %s\n", get_ucode_type_str(ucode->type));
	pr_debug("Ucode size %d\n", ucode->size);
	pr_debug("Ucode virt address %16.16llx\n", (u64)ucode->align_va);
	pr_debug("Ucode phys address %16.16llx\n", ucode->align_dma);
}
571 
/*
 * Format @eng_grp's engine-core bitmap into @buf as two 32-bit hex
 * words, high word first.  Prints "unknown" if the group's bitmap
 * cannot be determined.
 */
static void cpt_print_engines_mask(struct otx_cpt_eng_grp_info *eng_grp,
				   struct device *dev, char *buf, int size)
{
	struct otx_cpt_bitmap bmap;
	u32 mask[2];

	bmap = get_cores_bmap(dev, eng_grp);
	if (!bmap.size) {
		scnprintf(buf, size, "unknown");
		return;
	}
	bitmap_to_arr32(mask, bmap.bits, bmap.size);
	scnprintf(buf, size, "%8.8x %8.8x", mask[1], mask[0]);
}
586 
587 
/*
 * Dump the full engine-group state (availability counters, per-group
 * microcode, per-slot engine info and core masks) via pr_debug.
 */
static void print_dbg_info(struct device *dev,
			   struct otx_cpt_eng_grps *eng_grps)
{
	char engs_info[2*OTX_CPT_UCODE_NAME_LENGTH];
	struct otx_cpt_eng_grp_info *mirrored_grp;
	char engs_mask[OTX_CPT_UCODE_NAME_LENGTH];
	struct otx_cpt_eng_grp_info *grp;
	struct otx_cpt_engs_rsvd *engs;
	u32 mask[4];
	int i, j;

	pr_debug("Engine groups global info\n");
	pr_debug("max SE %d, max AE %d\n",
		 eng_grps->avail.max_se_cnt, eng_grps->avail.max_ae_cnt);
	pr_debug("free SE %d\n", eng_grps->avail.se_cnt);
	pr_debug("free AE %d\n", eng_grps->avail.ae_cnt);

	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		pr_debug("engine_group%d, state %s\n", i,
			 str_enabled_disabled(grp->is_enabled));
		if (grp->is_enabled) {
			/* Mirroring groups report the mirrored ucode */
			mirrored_grp = &eng_grps->grp[grp->mirror.idx];
			pr_debug("Ucode0 filename %s, version %s\n",
				 grp->mirror.is_ena ?
				 mirrored_grp->ucode[0].filename :
				 grp->ucode[0].filename,
				 grp->mirror.is_ena ?
				 mirrored_grp->ucode[0].ver_str :
				 grp->ucode[0].ver_str);
		}

		for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
			engs = &grp->engs[j];
			if (engs->type) {
				print_engs_info(grp, engs_info,
						2*OTX_CPT_UCODE_NAME_LENGTH, j);
				pr_debug("Slot%d: %s\n", j, engs_info);
				bitmap_to_arr32(mask, engs->bmap,
						eng_grps->engs_num);
				pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x\n",
					 mask[3], mask[2], mask[1], mask[0]);
			} else
				pr_debug("Slot%d not used\n", j);
		}
		if (grp->is_enabled) {
			cpt_print_engines_mask(grp, dev, engs_mask,
					       OTX_CPT_UCODE_NAME_LENGTH);
			pr_debug("Cmask: %s\n", engs_mask);
		}
	}
}
640 
update_engines_avail_count(struct device * dev,struct otx_cpt_engs_available * avail,struct otx_cpt_engs_rsvd * engs,int val)641 static int update_engines_avail_count(struct device *dev,
642 				      struct otx_cpt_engs_available *avail,
643 				      struct otx_cpt_engs_rsvd *engs, int val)
644 {
645 	switch (engs->type) {
646 	case OTX_CPT_SE_TYPES:
647 		avail->se_cnt += val;
648 		break;
649 
650 	case OTX_CPT_AE_TYPES:
651 		avail->ae_cnt += val;
652 		break;
653 
654 	default:
655 		dev_err(dev, "Invalid engine type %d\n", engs->type);
656 		return -EINVAL;
657 	}
658 
659 	return 0;
660 }
661 
update_engines_offset(struct device * dev,struct otx_cpt_engs_available * avail,struct otx_cpt_engs_rsvd * engs)662 static int update_engines_offset(struct device *dev,
663 				 struct otx_cpt_engs_available *avail,
664 				 struct otx_cpt_engs_rsvd *engs)
665 {
666 	switch (engs->type) {
667 	case OTX_CPT_SE_TYPES:
668 		engs->offset = 0;
669 		break;
670 
671 	case OTX_CPT_AE_TYPES:
672 		engs->offset = avail->max_se_cnt;
673 		break;
674 
675 	default:
676 		dev_err(dev, "Invalid engine type %d\n", engs->type);
677 		return -EINVAL;
678 	}
679 
680 	return 0;
681 }
682 
release_engines(struct device * dev,struct otx_cpt_eng_grp_info * grp)683 static int release_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp)
684 {
685 	int i, ret = 0;
686 
687 	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
688 		if (!grp->engs[i].type)
689 			continue;
690 
691 		if (grp->engs[i].count > 0) {
692 			ret = update_engines_avail_count(dev, &grp->g->avail,
693 							 &grp->engs[i],
694 							 grp->engs[i].count);
695 			if (ret)
696 				return ret;
697 		}
698 
699 		grp->engs[i].type = 0;
700 		grp->engs[i].count = 0;
701 		grp->engs[i].offset = 0;
702 		grp->engs[i].ucode = NULL;
703 		bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
704 	}
705 
706 	return 0;
707 }
708 
do_reserve_engines(struct device * dev,struct otx_cpt_eng_grp_info * grp,struct otx_cpt_engines * req_engs)709 static int do_reserve_engines(struct device *dev,
710 			      struct otx_cpt_eng_grp_info *grp,
711 			      struct otx_cpt_engines *req_engs)
712 {
713 	struct otx_cpt_engs_rsvd *engs = NULL;
714 	int i, ret;
715 
716 	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
717 		if (!grp->engs[i].type) {
718 			engs = &grp->engs[i];
719 			break;
720 		}
721 	}
722 
723 	if (!engs)
724 		return -ENOMEM;
725 
726 	engs->type = req_engs->type;
727 	engs->count = req_engs->count;
728 
729 	ret = update_engines_offset(dev, &grp->g->avail, engs);
730 	if (ret)
731 		return ret;
732 
733 	if (engs->count > 0) {
734 		ret = update_engines_avail_count(dev, &grp->g->avail, engs,
735 						 -engs->count);
736 		if (ret)
737 			return ret;
738 	}
739 
740 	return 0;
741 }
742 
check_engines_availability(struct device * dev,struct otx_cpt_eng_grp_info * grp,struct otx_cpt_engines * req_eng)743 static int check_engines_availability(struct device *dev,
744 				      struct otx_cpt_eng_grp_info *grp,
745 				      struct otx_cpt_engines *req_eng)
746 {
747 	int avail_cnt = 0;
748 
749 	switch (req_eng->type) {
750 	case OTX_CPT_SE_TYPES:
751 		avail_cnt = grp->g->avail.se_cnt;
752 		break;
753 
754 	case OTX_CPT_AE_TYPES:
755 		avail_cnt = grp->g->avail.ae_cnt;
756 		break;
757 
758 	default:
759 		dev_err(dev, "Invalid engine type %d\n", req_eng->type);
760 		return -EINVAL;
761 	}
762 
763 	if (avail_cnt < req_eng->count) {
764 		dev_err(dev,
765 			"Error available %s engines %d < than requested %d\n",
766 			get_eng_type_str(req_eng->type),
767 			avail_cnt, req_eng->count);
768 		return -EBUSY;
769 	}
770 
771 	return 0;
772 }
773 
reserve_engines(struct device * dev,struct otx_cpt_eng_grp_info * grp,struct otx_cpt_engines * req_engs,int req_cnt)774 static int reserve_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp,
775 			   struct otx_cpt_engines *req_engs, int req_cnt)
776 {
777 	int i, ret;
778 
779 	/* Validate if a number of requested engines is available */
780 	for (i = 0; i < req_cnt; i++) {
781 		ret = check_engines_availability(dev, grp, &req_engs[i]);
782 		if (ret)
783 			return ret;
784 	}
785 
786 	/* Reserve requested engines for this engine group */
787 	for (i = 0; i < req_cnt; i++) {
788 		ret = do_reserve_engines(dev, grp, &req_engs[i]);
789 		if (ret)
790 			return ret;
791 	}
792 	return 0;
793 }
794 
/*
 * sysfs show handler for an engine group's info attribute.  Formats
 * the group's microcode version, engine summary and core mask into
 * @buf under the engine-groups lock.  Returns the number of bytes
 * written.
 */
static ssize_t eng_grp_info_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	char ucode_info[2*OTX_CPT_UCODE_NAME_LENGTH];
	char engs_info[2*OTX_CPT_UCODE_NAME_LENGTH];
	char engs_mask[OTX_CPT_UCODE_NAME_LENGTH];
	struct otx_cpt_eng_grp_info *eng_grp;
	int ret;

	/* The attribute is embedded in the engine-group structure */
	eng_grp = container_of(attr, struct otx_cpt_eng_grp_info, info_attr);
	mutex_lock(&eng_grp->g->lock);

	print_engs_info(eng_grp, engs_info, 2*OTX_CPT_UCODE_NAME_LENGTH, -1);
	print_ucode_info(eng_grp, ucode_info, 2*OTX_CPT_UCODE_NAME_LENGTH);
	cpt_print_engines_mask(eng_grp, dev, engs_mask,
			       OTX_CPT_UCODE_NAME_LENGTH);
	ret = scnprintf(buf, PAGE_SIZE,
			"Microcode : %s\nEngines: %s\nEngines mask: %s\n",
			ucode_info, engs_info, engs_mask);

	mutex_unlock(&eng_grp->g->lock);
	return ret;
}
819 
/*
 * Create the read-only sysfs attribute exposing @eng_grp's info.
 * Returns the result of device_create_file().
 */
static int create_sysfs_eng_grps_info(struct device *dev,
				      struct otx_cpt_eng_grp_info *eng_grp)
{
	eng_grp->info_attr.show = eng_grp_info_show;
	eng_grp->info_attr.store = NULL;
	eng_grp->info_attr.attr.name = eng_grp->sysfs_info_name;
	eng_grp->info_attr.attr.mode = 0440;	/* readable by owner/group only */
	sysfs_attr_init(&eng_grp->info_attr.attr);
	return device_create_file(dev, &eng_grp->info_attr);
}
830 
/*
 * Release @ucode's DMA buffer (if any) and clear all identification
 * fields so the descriptor reads as empty.
 */
static void ucode_unload(struct device *dev, struct otx_cpt_ucode *ucode)
{
	if (ucode->va) {
		dma_free_coherent(dev, ucode->size + OTX_CPT_UCODE_ALIGNMENT,
				  ucode->va, ucode->dma);
		ucode->va = NULL;
		ucode->align_va = NULL;
		ucode->dma = 0;
		ucode->align_dma = 0;
		ucode->size = 0;
	}

	memset(&ucode->ver_str, 0, OTX_CPT_UCODE_VER_STR_SZ);
	memset(&ucode->ver_num, 0, sizeof(struct otx_cpt_ucode_ver_num));
	set_ucode_filename(ucode, "");
	ucode->type = 0;
}
848 
/*
 * Allocate a coherent DMA buffer for @ucode (with extra room so the
 * payload can be aligned to OTX_CPT_UCODE_ALIGNMENT), copy the code
 * section from @ucode_data into it and byte-swap it into the layout
 * the engines expect.  Returns 0 on success, -ENOMEM on allocation
 * failure.
 */
static int copy_ucode_to_dma_mem(struct device *dev,
				 struct otx_cpt_ucode *ucode,
				 const u8 *ucode_data)
{
	u32 i;

	/*  Allocate DMAable space */
	ucode->va = dma_alloc_coherent(dev, ucode->size +
				       OTX_CPT_UCODE_ALIGNMENT,
				       &ucode->dma, GFP_KERNEL);
	if (!ucode->va) {
		dev_err(dev, "Unable to allocate space for microcode\n");
		return -ENOMEM;
	}
	ucode->align_va = PTR_ALIGN(ucode->va, OTX_CPT_UCODE_ALIGNMENT);
	ucode->align_dma = PTR_ALIGN(ucode->dma, OTX_CPT_UCODE_ALIGNMENT);

	/* Code section starts right after the microcode header */
	memcpy((void *) ucode->align_va, (void *) ucode_data +
	       sizeof(struct otx_cpt_ucode_hdr), ucode->size);

	/* Byte swap 64-bit */
	for (i = 0; i < (ucode->size / 8); i++)
		((__be64 *)ucode->align_va)[i] =
				cpu_to_be64(((u64 *)ucode->align_va)[i]);
	/*  Ucode needs 16-bit swap */
	for (i = 0; i < (ucode->size / 2); i++)
		((__be16 *)ucode->align_va)[i] =
				cpu_to_be16(((u16 *)ucode->align_va)[i]);
	return 0;
}
879 
/*
 * Load microcode file @ucode_filename via request_firmware(), validate
 * its header (size sanity, known engine type) and copy the code into a
 * DMA buffer.  The firmware blob is released on every path.  Returns 0
 * on success or a negative errno.
 */
static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
		      const char *ucode_filename)
{
	struct otx_cpt_ucode_hdr *ucode_hdr;
	const struct firmware *fw;
	unsigned int code_length;
	int ret;

	set_ucode_filename(ucode, ucode_filename);
	ret = request_firmware(&fw, ucode->filename, dev);
	if (ret)
		return ret;

	ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data;
	memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
	ucode->ver_num = ucode_hdr->ver_num;
	/* Guard the doubling below against integer overflow */
	code_length = ntohl(ucode_hdr->code_length);
	if (code_length >= INT_MAX / 2) {
		dev_err(dev, "Ucode invalid code_length %u\n", code_length);
		ret = -EINVAL;
		goto release_fw;
	}
	ucode->size = code_length * 2;
	if (!ucode->size || (fw->size < round_up(ucode->size, 16)
	    + sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
		dev_err(dev, "Ucode %s invalid size\n", ucode_filename);
		ret = -EINVAL;
		goto release_fw;
	}

	ret = get_ucode_type(ucode_hdr, &ucode->type);
	if (ret) {
		dev_err(dev, "Microcode %s unknown type 0x%x\n",
			ucode->filename, ucode->type);
		goto release_fw;
	}

	ret = copy_ucode_to_dma_mem(dev, ucode, fw->data);
	if (ret)
		goto release_fw;

	print_ucode_dbg_info(ucode);
release_fw:
	release_firmware(fw);
	return ret;
}
926 
/*
 * Bring @eng_grp online: program its microcode base address, then
 * attach and enable its engine cores.  @obj is the otx_cpt_device.
 */
static int enable_eng_grp(struct otx_cpt_eng_grp_info *eng_grp,
			  void *obj)
{
	int ret;

	ret = cpt_set_ucode_base(eng_grp, obj);
	if (ret)
		return ret;

	return cpt_attach_and_enable_cores(eng_grp, obj);
}
939 
disable_eng_grp(struct device * dev,struct otx_cpt_eng_grp_info * eng_grp,void * obj)940 static int disable_eng_grp(struct device *dev,
941 			   struct otx_cpt_eng_grp_info *eng_grp,
942 			   void *obj)
943 {
944 	int i, ret;
945 
946 	ret = cpt_detach_and_disable_cores(eng_grp, obj);
947 	if (ret)
948 		return ret;
949 
950 	/* Unload ucode used by this engine group */
951 	ucode_unload(dev, &eng_grp->ucode[0]);
952 
953 	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
954 		if (!eng_grp->engs[i].type)
955 			continue;
956 
957 		eng_grp->engs[i].ucode = &eng_grp->ucode[0];
958 	}
959 
960 	ret = cpt_set_ucode_base(eng_grp, obj);
961 
962 	return ret;
963 }
964 
/*
 * Make @dst_grp mirror @src_grp's microcode: @src_grp gains a
 * reference and @dst_grp records which group it mirrors.
 */
static void setup_eng_grp_mirroring(struct otx_cpt_eng_grp_info *dst_grp,
				    struct otx_cpt_eng_grp_info *src_grp)
{
	/* Setup fields for engine group which is mirrored */
	src_grp->mirror.is_ena = false;
	src_grp->mirror.idx = 0;
	src_grp->mirror.ref_count++;

	/* Setup fields for mirroring engine group */
	dst_grp->mirror.is_ena = true;
	dst_grp->mirror.idx = src_grp->idx;
	dst_grp->mirror.ref_count = 0;
}
978 
/*
 * Undo mirroring for @dst_grp: drop the reference held on the mirrored
 * group and clear @dst_grp's mirror state.  No-op if @dst_grp does not
 * mirror anything.
 */
static void remove_eng_grp_mirroring(struct otx_cpt_eng_grp_info *dst_grp)
{
	struct otx_cpt_eng_grp_info *src_grp;

	if (!dst_grp->mirror.is_ena)
		return;

	src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];

	src_grp->mirror.ref_count--;
	dst_grp->mirror.is_ena = false;
	dst_grp->mirror.idx = 0;
	dst_grp->mirror.ref_count = 0;
}
993 
update_requested_engs(struct otx_cpt_eng_grp_info * mirrored_eng_grp,struct otx_cpt_engines * engs,int engs_cnt)994 static void update_requested_engs(struct otx_cpt_eng_grp_info *mirrored_eng_grp,
995 				  struct otx_cpt_engines *engs, int engs_cnt)
996 {
997 	struct otx_cpt_engs_rsvd *mirrored_engs;
998 	int i;
999 
1000 	for (i = 0; i < engs_cnt; i++) {
1001 		mirrored_engs = find_engines_by_type(mirrored_eng_grp,
1002 						     engs[i].type);
1003 		if (!mirrored_engs)
1004 			continue;
1005 
1006 		/*
1007 		 * If mirrored group has this type of engines attached then
1008 		 * there are 3 scenarios possible:
1009 		 * 1) mirrored_engs.count == engs[i].count then all engines
1010 		 * from mirrored engine group will be shared with this engine
1011 		 * group
1012 		 * 2) mirrored_engs.count > engs[i].count then only a subset of
1013 		 * engines from mirrored engine group will be shared with this
1014 		 * engine group
1015 		 * 3) mirrored_engs.count < engs[i].count then all engines
1016 		 * from mirrored engine group will be shared with this group
1017 		 * and additional engines will be reserved for exclusively use
1018 		 * by this engine group
1019 		 */
1020 		engs[i].count -= mirrored_engs->count;
1021 	}
1022 }
1023 
find_mirrored_eng_grp(struct otx_cpt_eng_grp_info * grp)1024 static struct otx_cpt_eng_grp_info *find_mirrored_eng_grp(
1025 					struct otx_cpt_eng_grp_info *grp)
1026 {
1027 	struct otx_cpt_eng_grps *eng_grps = grp->g;
1028 	int i;
1029 
1030 	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
1031 		if (!eng_grps->grp[i].is_enabled)
1032 			continue;
1033 		if (eng_grps->grp[i].ucode[0].type)
1034 			continue;
1035 		if (grp->idx == i)
1036 			continue;
1037 		if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
1038 				 grp->ucode[0].ver_str,
1039 				 OTX_CPT_UCODE_VER_STR_SZ))
1040 			return &eng_grps->grp[i];
1041 	}
1042 
1043 	return NULL;
1044 }
1045 
find_unused_eng_grp(struct otx_cpt_eng_grps * eng_grps)1046 static struct otx_cpt_eng_grp_info *find_unused_eng_grp(
1047 					struct otx_cpt_eng_grps *eng_grps)
1048 {
1049 	int i;
1050 
1051 	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
1052 		if (!eng_grps->grp[i].is_enabled)
1053 			return &eng_grps->grp[i];
1054 	}
1055 	return NULL;
1056 }
1057 
/*
 * Compute the per-engine-type engine bitmaps for an engine group.
 *
 * Pass 1 reserves engines exclusively for this group; pass 2 (only when
 * mirroring is enabled) merges in engines shared from the mirrored group.
 *
 * Returns 0 on success, -EINVAL on an unknown engine type, -ENOSPC when
 * not enough unused engines are available.
 */
static int eng_grp_update_masks(struct device *dev,
				struct otx_cpt_eng_grp_info *eng_grp)
{
	struct otx_cpt_engs_rsvd *engs, *mirrored_engs;
	struct otx_cpt_bitmap tmp_bmap = { {0} };
	int i, j, cnt, max_cnt;
	int bit;

	/*
	 * Pass 1: for each engine type still needing exclusive engines
	 * (count > 0), claim engines from that type's slice
	 * [offset, offset + max_cnt) that no other group references.
	 */
	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (engs->count <= 0)
			continue;

		switch (engs->type) {
		case OTX_CPT_SE_TYPES:
			max_cnt = eng_grp->g->avail.max_se_cnt;
			break;

		case OTX_CPT_AE_TYPES:
			max_cnt = eng_grp->g->avail.max_ae_cnt;
			break;

		default:
			dev_err(dev, "Invalid engine type %d\n", engs->type);
			return -EINVAL;
		}

		cnt = engs->count;
		WARN_ON(engs->offset + max_cnt > OTX_CPT_MAX_ENGINES);
		bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
		for (j = engs->offset; j < engs->offset + max_cnt; j++) {
			/* Only take engines not referenced by any group */
			if (!eng_grp->g->eng_ref_cnt[j]) {
				bitmap_set(tmp_bmap.bits, j, 1);
				cnt--;
				if (!cnt)
					break;
			}
		}

		/* Not enough unused engines of this type available */
		if (cnt)
			return -ENOSPC;

		bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
	}

	/* Without mirroring, the exclusive reservations above suffice */
	if (!eng_grp->mirror.is_ena)
		return 0;

	/*
	 * Pass 2: merge the mirrored group's engines of each type into
	 * this group's bitmaps so they are shared between both groups.
	 */
	for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		mirrored_engs = find_engines_by_type(
					&eng_grp->g->grp[eng_grp->mirror.idx],
					engs->type);
		WARN_ON(!mirrored_engs && engs->count <= 0);
		if (!mirrored_engs)
			continue;

		/*
		 * A negative count (set by update_requested_engs()) means
		 * this group needs fewer engines than the mirrored group
		 * provides; clear -count bits so only the needed amount
		 * is shared.
		 */
		bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
			    eng_grp->g->engs_num);
		if (engs->count < 0) {
			bit = find_first_bit(mirrored_engs->bmap,
					     eng_grp->g->engs_num);
			bitmap_clear(tmp_bmap.bits, bit, -engs->count);
		}
		bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
			  eng_grp->g->engs_num);
	}
	return 0;
}
1132 
delete_engine_group(struct device * dev,struct otx_cpt_eng_grp_info * eng_grp)1133 static int delete_engine_group(struct device *dev,
1134 			       struct otx_cpt_eng_grp_info *eng_grp)
1135 {
1136 	int i, ret;
1137 
1138 	if (!eng_grp->is_enabled)
1139 		return -EINVAL;
1140 
1141 	if (eng_grp->mirror.ref_count) {
1142 		dev_err(dev, "Can't delete engine_group%d as it is used by engine_group(s):",
1143 			eng_grp->idx);
1144 		for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
1145 			if (eng_grp->g->grp[i].mirror.is_ena &&
1146 			    eng_grp->g->grp[i].mirror.idx == eng_grp->idx)
1147 				pr_cont(" %d", i);
1148 		}
1149 		pr_cont("\n");
1150 		return -EINVAL;
1151 	}
1152 
1153 	/* Removing engine group mirroring if enabled */
1154 	remove_eng_grp_mirroring(eng_grp);
1155 
1156 	/* Disable engine group */
1157 	ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
1158 	if (ret)
1159 		return ret;
1160 
1161 	/* Release all engines held by this engine group */
1162 	ret = release_engines(dev, eng_grp);
1163 	if (ret)
1164 		return ret;
1165 
1166 	device_remove_file(dev, &eng_grp->info_attr);
1167 	eng_grp->is_enabled = false;
1168 
1169 	return 0;
1170 }
1171 
validate_1_ucode_scenario(struct device * dev,struct otx_cpt_eng_grp_info * eng_grp,struct otx_cpt_engines * engs,int engs_cnt)1172 static int validate_1_ucode_scenario(struct device *dev,
1173 				     struct otx_cpt_eng_grp_info *eng_grp,
1174 				     struct otx_cpt_engines *engs, int engs_cnt)
1175 {
1176 	int i;
1177 
1178 	/* Verify that ucode loaded supports requested engine types */
1179 	for (i = 0; i < engs_cnt; i++) {
1180 		if (!otx_cpt_uc_supports_eng_type(&eng_grp->ucode[0],
1181 						  engs[i].type)) {
1182 			dev_err(dev,
1183 				"Microcode %s does not support %s engines\n",
1184 				eng_grp->ucode[0].filename,
1185 				get_eng_type_str(engs[i].type));
1186 			return -EINVAL;
1187 		}
1188 	}
1189 	return 0;
1190 }
1191 
update_ucode_ptrs(struct otx_cpt_eng_grp_info * eng_grp)1192 static void update_ucode_ptrs(struct otx_cpt_eng_grp_info *eng_grp)
1193 {
1194 	struct otx_cpt_ucode *ucode;
1195 
1196 	if (eng_grp->mirror.is_ena)
1197 		ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
1198 	else
1199 		ucode = &eng_grp->ucode[0];
1200 	WARN_ON(!eng_grp->engs[0].type);
1201 	eng_grp->engs[0].ucode = ucode;
1202 }
1203 
/*
 * Create and enable a new engine group.
 *
 * Loads the requested microcode (from firmware files or from tar archive
 * entries, per @use_uc_from_tar_arch), sets up mirroring when another
 * enabled group already uses identical microcode, reserves engines,
 * programs the engine masks and ucode base, and enables the group.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int create_engine_group(struct device *dev,
			       struct otx_cpt_eng_grps *eng_grps,
			       struct otx_cpt_engines *engs, int engs_cnt,
			       void *ucode_data[], int ucodes_cnt,
			       bool use_uc_from_tar_arch)
{
	struct otx_cpt_eng_grp_info *mirrored_eng_grp;
	struct tar_ucode_info_t *tar_info;
	struct otx_cpt_eng_grp_info *eng_grp;
	int i, ret = 0;

	if (ucodes_cnt > OTX_CPT_MAX_ETYPES_PER_GRP)
		return -EINVAL;

	/* Validate if requested engine types are supported by this device */
	for (i = 0; i < engs_cnt; i++)
		if (!dev_supports_eng_type(eng_grps, engs[i].type)) {
			dev_err(dev, "Device does not support %s engines\n",
				get_eng_type_str(engs[i].type));
			return -EPERM;
		}

	/* Find engine group which is not used */
	eng_grp = find_unused_eng_grp(eng_grps);
	if (!eng_grp) {
		dev_err(dev, "Error all engine groups are being used\n");
		return -ENOSPC;
	}

	/* Load ucode */
	for (i = 0; i < ucodes_cnt; i++) {
		if (use_uc_from_tar_arch) {
			/* ucode_data[] holds tar archive entries here */
			tar_info = (struct tar_ucode_info_t *) ucode_data[i];
			eng_grp->ucode[i] = tar_info->ucode;
			ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
						    tar_info->ucode_ptr);
		} else
			/* ucode_data[] holds firmware filenames here */
			ret = ucode_load(dev, &eng_grp->ucode[i],
					 (char *) ucode_data[i]);
		if (ret)
			goto err_ucode_unload;
	}

	/* Validate scenario where 1 ucode is used */
	ret = validate_1_ucode_scenario(dev, eng_grp, engs, engs_cnt);
	if (ret)
		goto err_ucode_unload;

	/* Check if this group mirrors another existing engine group */
	mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
	if (mirrored_eng_grp) {
		/* Setup mirroring */
		setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);

		/*
		 * Update count of requested engines because some
		 * of them might be shared with mirrored group
		 */
		update_requested_engs(mirrored_eng_grp, engs, engs_cnt);
	}

	/* Reserve engines */
	ret = reserve_engines(dev, eng_grp, engs, engs_cnt);
	if (ret)
		goto err_ucode_unload;

	/* Update ucode pointers used by engines */
	update_ucode_ptrs(eng_grp);

	/* Update engine masks used by this group */
	ret = eng_grp_update_masks(dev, eng_grp);
	if (ret)
		goto err_release_engs;

	/* Create sysfs entry for engine group info */
	ret = create_sysfs_eng_grps_info(dev, eng_grp);
	if (ret)
		goto err_release_engs;

	/* Enable engine group */
	ret = enable_eng_grp(eng_grp, eng_grps->obj);
	if (ret)
		goto err_release_engs;

	/*
	 * If this engine group mirrors another engine group
	 * then we need to unload ucode as we will use ucode
	 * from mirrored engine group
	 */
	if (eng_grp->mirror.is_ena)
		ucode_unload(dev, &eng_grp->ucode[0]);

	eng_grp->is_enabled = true;
	if (eng_grp->mirror.is_ena)
		dev_info(dev,
			 "Engine_group%d: reuse microcode %s from group %d\n",
			 eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
			 mirrored_eng_grp->idx);
	else
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[0].ver_str);

	return 0;

err_release_engs:
	release_engines(dev, eng_grp);
err_ucode_unload:
	ucode_unload(dev, &eng_grp->ucode[0]);
	return ret;
}
1314 
ucode_load_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)1315 static ssize_t ucode_load_store(struct device *dev,
1316 				struct device_attribute *attr,
1317 				const char *buf, size_t count)
1318 {
1319 	struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = { {0} };
1320 	char *ucode_filename[OTX_CPT_MAX_ETYPES_PER_GRP];
1321 	char tmp_buf[OTX_CPT_UCODE_NAME_LENGTH] = { 0 };
1322 	char *start, *val, *err_msg, *tmp;
1323 	struct otx_cpt_eng_grps *eng_grps;
1324 	int grp_idx = 0, ret = -EINVAL;
1325 	bool has_se, has_ie, has_ae;
1326 	int del_grp_idx = -1;
1327 	int ucode_idx = 0;
1328 
1329 	if (strlen(buf) > OTX_CPT_UCODE_NAME_LENGTH)
1330 		return -EINVAL;
1331 
1332 	eng_grps = container_of(attr, struct otx_cpt_eng_grps, ucode_load_attr);
1333 	err_msg = "Invalid engine group format";
1334 	strscpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH);
1335 	start = tmp_buf;
1336 
1337 	has_se = has_ie = has_ae = false;
1338 
1339 	for (;;) {
1340 		val = strsep(&start, ";");
1341 		if (!val)
1342 			break;
1343 		val = strim(val);
1344 		if (!*val)
1345 			continue;
1346 
1347 		if (!strncasecmp(val, "engine_group", 12)) {
1348 			if (del_grp_idx != -1)
1349 				goto err_print;
1350 			tmp = strim(strsep(&val, ":"));
1351 			if (!val)
1352 				goto err_print;
1353 			if (strlen(tmp) != 13)
1354 				goto err_print;
1355 			if (kstrtoint((tmp + 12), 10, &del_grp_idx))
1356 				goto err_print;
1357 			val = strim(val);
1358 			if (strncasecmp(val, "null", 4))
1359 				goto err_print;
1360 			if (strlen(val) != 4)
1361 				goto err_print;
1362 		} else if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
1363 			if (has_se || ucode_idx)
1364 				goto err_print;
1365 			tmp = strim(strsep(&val, ":"));
1366 			if (!val)
1367 				goto err_print;
1368 			if (strlen(tmp) != 2)
1369 				goto err_print;
1370 			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
1371 				goto err_print;
1372 			engs[grp_idx++].type = OTX_CPT_SE_TYPES;
1373 			has_se = true;
1374 		} else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
1375 			if (has_ae || ucode_idx)
1376 				goto err_print;
1377 			tmp = strim(strsep(&val, ":"));
1378 			if (!val)
1379 				goto err_print;
1380 			if (strlen(tmp) != 2)
1381 				goto err_print;
1382 			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
1383 				goto err_print;
1384 			engs[grp_idx++].type = OTX_CPT_AE_TYPES;
1385 			has_ae = true;
1386 		} else {
1387 			if (ucode_idx > 1)
1388 				goto err_print;
1389 			if (!strlen(val))
1390 				goto err_print;
1391 			if (strnstr(val, " ", strlen(val)))
1392 				goto err_print;
1393 			ucode_filename[ucode_idx++] = val;
1394 		}
1395 	}
1396 
1397 	/* Validate input parameters */
1398 	if (del_grp_idx == -1) {
1399 		if (!(grp_idx && ucode_idx))
1400 			goto err_print;
1401 
1402 		if (ucode_idx > 1 && grp_idx < 2)
1403 			goto err_print;
1404 
1405 		if (grp_idx > OTX_CPT_MAX_ETYPES_PER_GRP) {
1406 			err_msg = "Error max 2 engine types can be attached";
1407 			goto err_print;
1408 		}
1409 
1410 	} else {
1411 		if (del_grp_idx < 0 ||
1412 		    del_grp_idx >= OTX_CPT_MAX_ENGINE_GROUPS) {
1413 			dev_err(dev, "Invalid engine group index %d\n",
1414 				del_grp_idx);
1415 			ret = -EINVAL;
1416 			return ret;
1417 		}
1418 
1419 		if (!eng_grps->grp[del_grp_idx].is_enabled) {
1420 			dev_err(dev, "Error engine_group%d is not configured\n",
1421 				del_grp_idx);
1422 			ret = -EINVAL;
1423 			return ret;
1424 		}
1425 
1426 		if (grp_idx || ucode_idx)
1427 			goto err_print;
1428 	}
1429 
1430 	mutex_lock(&eng_grps->lock);
1431 
1432 	if (eng_grps->is_rdonly) {
1433 		dev_err(dev, "Disable VFs before modifying engine groups\n");
1434 		ret = -EACCES;
1435 		goto err_unlock;
1436 	}
1437 
1438 	if (del_grp_idx == -1)
1439 		/* create engine group */
1440 		ret = create_engine_group(dev, eng_grps, engs, grp_idx,
1441 					  (void **) ucode_filename,
1442 					  ucode_idx, false);
1443 	else
1444 		/* delete engine group */
1445 		ret = delete_engine_group(dev, &eng_grps->grp[del_grp_idx]);
1446 	if (ret)
1447 		goto err_unlock;
1448 
1449 	print_dbg_info(dev, eng_grps);
1450 err_unlock:
1451 	mutex_unlock(&eng_grps->lock);
1452 	return ret ? ret : count;
1453 err_print:
1454 	dev_err(dev, "%s\n", err_msg);
1455 
1456 	return ret;
1457 }
1458 
/*
 * Try to create default engine groups for kernel crypto usage.
 *
 * Attempted only once; creates an SE group (symmetric crypto) and/or an
 * AE group (asymmetric crypto) when the corresponding microcode is found
 * in the firmware tar archive and no groups were configured by the user.
 *
 * Returns 0 on success or when nothing needed to be done, negative error
 * code otherwise.
 */
int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev,
					struct otx_cpt_eng_grps *eng_grps,
					int pf_type)
{
	struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = {};
	struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = {};
	struct tar_arch_info_t *tar_arch = NULL;
	char *tar_filename;
	int i, ret = 0;

	mutex_lock(&eng_grps->lock);

	/*
	 * We don't create engine group for kernel crypto if attempt to create
	 * it was already made (when user enabled VFs for the first time)
	 */
	if (eng_grps->is_first_try)
		goto unlock_mutex;
	eng_grps->is_first_try = true;

	/* We create group for kcrypto only if no groups are configured */
	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
		if (eng_grps->grp[i].is_enabled)
			goto unlock_mutex;

	/* Both PF types load microcode from the same tar archive */
	switch (pf_type) {
	case OTX_CPT_AE:
	case OTX_CPT_SE:
		tar_filename = OTX_CPT_UCODE_TAR_FILE_NAME;
		break;

	default:
		dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/* Missing firmware is not treated as an error (ret stays 0) */
	tar_arch = load_tar_archive(&pdev->dev, tar_filename);
	if (!tar_arch)
		goto unlock_mutex;

	/*
	 * If device supports SE engines and there is SE microcode in tar
	 * archive try to create engine group with SE engines for kernel
	 * crypto functionality (symmetric crypto)
	 */
	tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_SE_TYPES);
	if (tar_info[0] &&
	    dev_supports_eng_type(eng_grps, OTX_CPT_SE_TYPES)) {

		engs[0].type = OTX_CPT_SE_TYPES;
		engs[0].count = eng_grps->avail.max_se_cnt;

		ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
					  (void **) tar_info, 1, true);
		if (ret)
			goto release_tar_arch;
	}
	/*
	 * If device supports AE engines and there is AE microcode in tar
	 * archive try to create engine group with AE engines for asymmetric
	 * crypto functionality.
	 */
	tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_AE_TYPES);
	if (tar_info[0] &&
	    dev_supports_eng_type(eng_grps, OTX_CPT_AE_TYPES)) {

		engs[0].type = OTX_CPT_AE_TYPES;
		engs[0].count = eng_grps->avail.max_ae_cnt;

		ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
					  (void **) tar_info, 1, true);
		if (ret)
			goto release_tar_arch;
	}

	print_dbg_info(&pdev->dev, eng_grps);
release_tar_arch:
	release_tar_archive(tar_arch);
unlock_mutex:
	mutex_unlock(&eng_grps->lock);
	return ret;
}
1542 
/* Toggle the read-only flag that blocks engine group modifications */
void otx_cpt_set_eng_grps_is_rdonly(struct otx_cpt_eng_grps *eng_grps,
				    bool is_rdonly)
{
	/* Serialize against concurrent engine group updates */
	mutex_lock(&eng_grps->lock);
	eng_grps->is_rdonly = is_rdonly;
	mutex_unlock(&eng_grps->lock);
}
1552 
otx_cpt_disable_all_cores(struct otx_cpt_device * cpt)1553 void otx_cpt_disable_all_cores(struct otx_cpt_device *cpt)
1554 {
1555 	int grp, timeout = 100;
1556 	u64 reg;
1557 
1558 	/* Disengage the cores from groups */
1559 	for (grp = 0; grp < OTX_CPT_MAX_ENGINE_GROUPS; grp++) {
1560 		writeq(0, cpt->reg_base + OTX_CPT_PF_GX_EN(grp));
1561 		udelay(CSR_DELAY);
1562 	}
1563 
1564 	reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
1565 	while (reg) {
1566 		udelay(CSR_DELAY);
1567 		reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
1568 		if (timeout--) {
1569 			dev_warn(&cpt->pdev->dev, "Cores still busy\n");
1570 			break;
1571 		}
1572 	}
1573 
1574 	/* Disable the cores */
1575 	writeq(0, cpt->reg_base + OTX_CPT_PF_EXE_CTL);
1576 }
1577 
otx_cpt_cleanup_eng_grps(struct pci_dev * pdev,struct otx_cpt_eng_grps * eng_grps)1578 void otx_cpt_cleanup_eng_grps(struct pci_dev *pdev,
1579 			      struct otx_cpt_eng_grps *eng_grps)
1580 {
1581 	struct otx_cpt_eng_grp_info *grp;
1582 	int i, j;
1583 
1584 	mutex_lock(&eng_grps->lock);
1585 	if (eng_grps->is_ucode_load_created) {
1586 		device_remove_file(&pdev->dev,
1587 				   &eng_grps->ucode_load_attr);
1588 		eng_grps->is_ucode_load_created = false;
1589 	}
1590 
1591 	/* First delete all mirroring engine groups */
1592 	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
1593 		if (eng_grps->grp[i].mirror.is_ena)
1594 			delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
1595 
1596 	/* Delete remaining engine groups */
1597 	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
1598 		delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
1599 
1600 	/* Release memory */
1601 	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
1602 		grp = &eng_grps->grp[i];
1603 		for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
1604 			kfree(grp->engs[j].bmap);
1605 			grp->engs[j].bmap = NULL;
1606 		}
1607 	}
1608 
1609 	mutex_unlock(&eng_grps->lock);
1610 }
1611 
/*
 * Initialize the engine groups state for a PF device.
 *
 * Sets up availability counters, allocates per-group engine bitmaps,
 * records which engine types the PF supports, and creates the
 * "ucode_load" sysfs attribute.  On failure everything allocated so far
 * is released via otx_cpt_cleanup_eng_grps().
 *
 * Returns 0 on success, negative error code otherwise.
 */
int otx_cpt_init_eng_grps(struct pci_dev *pdev,
			  struct otx_cpt_eng_grps *eng_grps, int pf_type)
{
	struct otx_cpt_eng_grp_info *grp;
	int i, j, ret = 0;

	mutex_init(&eng_grps->lock);
	eng_grps->obj = pci_get_drvdata(pdev);
	/* Initially all engines are available */
	eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
	eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;

	eng_grps->engs_num = eng_grps->avail.max_se_cnt +
			     eng_grps->avail.max_ae_cnt;
	if (eng_grps->engs_num > OTX_CPT_MAX_ENGINES) {
		dev_err(&pdev->dev,
			"Number of engines %d > than max supported %d\n",
			eng_grps->engs_num, OTX_CPT_MAX_ENGINES);
		ret = -EINVAL;
		goto err;
	}

	/* Set up each group slot and allocate its engine bitmaps */
	for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		grp->g = eng_grps;
		grp->idx = i;

		snprintf(grp->sysfs_info_name, OTX_CPT_UCODE_NAME_LENGTH,
			 "engine_group%d", i);
		for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
			grp->engs[j].bmap =
				kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
					sizeof(long), GFP_KERNEL);
			if (!grp->engs[j].bmap) {
				ret = -ENOMEM;
				goto err;
			}
		}
	}

	switch (pf_type) {
	case OTX_CPT_SE:
		/* OcteonTX 83XX SE CPT PF has only SE engines attached */
		eng_grps->eng_types_supported = 1 << OTX_CPT_SE_TYPES;
		break;

	case OTX_CPT_AE:
		/* OcteonTX 83XX AE CPT PF has only AE engines attached */
		eng_grps->eng_types_supported = 1 << OTX_CPT_AE_TYPES;
		break;

	default:
		dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
		ret = -EINVAL;
		goto err;
	}

	/* Create the write-only "ucode_load" sysfs attribute */
	eng_grps->ucode_load_attr.show = NULL;
	eng_grps->ucode_load_attr.store = ucode_load_store;
	eng_grps->ucode_load_attr.attr.name = "ucode_load";
	eng_grps->ucode_load_attr.attr.mode = 0220;
	sysfs_attr_init(&eng_grps->ucode_load_attr.attr);
	ret = device_create_file(&pdev->dev,
				 &eng_grps->ucode_load_attr);
	if (ret)
		goto err;
	eng_grps->is_ucode_load_created = true;

	print_dbg_info(&pdev->dev, eng_grps);
	return ret;
err:
	otx_cpt_cleanup_eng_grps(pdev, eng_grps);
	return ret;
}
1685