// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Marvell. */

#include <linux/ctype.h>
#include <linux/firmware.h>
#include <linux/string_choices.h>
#include "otx2_cptpf_ucode.h"
#include "otx2_cpt_common.h"
#include "otx2_cptpf.h"
#include "otx2_cptlf.h"
#include "otx2_cpt_reqmgr.h"
#include "rvu_reg.h"

#define CSR_DELAY 30

#define LOADFVC_RLEN 8
#define LOADFVC_MAJOR_OP 0x01
#define LOADFVC_MINOR_OP 0x08
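
/*
 * A LOAD_FVC instruction returns an engine's capability flags as a
 * single 8-byte word (hence LOADFVC_RLEN); see
 * otx2_cpt_discover_eng_capabilities() below, which reads the word back
 * with be64_to_cpup().
 */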

/*
 * Interval at which dirty data for the next CTX entry is flushed. The
 * interval is measured in increments of 10 ns (interval time =
 * CTX_FLUSH_TIMER_CNT * 10 ns).
 */
#define CTX_FLUSH_TIMER_CNT 0x2FAF0
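/*
 * 0x2FAF0 is 195312 decimal, so the configured flush interval is
 * 195312 * 10 ns ~= 1.95 ms.
 */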

struct fw_info_t {
	struct list_head ucodes;
};

static struct otx2_cpt_bitmap get_cores_bmap(struct device *dev,
					struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_bitmap bmap = { {0} };
	bool found = false;
	int i;

	if (eng_grp->g->engs_num < 0 ||
	    eng_grp->g->engs_num > OTX2_CPT_MAX_ENGINES) {
		dev_err(dev, "unsupported number of engines %d on octeontx2\n",
			eng_grp->g->engs_num);
		return bmap;
	}

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (eng_grp->engs[i].type) {
			bitmap_or(bmap.bits, bmap.bits,
				  eng_grp->engs[i].bmap,
				  eng_grp->g->engs_num);
			bmap.size = eng_grp->g->engs_num;
			found = true;
		}
	}

	if (!found)
		dev_err(dev, "No engines reserved for engine group %d\n",
			eng_grp->idx);
	return bmap;
}

static int is_eng_type(int val, int eng_type)
{
	return val & (1 << eng_type);
}

static int is_2nd_ucode_used(struct otx2_cpt_eng_grp_info *eng_grp)
{
	if (eng_grp->ucode[1].type)
		return true;
	else
		return false;
}

static void set_ucode_filename(struct otx2_cpt_ucode *ucode,
			       const char *filename)
{
	strscpy(ucode->filename, filename, OTX2_CPT_NAME_LENGTH);
}

static char *get_eng_type_str(int eng_type)
{
	char *str = "unknown";

	switch (eng_type) {
	case OTX2_CPT_SE_TYPES:
		str = "SE";
		break;

	case OTX2_CPT_IE_TYPES:
		str = "IE";
		break;

	case OTX2_CPT_AE_TYPES:
		str = "AE";
		break;
	}
	return str;
}

static char *get_ucode_type_str(int ucode_type)
{
	char *str = "unknown";

	switch (ucode_type) {
	case (1 << OTX2_CPT_SE_TYPES):
		str = "SE";
		break;

	case (1 << OTX2_CPT_IE_TYPES):
		str = "IE";
		break;

	case (1 << OTX2_CPT_AE_TYPES):
		str = "AE";
		break;

	case (1 << OTX2_CPT_SE_TYPES | 1 << OTX2_CPT_IE_TYPES):
		str = "SE+IPSEC";
		break;
	}
	return str;
}

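/*
 * Map the ucode header's version string and version number to a bitmask
 * of engine types. For example, a version string containing the
 * "ocpt-<rid>" prefix and an "se-" tag, together with a ver_num.nn that
 * matches one of the SE ucode types, yields (1 << OTX2_CPT_SE_TYPES).
 */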
static int get_ucode_type(struct device *dev,
			  struct otx2_cpt_ucode_hdr *ucode_hdr,
			  int *ucode_type, u16 rid)
{
	char ver_str_prefix[OTX2_CPT_UCODE_VER_STR_SZ];
	char tmp_ver_str[OTX2_CPT_UCODE_VER_STR_SZ];
	int i, val = 0;
	u8 nn;

	strscpy(tmp_ver_str, ucode_hdr->ver_str, OTX2_CPT_UCODE_VER_STR_SZ);
	for (i = 0; i < strlen(tmp_ver_str); i++)
		tmp_ver_str[i] = tolower(tmp_ver_str[i]);

	sprintf(ver_str_prefix, "ocpt-%02d", rid);
	if (!strnstr(tmp_ver_str, ver_str_prefix, OTX2_CPT_UCODE_VER_STR_SZ))
		return -EINVAL;

	nn = ucode_hdr->ver_num.nn;
	if (strnstr(tmp_ver_str, "se-", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    (nn == OTX2_CPT_SE_UC_TYPE1 || nn == OTX2_CPT_SE_UC_TYPE2 ||
	     nn == OTX2_CPT_SE_UC_TYPE3))
		val |= 1 << OTX2_CPT_SE_TYPES;
	if (strnstr(tmp_ver_str, "ie-", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    (nn == OTX2_CPT_IE_UC_TYPE1 || nn == OTX2_CPT_IE_UC_TYPE2 ||
	     nn == OTX2_CPT_IE_UC_TYPE3))
		val |= 1 << OTX2_CPT_IE_TYPES;
	if (strnstr(tmp_ver_str, "ae", OTX2_CPT_UCODE_VER_STR_SZ) &&
	    nn == OTX2_CPT_AE_UC_TYPE)
		val |= 1 << OTX2_CPT_AE_TYPES;

	*ucode_type = val;

	if (!val)
		return -EINVAL;

	return 0;
}

static int __write_ucode_base(struct otx2_cptpf_dev *cptpf, int eng,
			      dma_addr_t dma_addr, int blkaddr)
{
	return otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				     CPT_AF_EXEX_UCODE_BASE(eng),
				     (u64)dma_addr, blkaddr);
}

static int cptx_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp,
			       struct otx2_cptpf_dev *cptpf, int blkaddr)
{
	struct otx2_cpt_engs_rsvd *engs;
	dma_addr_t dma_addr;
	int i, bit, ret;

	/* Set PF number for microcode fetches */
	ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
				    CPT_AF_PF_FUNC,
				    cptpf->pf_id << RVU_PFVF_PF_SHIFT, blkaddr);
	if (ret)
		return ret;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		dma_addr = engs->ucode->dma;

		/*
		 * Set UCODE_BASE only for the cores that are not in use;
		 * the other cores should already have a valid UCODE_BASE.
		 */
		for_each_set_bit(bit, engs->bmap, eng_grp->g->engs_num)
			if (!eng_grp->g->eng_ref_cnt[bit]) {
				ret = __write_ucode_base(cptpf, bit, dma_addr,
							 blkaddr);
				if (ret)
					return ret;
			}
	}
	return 0;
}

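/*
 * Program the ucode base on both CPT blocks: CPT1 is present only on
 * parts that expose a second CPT block (cptpf->has_cpt1), while CPT0 is
 * always programmed.
 */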
static int cpt_set_ucode_base(struct otx2_cpt_eng_grp_info *eng_grp, void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	int ret;

	if (cptpf->has_cpt1) {
		ret = cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_set_ucode_base(eng_grp, cptpf, BLKADDR_CPT0);
}

static int cptx_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					 struct otx2_cptpf_dev *cptpf,
					 struct otx2_cpt_bitmap bmap,
					 int blkaddr)
{
	int i, timeout = 10;
	int busy, ret;
	u64 reg = 0;

	/* Detach the cores from the group */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
		if (ret)
			return ret;

		if (reg & (1ull << eng_grp->idx)) {
			eng_grp->g->eng_ref_cnt[i]--;
			reg &= ~(1ull << eng_grp->idx);

			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL2(i), reg,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	/* Wait for cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		for_each_set_bit(i, bmap.bits, bmap.size) {
			ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
						   cptpf->pdev,
						   CPT_AF_EXEX_STS(i), &reg,
						   blkaddr);
			if (ret)
				return ret;

			if (reg & 0x1) {
				busy = 1;
				break;
			}
		}
	} while (busy);

	/* Disable the cores only if they are not used anymore */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		if (!eng_grp->g->eng_ref_cnt[i]) {
			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL(i), 0x0,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int cpt_detach_and_disable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	struct otx2_cpt_bitmap bmap;
	int ret;

	bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	if (cptpf->has_cpt1) {
		ret = cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
						    BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_detach_and_disable_cores(eng_grp, cptpf, bmap,
					     BLKADDR_CPT0);
}

static int cptx_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
					struct otx2_cptpf_dev *cptpf,
					struct otx2_cpt_bitmap bmap,
					int blkaddr)
{
	u64 reg = 0;
	int i, ret;

	/* Attach the cores to the group */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
					   CPT_AF_EXEX_CTL2(i), &reg, blkaddr);
		if (ret)
			return ret;

		if (!(reg & (1ull << eng_grp->idx))) {
			eng_grp->g->eng_ref_cnt[i]++;
			reg |= 1ull << eng_grp->idx;

			ret = otx2_cpt_write_af_reg(&cptpf->afpf_mbox,
						    cptpf->pdev,
						    CPT_AF_EXEX_CTL2(i), reg,
						    blkaddr);
			if (ret)
				return ret;
		}
	}

	/* Enable the cores */
	for_each_set_bit(i, bmap.bits, bmap.size) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL(i), 0x1,
						blkaddr);
		if (ret)
			return ret;
	}
	return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}

static int cpt_attach_and_enable_cores(struct otx2_cpt_eng_grp_info *eng_grp,
				       void *obj)
{
	struct otx2_cptpf_dev *cptpf = obj;
	struct otx2_cpt_bitmap bmap;
	int ret;

	bmap = get_cores_bmap(&cptpf->pdev->dev, eng_grp);
	if (!bmap.size)
		return -EINVAL;

	if (cptpf->has_cpt1) {
		ret = cptx_attach_and_enable_cores(eng_grp, cptpf, bmap,
						   BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_attach_and_enable_cores(eng_grp, cptpf, bmap, BLKADDR_CPT0);
}

static int load_fw(struct device *dev, struct fw_info_t *fw_info,
		   char *filename, u16 rid)
{
	struct otx2_cpt_ucode_hdr *ucode_hdr;
	struct otx2_cpt_uc_info_t *uc_info;
	int ucode_type, ucode_size;
	int ret;

	uc_info = kzalloc(sizeof(*uc_info), GFP_KERNEL);
	if (!uc_info)
		return -ENOMEM;

	ret = request_firmware(&uc_info->fw, filename, dev);
	if (ret)
		goto free_uc_info;

	ucode_hdr = (struct otx2_cpt_ucode_hdr *)uc_info->fw->data;
	ret = get_ucode_type(dev, ucode_hdr, &ucode_type, rid);
	if (ret)
		goto release_fw;

	ucode_size = ntohl(ucode_hdr->code_length) * 2;
	if (!ucode_size) {
		dev_err(dev, "Ucode %s invalid size\n", filename);
		ret = -EINVAL;
		goto release_fw;
	}

	set_ucode_filename(&uc_info->ucode, filename);
	memcpy(uc_info->ucode.ver_str, ucode_hdr->ver_str,
	       OTX2_CPT_UCODE_VER_STR_SZ);
	uc_info->ucode.ver_str[OTX2_CPT_UCODE_VER_STR_SZ] = 0;
	uc_info->ucode.ver_num = ucode_hdr->ver_num;
	uc_info->ucode.type = ucode_type;
	uc_info->ucode.size = ucode_size;
	list_add_tail(&uc_info->list, &fw_info->ucodes);

	return 0;

release_fw:
	release_firmware(uc_info->fw);
free_uc_info:
	kfree(uc_info);
	return ret;
}

static void cpt_ucode_release_fw(struct fw_info_t *fw_info)
{
	struct otx2_cpt_uc_info_t *curr, *temp;

	if (!fw_info)
		return;

	list_for_each_entry_safe(curr, temp, &fw_info->ucodes, list) {
		list_del(&curr->list);
		release_firmware(curr->fw);
		kfree(curr);
	}
}

static struct otx2_cpt_uc_info_t *get_ucode(struct fw_info_t *fw_info,
					    int ucode_type)
{
	struct otx2_cpt_uc_info_t *curr;

	list_for_each_entry(curr, &fw_info->ucodes, list) {
		if (!is_eng_type(curr->ucode.type, ucode_type))
			continue;

		return curr;
	}
	return NULL;
}

static void print_uc_info(struct fw_info_t *fw_info)
{
	struct otx2_cpt_uc_info_t *curr;

	list_for_each_entry(curr, &fw_info->ucodes, list) {
		pr_debug("Ucode filename %s\n", curr->ucode.filename);
		pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
		pr_debug("Ucode version %d.%d.%d.%d\n",
			 curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
			 curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
		pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
			 get_ucode_type_str(curr->ucode.type));
		pr_debug("Ucode size %d\n", curr->ucode.size);
		pr_debug("Ucode ptr %p\n", curr->fw->data);
	}
}

static int cpt_ucode_load_fw(struct pci_dev *pdev, struct fw_info_t *fw_info,
			     u16 rid)
{
	char filename[OTX2_CPT_NAME_LENGTH];
	char eng_type[8] = {0};
	int ret, e, i;

	INIT_LIST_HEAD(&fw_info->ucodes);

	for (e = 1; e < OTX2_CPT_MAX_ENG_TYPES; e++) {
		strcpy(eng_type, get_eng_type_str(e));
		for (i = 0; i < strlen(eng_type); i++)
			eng_type[i] = tolower(eng_type[i]);

		snprintf(filename, sizeof(filename), "mrvl/cpt%02d/%s.out",
			 rid, eng_type);
		/* Request firmware for each engine type */
		ret = load_fw(&pdev->dev, fw_info, filename, rid);
		if (ret)
			goto release_fw;
	}
	print_uc_info(fw_info);
	return 0;

release_fw:
	cpt_ucode_release_fw(fw_info);
	return ret;
}

struct otx2_cpt_engs_rsvd *find_engines_by_type(
					struct otx2_cpt_eng_grp_info *eng_grp,
					int eng_type)
{
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!eng_grp->engs[i].type)
			continue;

		if (eng_grp->engs[i].type == eng_type)
			return &eng_grp->engs[i];
	}
	return NULL;
}

static int eng_grp_has_eng_type(struct otx2_cpt_eng_grp_info *eng_grp,
				int eng_type)
{
	struct otx2_cpt_engs_rsvd *engs;

	engs = find_engines_by_type(eng_grp, eng_type);

	return (engs != NULL ? 1 : 0);
}

static int update_engines_avail_count(struct device *dev,
				      struct otx2_cpt_engs_available *avail,
				      struct otx2_cpt_engs_rsvd *engs, int val)
{
	switch (engs->type) {
	case OTX2_CPT_SE_TYPES:
		avail->se_cnt += val;
		break;

	case OTX2_CPT_IE_TYPES:
		avail->ie_cnt += val;
		break;

	case OTX2_CPT_AE_TYPES:
		avail->ae_cnt += val;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", engs->type);
		return -EINVAL;
	}
	return 0;
}

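/*
 * Global engine numbering is laid out by type: SE engines occupy
 * indices [0, max_se_cnt), IE engines [max_se_cnt, max_se_cnt +
 * max_ie_cnt), and AE engines follow the IE range. The offsets below
 * encode that layout.
 */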
static int update_engines_offset(struct device *dev,
				 struct otx2_cpt_engs_available *avail,
				 struct otx2_cpt_engs_rsvd *engs)
{
	switch (engs->type) {
	case OTX2_CPT_SE_TYPES:
		engs->offset = 0;
		break;

	case OTX2_CPT_IE_TYPES:
		engs->offset = avail->max_se_cnt;
		break;

	case OTX2_CPT_AE_TYPES:
		engs->offset = avail->max_se_cnt + avail->max_ie_cnt;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", engs->type);
		return -EINVAL;
	}
	return 0;
}

static int release_engines(struct device *dev,
			   struct otx2_cpt_eng_grp_info *grp)
{
	int i, ret = 0;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!grp->engs[i].type)
			continue;

		if (grp->engs[i].count > 0) {
			ret = update_engines_avail_count(dev, &grp->g->avail,
							 &grp->engs[i],
							 grp->engs[i].count);
			if (ret)
				return ret;
		}

		grp->engs[i].type = 0;
		grp->engs[i].count = 0;
		grp->engs[i].offset = 0;
		grp->engs[i].ucode = NULL;
		bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
	}
	return 0;
}

static int do_reserve_engines(struct device *dev,
			      struct otx2_cpt_eng_grp_info *grp,
			      struct otx2_cpt_engines *req_engs)
{
	struct otx2_cpt_engs_rsvd *engs = NULL;
	int i, ret;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!grp->engs[i].type) {
			engs = &grp->engs[i];
			break;
		}
	}

	if (!engs)
		return -ENOMEM;

	engs->type = req_engs->type;
	engs->count = req_engs->count;

	ret = update_engines_offset(dev, &grp->g->avail, engs);
	if (ret)
		return ret;

	if (engs->count > 0) {
		ret = update_engines_avail_count(dev, &grp->g->avail, engs,
						 -engs->count);
		if (ret)
			return ret;
	}

	return 0;
}

static int check_engines_availability(struct device *dev,
				      struct otx2_cpt_eng_grp_info *grp,
				      struct otx2_cpt_engines *req_eng)
{
	int avail_cnt = 0;

	switch (req_eng->type) {
	case OTX2_CPT_SE_TYPES:
		avail_cnt = grp->g->avail.se_cnt;
		break;

	case OTX2_CPT_IE_TYPES:
		avail_cnt = grp->g->avail.ie_cnt;
		break;

	case OTX2_CPT_AE_TYPES:
		avail_cnt = grp->g->avail.ae_cnt;
		break;

	default:
		dev_err(dev, "Invalid engine type %d\n", req_eng->type);
		return -EINVAL;
	}

	if (avail_cnt < req_eng->count) {
		dev_err(dev,
			"Error: available %s engines %d < requested %d\n",
			get_eng_type_str(req_eng->type),
			avail_cnt, req_eng->count);
		return -EBUSY;
	}
	return 0;
}

static int reserve_engines(struct device *dev,
			   struct otx2_cpt_eng_grp_info *grp,
			   struct otx2_cpt_engines *req_engs, int ucodes_cnt)
{
	int i, ret = 0;

	/* Validate that the requested number of engines is available */
	for (i = 0; i < ucodes_cnt; i++) {
		ret = check_engines_availability(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}

	/* Reserve requested engines for this engine group */
	for (i = 0; i < ucodes_cnt; i++) {
		ret = do_reserve_engines(dev, grp, &req_engs[i]);
		if (ret)
			return ret;
	}
	return 0;
}

static void ucode_unload(struct device *dev, struct otx2_cpt_ucode *ucode)
{
	if (ucode->va) {
		dma_free_coherent(dev, OTX2_CPT_UCODE_SZ, ucode->va,
				  ucode->dma);
		ucode->va = NULL;
		ucode->dma = 0;
		ucode->size = 0;
	}

	memset(&ucode->ver_str, 0, OTX2_CPT_UCODE_VER_STR_SZ);
	memset(&ucode->ver_num, 0, sizeof(struct otx2_cpt_ucode_ver_num));
	set_ucode_filename(ucode, "");
	ucode->type = 0;
}

static int copy_ucode_to_dma_mem(struct device *dev,
				 struct otx2_cpt_ucode *ucode,
				 const u8 *ucode_data)
{
	u32 i;

	/* Allocate DMAable space */
	ucode->va = dma_alloc_coherent(dev, OTX2_CPT_UCODE_SZ, &ucode->dma,
				       GFP_KERNEL);
	if (!ucode->va)
		return -ENOMEM;

	memcpy(ucode->va, ucode_data + sizeof(struct otx2_cpt_ucode_hdr),
	       ucode->size);

	/* Byte swap 64-bit */
	for (i = 0; i < (ucode->size / 8); i++)
		cpu_to_be64s(&((u64 *)ucode->va)[i]);
	/* Ucode needs 16-bit swap */
	for (i = 0; i < (ucode->size / 2); i++)
		cpu_to_be16s(&((u16 *)ucode->va)[i]);
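
	/*
	 * Net effect of the two swaps on a little-endian host: within
	 * each 64-bit word the four 16-bit halfwords are reversed while
	 * the byte order inside each halfword is preserved (bytes
	 * 01234567 become 67452301). On a big-endian host both helpers
	 * are no-ops.
	 */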
	return 0;
}

static int enable_eng_grp(struct otx2_cpt_eng_grp_info *eng_grp,
			  void *obj)
{
	int ret;

	/* Point microcode to each core of the group */
	ret = cpt_set_ucode_base(eng_grp, obj);
	if (ret)
		return ret;

	/* Attach the cores to the group and enable them */
	ret = cpt_attach_and_enable_cores(eng_grp, obj);

	return ret;
}

static int disable_eng_grp(struct device *dev,
			   struct otx2_cpt_eng_grp_info *eng_grp,
			   void *obj)
{
	int i, ret;

	/* Disable all engines used by this group */
	ret = cpt_detach_and_disable_cores(eng_grp, obj);
	if (ret)
		return ret;

	/* Unload ucode used by this engine group */
	ucode_unload(dev, &eng_grp->ucode[0]);
	ucode_unload(dev, &eng_grp->ucode[1]);

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		if (!eng_grp->engs[i].type)
			continue;

		eng_grp->engs[i].ucode = &eng_grp->ucode[0];
	}

	/* Clear UCODE_BASE register for each engine used by this group */
	ret = cpt_set_ucode_base(eng_grp, obj);

	return ret;
}

static void setup_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp,
				    struct otx2_cpt_eng_grp_info *src_grp)
{
	/* Setup fields for engine group which is mirrored */
	src_grp->mirror.is_ena = false;
	src_grp->mirror.idx = 0;
	src_grp->mirror.ref_count++;

	/* Setup fields for mirroring engine group */
	dst_grp->mirror.is_ena = true;
	dst_grp->mirror.idx = src_grp->idx;
	dst_grp->mirror.ref_count = 0;
}

static void remove_eng_grp_mirroring(struct otx2_cpt_eng_grp_info *dst_grp)
{
	struct otx2_cpt_eng_grp_info *src_grp;

	if (!dst_grp->mirror.is_ena)
		return;

	src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];

	src_grp->mirror.ref_count--;
	dst_grp->mirror.is_ena = false;
	dst_grp->mirror.idx = 0;
	dst_grp->mirror.ref_count = 0;
}

static void update_requested_engs(struct otx2_cpt_eng_grp_info *mirror_eng_grp,
				  struct otx2_cpt_engines *engs, int engs_cnt)
{
	struct otx2_cpt_engs_rsvd *mirrored_engs;
	int i;

	for (i = 0; i < engs_cnt; i++) {
		mirrored_engs = find_engines_by_type(mirror_eng_grp,
						     engs[i].type);
		if (!mirrored_engs)
			continue;

		/*
		 * If the mirrored group has this type of engines attached
		 * then there are 3 possible scenarios:
		 * 1) mirrored_engs.count == engs[i].count: all engines from
		 * the mirrored engine group will be shared with this engine
		 * group
		 * 2) mirrored_engs.count > engs[i].count: only a subset of
		 * the engines from the mirrored engine group will be shared
		 * with this engine group
		 * 3) mirrored_engs.count < engs[i].count: all engines from
		 * the mirrored engine group will be shared with this group
		 * and additional engines will be reserved for exclusive use
		 * by this engine group
		 */
		engs[i].count -= mirrored_engs->count;
	}
}

static struct otx2_cpt_eng_grp_info *find_mirrored_eng_grp(
					struct otx2_cpt_eng_grp_info *grp)
{
	struct otx2_cpt_eng_grps *eng_grps = grp->g;
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			continue;
		if (eng_grps->grp[i].ucode[0].type &&
		    eng_grps->grp[i].ucode[1].type)
			continue;
		if (grp->idx == i)
			continue;
		if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
				 grp->ucode[0].ver_str,
				 OTX2_CPT_UCODE_VER_STR_SZ))
			return &eng_grps->grp[i];
	}

	return NULL;
}

static struct otx2_cpt_eng_grp_info *find_unused_eng_grp(
					struct otx2_cpt_eng_grps *eng_grps)
{
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		if (!eng_grps->grp[i].is_enabled)
			return &eng_grps->grp[i];
	}
	return NULL;
}

static int eng_grp_update_masks(struct device *dev,
				struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_engs_rsvd *engs, *mirrored_engs;
	struct otx2_cpt_bitmap tmp_bmap = { {0} };
	int i, j, cnt, max_cnt;
	int bit;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;
		if (engs->count <= 0)
			continue;

		switch (engs->type) {
		case OTX2_CPT_SE_TYPES:
			max_cnt = eng_grp->g->avail.max_se_cnt;
			break;

		case OTX2_CPT_IE_TYPES:
			max_cnt = eng_grp->g->avail.max_ie_cnt;
			break;

		case OTX2_CPT_AE_TYPES:
			max_cnt = eng_grp->g->avail.max_ae_cnt;
			break;

		default:
			dev_err(dev, "Invalid engine type %d\n", engs->type);
			return -EINVAL;
		}

		cnt = engs->count;
		WARN_ON(engs->offset + max_cnt > OTX2_CPT_MAX_ENGINES);
		bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
		for (j = engs->offset; j < engs->offset + max_cnt; j++) {
			if (!eng_grp->g->eng_ref_cnt[j]) {
				bitmap_set(tmp_bmap.bits, j, 1);
				cnt--;
				if (!cnt)
					break;
			}
		}

		if (cnt)
			return -ENOSPC;

		bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
	}

	if (!eng_grp->mirror.is_ena)
		return 0;

	for (i = 0; i < OTX2_CPT_MAX_ETYPES_PER_GRP; i++) {
		engs = &eng_grp->engs[i];
		if (!engs->type)
			continue;

		mirrored_engs = find_engines_by_type(
					&eng_grp->g->grp[eng_grp->mirror.idx],
					engs->type);
		WARN_ON(!mirrored_engs && engs->count <= 0);
		if (!mirrored_engs)
			continue;

		bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
			    eng_grp->g->engs_num);
		if (engs->count < 0) {
			bit = find_first_bit(mirrored_engs->bmap,
					     eng_grp->g->engs_num);
			bitmap_clear(tmp_bmap.bits, bit, -engs->count);
		}
		bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
			  eng_grp->g->engs_num);
	}
	return 0;
}

static int delete_engine_group(struct device *dev,
			       struct otx2_cpt_eng_grp_info *eng_grp)
{
	int ret;

	if (!eng_grp->is_enabled)
		return 0;

	if (eng_grp->mirror.ref_count)
		return -EINVAL;

	/* Remove engine group mirroring if enabled */
	remove_eng_grp_mirroring(eng_grp);

	/* Disable engine group */
	ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
	if (ret)
		return ret;

	/* Release all engines held by this engine group */
	ret = release_engines(dev, eng_grp);
	if (ret)
		return ret;

	eng_grp->is_enabled = false;

	return 0;
}

static void update_ucode_ptrs(struct otx2_cpt_eng_grp_info *eng_grp)
{
	struct otx2_cpt_ucode *ucode;

	if (eng_grp->mirror.is_ena)
		ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
	else
		ucode = &eng_grp->ucode[0];
	WARN_ON(!eng_grp->engs[0].type);
	eng_grp->engs[0].ucode = ucode;

	if (eng_grp->engs[1].type) {
		if (is_2nd_ucode_used(eng_grp))
			eng_grp->engs[1].ucode = &eng_grp->ucode[1];
		else
			eng_grp->engs[1].ucode = ucode;
	}
}

static int create_engine_group(struct device *dev,
			       struct otx2_cpt_eng_grps *eng_grps,
			       struct otx2_cpt_engines *engs, int ucodes_cnt,
			       void *ucode_data[], int is_print)
{
	struct otx2_cpt_eng_grp_info *mirrored_eng_grp;
	struct otx2_cpt_eng_grp_info *eng_grp;
	struct otx2_cpt_uc_info_t *uc_info;
	int i, ret = 0;

	/* Find engine group which is not used */
	eng_grp = find_unused_eng_grp(eng_grps);
	if (!eng_grp) {
		dev_err(dev, "Error all engine groups are being used\n");
		return -ENOSPC;
	}
	/* Load ucode */
	for (i = 0; i < ucodes_cnt; i++) {
		uc_info = (struct otx2_cpt_uc_info_t *) ucode_data[i];
		eng_grp->ucode[i] = uc_info->ucode;
		ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
					    uc_info->fw->data);
		if (ret)
			goto unload_ucode;
	}

	/* Check if this group mirrors another existing engine group */
	mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
	if (mirrored_eng_grp) {
		/* Setup mirroring */
		setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);

		/*
		 * Update the count of requested engines because some
		 * of them might be shared with the mirrored group
		 */
		update_requested_engs(mirrored_eng_grp, engs, ucodes_cnt);
	}
	ret = reserve_engines(dev, eng_grp, engs, ucodes_cnt);
	if (ret)
		goto unload_ucode;

	/* Update ucode pointers used by engines */
	update_ucode_ptrs(eng_grp);

	/* Update engine masks used by this group */
	ret = eng_grp_update_masks(dev, eng_grp);
	if (ret)
		goto release_engs;

	/* Enable engine group */
	ret = enable_eng_grp(eng_grp, eng_grps->obj);
	if (ret)
		goto release_engs;

	/*
	 * If this engine group mirrors another engine group then
	 * unload its own ucode here, since the ucode from the
	 * mirrored engine group will be used
	 */
	if (eng_grp->mirror.is_ena)
		ucode_unload(dev, &eng_grp->ucode[0]);

	eng_grp->is_enabled = true;

	if (!is_print)
		return 0;

	if (mirrored_eng_grp)
		dev_info(dev,
			 "Engine_group%d: reuse microcode %s from group %d\n",
			 eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
			 mirrored_eng_grp->idx);
	else
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[0].ver_str);
	if (is_2nd_ucode_used(eng_grp))
		dev_info(dev, "Engine_group%d: microcode loaded %s\n",
			 eng_grp->idx, eng_grp->ucode[1].ver_str);

	return 0;

release_engs:
	release_engines(dev, eng_grp);
unload_ucode:
	ucode_unload(dev, &eng_grp->ucode[0]);
	ucode_unload(dev, &eng_grp->ucode[1]);
	return ret;
}

static void delete_engine_grps(struct pci_dev *pdev,
			       struct otx2_cpt_eng_grps *eng_grps)
{
	int i;

	/* First delete all mirroring engine groups */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
		if (eng_grps->grp[i].mirror.is_ena)
			delete_engine_group(&pdev->dev, &eng_grps->grp[i]);

	/* Delete remaining engine groups */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++)
		delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
}

#define PCI_DEVID_CN10K_RNM 0xA098
#define RNM_ENTROPY_STATUS  0x8

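/*
 * Workaround for an RNM-to-CPT HW erratum: before CPT is allowed to
 * request random numbers from RNM, poll RNM_ENTROPY_STATUS until
 * NORMAL_CNT (the field masked with 0x7F below) reads 0x40, i.e. RNM is
 * producing entropy at the expected rate.
 */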
static void rnm_to_cpt_errata_fixup(struct device *dev)
{
	struct pci_dev *pdev;
	void __iomem *base;
	int timeout = 5000;

	pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RNM, NULL);
	if (!pdev)
		return;

	base = pci_ioremap_bar(pdev, 0);
	if (!base)
		goto put_pdev;

	while ((readq(base + RNM_ENTROPY_STATUS) & 0x7F) != 0x40) {
		cpu_relax();
		udelay(1);
		timeout--;
		if (!timeout) {
			dev_warn(dev, "RNM is not producing entropy\n");
			break;
		}
	}

	iounmap(base);

put_pdev:
	pci_dev_put(pdev);
}

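/*
 * Find an enabled engine group that contains the given engine type. For
 * SE requests, prefer a group with SE engines but no IE engines so that
 * the SE-only symmetric-crypto group is returned rather than the shared
 * SE+IE IPsec group.
 */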
int otx2_cpt_get_eng_grp(struct otx2_cpt_eng_grps *eng_grps, int eng_type)
{
	int eng_grp_num = OTX2_CPT_INVALID_CRYPTO_ENG_GRP;
	struct otx2_cpt_eng_grp_info *grp;
	int i;

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		if (!grp->is_enabled)
			continue;

		if (eng_type == OTX2_CPT_SE_TYPES) {
			if (eng_grp_has_eng_type(grp, eng_type) &&
			    !eng_grp_has_eng_type(grp, OTX2_CPT_IE_TYPES)) {
				eng_grp_num = i;
				break;
			}
		} else {
			if (eng_grp_has_eng_type(grp, eng_type)) {
				eng_grp_num = i;
				break;
			}
		}
	}
	return eng_grp_num;
}

int otx2_cpt_create_eng_grps(struct otx2_cptpf_dev *cptpf,
			     struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	struct pci_dev *pdev = cptpf->pdev;
	struct fw_info_t fw_info;
	u64 reg_val;
	int ret = 0;

	mutex_lock(&eng_grps->lock);
	/*
	 * Don't create the engine groups if they were already
	 * created (when the user enabled VFs for the first time)
	 */
	if (eng_grps->is_grps_created)
		goto unlock;

	ret = cpt_ucode_load_fw(pdev, &fw_info, eng_grps->rid);
	if (ret)
		goto unlock;

	/*
	 * Create engine group with SE engines for kernel
	 * crypto functionality (symmetric crypto)
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for SE\n");
		ret = -EINVAL;
		goto release_fw;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = eng_grps->avail.max_se_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 1);
	if (ret)
		goto release_fw;

	/*
	 * Create engine group with SE+IE engines for IPSec.
	 * All SE engines will be shared with engine group 0.
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	uc_info[1] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);

	if (uc_info[1] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for IE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = eng_grps->avail.max_se_cnt;
	engs[1].type = OTX2_CPT_IE_TYPES;
	engs[1].count = eng_grps->avail.max_ie_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 2,
				  (void **) uc_info, 1);
	if (ret)
		goto delete_eng_grp;

	/*
	 * Create engine group with AE engines for asymmetric
	 * crypto functionality.
	 */
	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for AE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_AE_TYPES;
	engs[0].count = eng_grps->avail.max_ae_cnt;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 1);
	if (ret)
		goto delete_eng_grp;

	eng_grps->is_grps_created = true;

	cpt_ucode_release_fw(&fw_info);

	if (is_dev_otx2(pdev))
		goto unlock;

	/*
	 * Ensure RNM_ENTROPY_STATUS[NORMAL_CNT] = 0x40 before writing
	 * CPT_AF_CTL[RNM_REQ_EN] = 1 as a workaround for HW errata.
	 */
	rnm_to_cpt_errata_fixup(&pdev->dev);

	otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL, &reg_val,
			     BLKADDR_CPT0);
	/*
	 * Configure the engine group mask to allow context prefetching
	 * for the groups and enable random number requests so that CPT
	 * can fetch random numbers from RNM.
	 */
	reg_val |= OTX2_CPT_ALL_ENG_GRPS_MASK << 3 | BIT_ULL(16);
	otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTL,
			      reg_val, BLKADDR_CPT0);
	/*
	 * Set interval to periodically flush dirty data for the next
	 * CTX cache entry. Set the interval count to maximum supported
	 * value.
	 */
	otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_CTX_FLUSH_TIMER,
			      CTX_FLUSH_TIMER_CNT, BLKADDR_CPT0);

	/*
	 * Set CPT_AF_DIAG[FLT_DIS] as a workaround for a HW erratum:
	 * when CPT_AF_DIAG[FLT_DIS] = 0 and a CPT engine access to
	 * LLC/DRAM encounters a fault/poison, a rare case may result in
	 * unpredictable data being delivered to a CPT engine.
	 */
	if (cpt_is_errata_38550_exists(pdev)) {
		otx2_cpt_read_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
				     &reg_val, BLKADDR_CPT0);
		otx2_cpt_write_af_reg(&cptpf->afpf_mbox, pdev, CPT_AF_DIAG,
				      reg_val | BIT_ULL(24), BLKADDR_CPT0);
	}

	mutex_unlock(&eng_grps->lock);
	return 0;

delete_eng_grp:
	delete_engine_grps(pdev, eng_grps);
release_fw:
	cpt_ucode_release_fw(&fw_info);
unlock:
	mutex_unlock(&eng_grps->lock);
	return ret;
}

static int cptx_disable_all_cores(struct otx2_cptpf_dev *cptpf, int total_cores,
				  int blkaddr)
{
	int timeout = 10, ret;
	int i, busy;
	u64 reg;

	/* Disengage the cores from groups */
	for (i = 0; i < total_cores; i++) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL2(i), 0x0,
						blkaddr);
		if (ret)
			return ret;

		cptpf->eng_grps.eng_ref_cnt[i] = 0;
	}
	ret = otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
	if (ret)
		return ret;

	/* Wait for cores to become idle */
	do {
		busy = 0;
		usleep_range(10000, 20000);
		if (timeout-- < 0)
			return -EBUSY;

		for (i = 0; i < total_cores; i++) {
			ret = otx2_cpt_read_af_reg(&cptpf->afpf_mbox,
						   cptpf->pdev,
						   CPT_AF_EXEX_STS(i), &reg,
						   blkaddr);
			if (ret)
				return ret;

			if (reg & 0x1) {
				busy = 1;
				break;
			}
		}
	} while (busy);

	/* Disable the cores */
	for (i = 0; i < total_cores; i++) {
		ret = otx2_cpt_add_write_af_reg(&cptpf->afpf_mbox, cptpf->pdev,
						CPT_AF_EXEX_CTL(i), 0x0,
						blkaddr);
		if (ret)
			return ret;
	}
	return otx2_cpt_send_af_reg_requests(&cptpf->afpf_mbox, cptpf->pdev);
}

int otx2_cpt_disable_all_cores(struct otx2_cptpf_dev *cptpf)
{
	int total_cores, ret;

	total_cores = cptpf->eng_grps.avail.max_se_cnt +
		      cptpf->eng_grps.avail.max_ie_cnt +
		      cptpf->eng_grps.avail.max_ae_cnt;

	if (cptpf->has_cpt1) {
		ret = cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT1);
		if (ret)
			return ret;
	}
	return cptx_disable_all_cores(cptpf, total_cores, BLKADDR_CPT0);
}

void otx2_cpt_cleanup_eng_grps(struct pci_dev *pdev,
			       struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_eng_grp_info *grp;
	int i, j;

	mutex_lock(&eng_grps->lock);
	delete_engine_grps(pdev, eng_grps);
	/* Release memory */
	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
			kfree(grp->engs[j].bmap);
			grp->engs[j].bmap = NULL;
		}
	}
	mutex_unlock(&eng_grps->lock);
}

int otx2_cpt_init_eng_grps(struct pci_dev *pdev,
			   struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_eng_grp_info *grp;
	int i, j, ret;

	mutex_init(&eng_grps->lock);
	eng_grps->obj = pci_get_drvdata(pdev);
	eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
	eng_grps->avail.ie_cnt = eng_grps->avail.max_ie_cnt;
	eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;

	eng_grps->engs_num = eng_grps->avail.max_se_cnt +
			     eng_grps->avail.max_ie_cnt +
			     eng_grps->avail.max_ae_cnt;
	if (eng_grps->engs_num > OTX2_CPT_MAX_ENGINES) {
		dev_err(&pdev->dev,
			"Number of engines %d > max supported %d\n",
			eng_grps->engs_num, OTX2_CPT_MAX_ENGINES);
		ret = -EINVAL;
		goto cleanup_eng_grps;
	}

	for (i = 0; i < OTX2_CPT_MAX_ENGINE_GROUPS; i++) {
		grp = &eng_grps->grp[i];
		grp->g = eng_grps;
		grp->idx = i;

		for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
			grp->engs[j].bmap =
				kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
					sizeof(long), GFP_KERNEL);
			if (!grp->engs[j].bmap) {
				ret = -ENOMEM;
				goto cleanup_eng_grps;
			}
		}
	}
	return 0;

cleanup_eng_grps:
	otx2_cpt_cleanup_eng_grps(pdev, eng_grps);
	return ret;
}

static int create_eng_caps_discovery_grps(struct pci_dev *pdev,
					  struct otx2_cpt_eng_grps *eng_grps)
{
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = { };
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { {0} };
	struct fw_info_t fw_info;
	int ret;

	mutex_lock(&eng_grps->lock);
	ret = cpt_ucode_load_fw(pdev, &fw_info, eng_grps->rid);
	if (ret) {
		mutex_unlock(&eng_grps->lock);
		return ret;
	}

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_AE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for AE\n");
		ret = -EINVAL;
		goto release_fw;
	}
	engs[0].type = OTX2_CPT_AE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto release_fw;

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_SE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for SE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_SE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto delete_eng_grp;

	uc_info[0] = get_ucode(&fw_info, OTX2_CPT_IE_TYPES);
	if (uc_info[0] == NULL) {
		dev_err(&pdev->dev, "Unable to find firmware for IE\n");
		ret = -EINVAL;
		goto delete_eng_grp;
	}
	engs[0].type = OTX2_CPT_IE_TYPES;
	engs[0].count = 2;

	ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
				  (void **) uc_info, 0);
	if (ret)
		goto delete_eng_grp;

	cpt_ucode_release_fw(&fw_info);
	mutex_unlock(&eng_grps->lock);
	return 0;

delete_eng_grp:
	delete_engine_grps(pdev, eng_grps);
release_fw:
	cpt_ucode_release_fw(&fw_info);
	mutex_unlock(&eng_grps->lock);
	return ret;
}

/*
 * Get CPT HW capabilities using LOAD_FVC operation.
 */
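/*
 * One LOAD_FVC instruction is submitted per engine type: a small
 * discovery group is created for each type, the instruction is sent
 * through LF 0, completion is polled, and the 8-byte capability word is
 * read back from the response buffer.
 */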
int otx2_cpt_discover_eng_capabilities(struct otx2_cptpf_dev *cptpf)
{
	struct otx2_cptlfs_info *lfs = &cptpf->lfs;
	struct otx2_cpt_iq_command iq_cmd;
	union otx2_cpt_opcode opcode;
	union otx2_cpt_res_s *result;
	union otx2_cpt_inst_s inst;
	dma_addr_t rptr_baddr;
	struct pci_dev *pdev;
	u32 len, compl_rlen;
	int ret, etype;
	void *rptr;

	/*
	 * Don't rediscover the capabilities if it was already done
	 * (when the user enabled VFs for the first time)
	 */
	if (cptpf->is_eng_caps_discovered)
		return 0;

	pdev = cptpf->pdev;
	/*
	 * Create engine groups for each type to submit LOAD_FVC op and
	 * get engine's capabilities.
	 */
	ret = create_eng_caps_discovery_grps(pdev, &cptpf->eng_grps);
	if (ret)
		goto delete_grps;

	otx2_cptlf_set_dev_info(lfs, cptpf->pdev, cptpf->reg_base,
				&cptpf->afpf_mbox, BLKADDR_CPT0);
	ret = otx2_cptlf_init(lfs, OTX2_CPT_ALL_ENG_GRPS_MASK,
			      OTX2_CPT_QUEUE_HI_PRIO, 1);
	if (ret)
		goto delete_grps;

	compl_rlen = ALIGN(sizeof(union otx2_cpt_res_s), OTX2_CPT_DMA_MINALIGN);
	len = compl_rlen + LOADFVC_RLEN;

	result = kzalloc(len, GFP_KERNEL);
	if (!result) {
		ret = -ENOMEM;
		goto lf_cleanup;
	}
	rptr_baddr = dma_map_single(&pdev->dev, (void *)result, len,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, rptr_baddr)) {
		dev_err(&pdev->dev, "DMA mapping failed\n");
		ret = -EFAULT;
		goto free_result;
	}
	rptr = (u8 *)result + compl_rlen;

	/* Fill in the command */
	opcode.s.major = LOADFVC_MAJOR_OP;
	opcode.s.minor = LOADFVC_MINOR_OP;

	iq_cmd.cmd.u = 0;
	iq_cmd.cmd.s.opcode = cpu_to_be16(opcode.flags);

	/* 64-bit swap for microcode data reads, not needed for addresses */
	cpu_to_be64s(&iq_cmd.cmd.u);
	iq_cmd.dptr = 0;
	iq_cmd.rptr = rptr_baddr + compl_rlen;
	iq_cmd.cptr.u = 0;

	for (etype = 1; etype < OTX2_CPT_MAX_ENG_TYPES; etype++) {
		result->s.compcode = OTX2_CPT_COMPLETION_CODE_INIT;
		iq_cmd.cptr.s.grp = otx2_cpt_get_eng_grp(&cptpf->eng_grps,
							 etype);
		otx2_cpt_fill_inst(&inst, &iq_cmd, rptr_baddr);
		lfs->ops->send_cmd(&inst, 1, &cptpf->lfs.lf[0]);

		while (lfs->ops->cpt_get_compcode(result) ==
						OTX2_CPT_COMPLETION_CODE_INIT)
			cpu_relax();

		cptpf->eng_caps[etype].u = be64_to_cpup(rptr);
	}
	dma_unmap_single(&pdev->dev, rptr_baddr, len, DMA_BIDIRECTIONAL);
	cptpf->is_eng_caps_discovered = true;

free_result:
	kfree(result);
lf_cleanup:
	otx2_cptlf_shutdown(lfs);
delete_grps:
	delete_engine_grps(pdev, &cptpf->eng_grps);

	return ret;
}

int otx2_cpt_dl_custom_egrp_create(struct otx2_cptpf_dev *cptpf,
				   struct devlink_param_gset_ctx *ctx)
{
	struct otx2_cpt_engines engs[OTX2_CPT_MAX_ETYPES_PER_GRP] = { { 0 } };
	struct otx2_cpt_uc_info_t *uc_info[OTX2_CPT_MAX_ETYPES_PER_GRP] = {};
	struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
	char *ucode_filename[OTX2_CPT_MAX_ETYPES_PER_GRP];
	char tmp_buf[OTX2_CPT_NAME_LENGTH] = { 0 };
	struct device *dev = &cptpf->pdev->dev;
	char *start, *val, *err_msg, *tmp;
	int grp_idx = 0, ret = -EINVAL;
	bool has_se, has_ie, has_ae;
	struct fw_info_t fw_info;
	int ucode_idx = 0;

	if (!eng_grps->is_grps_created) {
		dev_err(dev, "Not allowed before creating the default groups\n");
		return -EINVAL;
	}
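
	/*
	 * The devlink parameter is a ';'-separated list of
	 * "<type>:<count>" tokens (type is one of se/ie/ae) followed by
	 * one or two ucode file names, e.g. (hypothetical counts and
	 * file names): "se:10;ie:8;cpt-se.out;cpt-ie.out".
	 */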
	err_msg = "Invalid engine group format";
	strscpy(tmp_buf, ctx->val.vstr, strlen(ctx->val.vstr) + 1);
	start = tmp_buf;

	has_se = has_ie = has_ae = false;

	for (;;) {
		val = strsep(&start, ";");
		if (!val)
			break;
		val = strim(val);
		if (!*val)
			continue;

		if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
			if (has_se || ucode_idx)
				goto err_print;
			tmp = strsep(&val, ":");
			if (!tmp)
				goto err_print;
			tmp = strim(tmp);
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX2_CPT_SE_TYPES;
			has_se = true;
		} else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
			if (has_ae || ucode_idx)
				goto err_print;
			tmp = strsep(&val, ":");
			if (!tmp)
				goto err_print;
			tmp = strim(tmp);
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX2_CPT_AE_TYPES;
			has_ae = true;
		} else if (!strncasecmp(val, "ie", 2) && strchr(val, ':')) {
			if (has_ie || ucode_idx)
				goto err_print;
			tmp = strsep(&val, ":");
			if (!tmp)
				goto err_print;
			tmp = strim(tmp);
			if (!val)
				goto err_print;
			if (strlen(tmp) != 2)
				goto err_print;
			if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
				goto err_print;
			engs[grp_idx++].type = OTX2_CPT_IE_TYPES;
			has_ie = true;
		} else {
			if (ucode_idx > 1)
				goto err_print;
			if (!strlen(val))
				goto err_print;
			if (strnstr(val, " ", strlen(val)))
				goto err_print;
			ucode_filename[ucode_idx++] = val;
		}
	}

	/* Validate input parameters */
	if (!(grp_idx && ucode_idx))
		goto err_print;

	if (ucode_idx > 1 && grp_idx < 2)
		goto err_print;

	if (grp_idx > OTX2_CPT_MAX_ETYPES_PER_GRP) {
		err_msg = "Error max 2 engine types can be attached";
		goto err_print;
	}

	if (grp_idx > 1) {
		if ((engs[0].type + engs[1].type) !=
		    (OTX2_CPT_SE_TYPES + OTX2_CPT_IE_TYPES)) {
			err_msg = "Only combination of SE+IE engines is allowed";
			goto err_print;
		}
		/* Keep SE engines at zero index */
		if (engs[1].type == OTX2_CPT_SE_TYPES)
			swap(engs[0], engs[1]);
	}
	mutex_lock(&eng_grps->lock);

	if (cptpf->enabled_vfs) {
		dev_err(dev, "Disable VFs before modifying engine groups\n");
		ret = -EACCES;
		goto err_unlock;
	}
	INIT_LIST_HEAD(&fw_info.ucodes);

	ret = load_fw(dev, &fw_info, ucode_filename[0], eng_grps->rid);
	if (ret) {
		dev_err(dev, "Unable to load firmware %s\n", ucode_filename[0]);
		goto err_unlock;
	}
	if (ucode_idx > 1) {
		ret = load_fw(dev, &fw_info, ucode_filename[1], eng_grps->rid);
		if (ret) {
			dev_err(dev, "Unable to load firmware %s\n",
				ucode_filename[1]);
			goto release_fw;
		}
	}
	uc_info[0] = get_ucode(&fw_info, engs[0].type);
	if (uc_info[0] == NULL) {
		dev_err(dev, "Unable to find firmware for %s\n",
			get_eng_type_str(engs[0].type));
		ret = -EINVAL;
		goto release_fw;
	}
	if (ucode_idx > 1) {
		uc_info[1] = get_ucode(&fw_info, engs[1].type);
		if (uc_info[1] == NULL) {
			dev_err(dev, "Unable to find firmware for %s\n",
				get_eng_type_str(engs[1].type));
			ret = -EINVAL;
			goto release_fw;
		}
	}
	ret = create_engine_group(dev, eng_grps, engs, grp_idx,
				  (void **)uc_info, 1);

release_fw:
	cpt_ucode_release_fw(&fw_info);
err_unlock:
	mutex_unlock(&eng_grps->lock);
	return ret;
err_print:
	dev_err(dev, "%s\n", err_msg);
	return ret;
}

int otx2_cpt_dl_custom_egrp_delete(struct otx2_cptpf_dev *cptpf,
				   struct devlink_param_gset_ctx *ctx)
{
	struct otx2_cpt_eng_grps *eng_grps = &cptpf->eng_grps;
	struct device *dev = &cptpf->pdev->dev;
	char *tmp, *err_msg;
	int egrp;
	int ret;

	err_msg = "Invalid input string format (ex: egrp:0)";
	if (strncasecmp(ctx->val.vstr, "egrp", 4))
		goto err_print;
	tmp = ctx->val.vstr;
	strsep(&tmp, ":");
	if (!tmp)
		goto err_print;
	if (kstrtoint(tmp, 10, &egrp))
		goto err_print;

	if (egrp < 0 || egrp >= OTX2_CPT_MAX_ENGINE_GROUPS) {
		dev_err(dev, "Invalid engine group %d", egrp);
		return -EINVAL;
	}
	if (!eng_grps->grp[egrp].is_enabled) {
		dev_err(dev, "Error engine_group%d is not configured", egrp);
		return -EINVAL;
	}
	mutex_lock(&eng_grps->lock);
	ret = delete_engine_group(dev, &eng_grps->grp[egrp]);
	mutex_unlock(&eng_grps->lock);

	return ret;

err_print:
	dev_err(dev, "%s\n", err_msg);
	return -EINVAL;
}