// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */

#define pr_fmt(fmt)	"QAT: " fmt

#include <linux/bitops.h>
#include <linux/iopoll.h>
#include <asm/div64.h>
#include "adf_accel_devices.h"
#include "adf_cfg_services.h"
#include "adf_common_drv.h"
#include "adf_fw_config.h"
#include "adf_gen4_hw_data.h"
#include "adf_gen4_pm.h"
#include "icp_qat_fw_comp.h"
#include "icp_qat_hw_20_comp.h"

u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self)
{
	return ADF_GEN4_ACCELERATORS_MASK;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_accel_mask);

u32 adf_gen4_get_num_accels(struct adf_hw_device_data *self)
{
	return ADF_GEN4_MAX_ACCELERATORS;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_num_accels);

u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self)
{
	if (!self || !self->ae_mask)
		return 0;

	return hweight32(self->ae_mask);
}
EXPORT_SYMBOL_GPL(adf_gen4_get_num_aes);

u32 adf_gen4_get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_GEN4_PMISC_BAR;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_misc_bar_id);

u32 adf_gen4_get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_GEN4_ETR_BAR;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_etr_bar_id);

u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self)
{
	return ADF_GEN4_SRAM_BAR;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_sram_bar_id);

enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self)
{
	return DEV_SKU_1;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_sku);

void adf_gen4_get_arb_info(struct arb_info *arb_info)
{
	arb_info->arb_cfg = ADF_GEN4_ARB_CONFIG;
	arb_info->arb_offset = ADF_GEN4_ARB_OFFSET;
	arb_info->wt2sam_offset = ADF_GEN4_ARB_WRK_2_SER_MAP_OFFSET;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_arb_info);

void adf_gen4_get_admin_info(struct admin_info *admin_csrs_info)
{
	admin_csrs_info->mailbox_offset = ADF_GEN4_MAILBOX_BASE_OFFSET;
	admin_csrs_info->admin_msg_ur = ADF_GEN4_ADMINMSGUR_OFFSET;
	admin_csrs_info->admin_msg_lr = ADF_GEN4_ADMINMSGLR_OFFSET;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_admin_info);

u32 adf_gen4_get_heartbeat_clock(struct adf_hw_device_data *self)
{
	/*
	 * GEN4 uses KPT counter for HB
	 */
	return ADF_GEN4_KPT_COUNTER_FREQ;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_heartbeat_clock);

void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR];
	void __iomem *csr = misc_bar->virt_addr;

	/* Enable all in errsou3 except VFLR notification on host */
	ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
}
EXPORT_SYMBOL_GPL(adf_gen4_enable_error_correction);

void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;

	addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;

	/* Enable bundle interrupts */
	ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET, 0);
	ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X1_MASK_OFFSET, 0);

	/* Enable misc interrupts */
	ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_MASK_OFFSET, 0);
}
EXPORT_SYMBOL_GPL(adf_gen4_enable_ints);

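/*
 * Power up the device by setting the PM DRV_ACTIVE bit and waiting for the
 * hardware to report PM_INIT_STATE. The PM interrupt source is temporarily
 * masked in ERRMSK2 while the handshake completes, presumably so that it
 * cannot fire before the driver is ready to service it.
 */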
int adf_gen4_init_device(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;
	u32 status;
	u32 csr;
	int ret;

	addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;

	/* Temporarily mask PM interrupt */
	csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
	csr |= ADF_GEN4_PM_SOU;
	ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);

	/* Set DRV_ACTIVE bit to power up the device */
	ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);

	/* Poll status register to make sure the device is powered up */
	ret = read_poll_timeout(ADF_CSR_RD, status,
				status & ADF_GEN4_PM_INIT_STATE,
				ADF_GEN4_PM_POLL_DELAY_US,
				ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
				ADF_GEN4_PM_STATUS);
	if (ret)
		dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");

	return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_device);

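/*
 * Program the shared slice (SSM) watchdog timers: one value for the
 * symmetric crypto and compression slices and a separate one for the PKE
 * (asymmetric crypto) slices, whose operations typically take longer. The
 * watchdogs bound how long a slice may spend on a single request, so a
 * stuck request cannot occupy a slice indefinitely.
 */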
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
	u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;

	/* Enable watchdog timer for sym and dc */
	ADF_CSR_WR64_LO_HI(pmisc_addr, ADF_SSMWDTL_OFFSET, ADF_SSMWDTH_OFFSET, timer_val);

	/* Enable watchdog timer for pke */
	ADF_CSR_WR64_LO_HI(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ADF_SSMWDTPKEH_OFFSET,
			   timer_val_pke);
}
EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);

/*
 * The vector routing table is used to select the MSI-X entry to use for each
 * interrupt source.
 * The first ADF_GEN4_ETR_MAX_BANKS entries correspond to ring interrupts.
 * The final entry corresponds to VF2PF or error interrupts.
 * This vector table could be used to configure one MSI-X entry to be shared
 * between multiple interrupt sources.
 *
 * The default routing is set to have a one to one correspondence between the
 * interrupt source and the MSI-X entry used.
 */
void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev)
{
	void __iomem *csr;
	int i;

	csr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;
	for (i = 0; i <= ADF_GEN4_ETR_MAX_BANKS; i++)
		ADF_CSR_WR(csr, ADF_GEN4_MSIX_RTTABLE_OFFSET(i), i);
}
EXPORT_SYMBOL_GPL(adf_gen4_set_msix_default_rttable);

int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
{
	return 0;
}
EXPORT_SYMBOL_GPL(adf_pfvf_comms_disabled);

static int reset_ring_pair(void __iomem *csr, u32 bank_number)
{
	u32 status;
	int ret;

	/* Write BIT(0) of the rpresetctl register to trigger the ring pair
	 * reset. The rpresetctl registers have no RW fields, so there is no
	 * need to preserve the values of other bits; just write directly.
	 */
	ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
		   ADF_WQM_CSR_RPRESETCTL_RESET);

	/* Read rpresetsts register and wait for rp reset to complete */
	ret = read_poll_timeout(ADF_CSR_RD, status,
				status & ADF_WQM_CSR_RPRESETSTS_STATUS,
				ADF_RPRESET_POLL_DELAY_US,
				ADF_RPRESET_POLL_TIMEOUT_US, true,
				csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
	if (!ret) {
		/* When rp reset is done, clear rpresetsts */
		ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
			   ADF_WQM_CSR_RPRESETSTS_STATUS);
	}

	return ret;
}

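/*
 * Reset a single ring pair (bank). The bank number is validated against the
 * number of banks exposed by the device; the actual reset and the wait for
 * its completion are handled by reset_ring_pair() above.
 */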
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *csr = adf_get_etr_base(accel_dev);
	int ret;

	if (bank_number >= hw_data->num_banks)
		return -EINVAL;

	dev_dbg(&GET_DEV(accel_dev),
		"ring pair reset for bank:%d\n", bank_number);

	ret = reset_ring_pair(csr, bank_number);
	if (ret)
		dev_err(&GET_DEV(accel_dev),
			"ring pair reset failed (timeout)\n");
	else
		dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n");

	return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset);

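/*
 * Thread-to-arbiter map used when the device is configured for the DCC
 * (data compression chaining) service. Each entry describes one AE; each
 * nibble within an entry maps one AE thread to the ring-pair arbiters, as
 * built up in adf_gen4_init_thd2arb_map() below. Presumably only the AEs in
 * the second group carry DCC work, so only their threads are routed to all
 * arbiters (0x0000FFFF) while the remaining entries stay unmapped.
 */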
static const u32 thrd_to_arb_map_dcc[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x0
};

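/*
 * Arbiter masks per ring-pair group: 0x5 selects arbiters 0 and 2 (the ring
 * pairs assigned to RP_GROUP_0) and 0xA selects arbiters 1 and 3
 * (RP_GROUP_1), matching the group-to-ring-pair layout used in
 * adf_gen4_get_ring_to_svc_map().
 */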
static const u16 rp_group_to_arb_mask[] = {
	[RP_GROUP_0] = 0x5,
	[RP_GROUP_1] = 0xA,
};

static bool is_single_service(int service_id)
{
	switch (service_id) {
	case SVC_DC:
	case SVC_SYM:
	case SVC_ASYM:
		return true;
	default:
		return false;
	}
}

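/*
 * A service mask is supported on GEN4 if it enables exactly one known
 * service, or exactly two as long as DCC is not one of them. The standalone
 * decompression service is never available on these devices.
 */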
bool adf_gen4_services_supported(unsigned long mask)
{
	unsigned long num_svc = hweight_long(mask);

	if (mask >= BIT(SVC_COUNT))
		return false;

	if (test_bit(SVC_DECOMP, &mask))
		return false;

	switch (num_svc) {
	case ADF_ONE_SERVICE:
		return true;
	case ADF_TWO_SERVICES:
		return !test_bit(SVC_DCC, &mask);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(adf_gen4_services_supported);

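/*
 * Build the thread-to-arbiter map consumed by the hardware arbiter. For each
 * worker firmware object, the enabled threads of its AEs are given a 4-bit
 * arbiter mask selected by the object's ring-pair group (or by both groups
 * when a single service owns the whole device).
 *
 * Worked example (values from rp_group_to_arb_mask above): for an AE in
 * RP_GROUP_0 with threads 0 and 2 enabled, arb_mask = 0x5 and the per-AE
 * word becomes (0x5 << 0) | (0x5 << 8) = 0x505.
 */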
int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	u32 *thd2arb_map = hw_data->thd_to_arb_map;
	unsigned int ae_cnt, worker_obj_cnt, i, j;
	unsigned long ae_mask, thds_mask;
	int srv_id, rp_group;
	u32 thd2arb_map_base;
	u16 arb_mask;

	if (!hw_data->get_rp_group || !hw_data->get_ena_thd_mask ||
	    !hw_data->get_num_aes || !hw_data->uof_get_num_objs ||
	    !hw_data->uof_get_ae_mask)
		return -EFAULT;

	srv_id = adf_get_service_enabled(accel_dev);
	if (srv_id < 0)
		return srv_id;

	ae_cnt = hw_data->get_num_aes(hw_data);
	worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) -
			 ADF_GEN4_ADMIN_ACCELENGINES;

	if (srv_id == SVC_DCC) {
		if (ae_cnt > ICP_QAT_HW_AE_DELIMITER)
			return -EINVAL;

		memcpy(thd2arb_map, thrd_to_arb_map_dcc,
		       array_size(sizeof(*thd2arb_map), ae_cnt));
		return 0;
	}

	for (i = 0; i < worker_obj_cnt; i++) {
		ae_mask = hw_data->uof_get_ae_mask(accel_dev, i);
		rp_group = hw_data->get_rp_group(accel_dev, ae_mask);
		thds_mask = hw_data->get_ena_thd_mask(accel_dev, i);
		thd2arb_map_base = 0;

		if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0)
			return -EINVAL;

		if (thds_mask == ADF_GEN4_ENA_THD_MASK_ERROR)
			return -EINVAL;

		if (is_single_service(srv_id))
			arb_mask = rp_group_to_arb_mask[RP_GROUP_0] |
				   rp_group_to_arb_mask[RP_GROUP_1];
		else
			arb_mask = rp_group_to_arb_mask[rp_group];

		for_each_set_bit(j, &thds_mask, ADF_NUM_THREADS_PER_AE)
			thd2arb_map_base |= arb_mask << (j * 4);

		for_each_set_bit(j, &ae_mask, ae_cnt)
			thd2arb_map[j] = thd2arb_map_base;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_thd2arb_map);

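/*
 * Derive the ring-pair-to-service map from the loaded firmware objects. The
 * last RP_GROUP_COUNT worker objects determine which service each group
 * handles; ring pairs 0 and 2 follow RP_GROUP_0, ring pairs 1 and 3 follow
 * RP_GROUP_1. For example, with a sym object in group 0 and an asym object
 * in group 1, ring pairs 0/2 serve sym while 1/3 serve asym.
 */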
u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { };
	unsigned int ae_mask, start_id, worker_obj_cnt, i;
	u16 ring_to_svc_map;
	int rp_group;

	if (!hw_data->get_rp_group || !hw_data->uof_get_ae_mask ||
	    !hw_data->uof_get_obj_type || !hw_data->uof_get_num_objs)
		return 0;

	/* If dcc, all rings handle compression requests */
	if (adf_get_service_enabled(accel_dev) == SVC_DCC) {
		for (i = 0; i < RP_GROUP_COUNT; i++)
			rps[i] = COMP;
		goto set_mask;
	}

	worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) -
			 ADF_GEN4_ADMIN_ACCELENGINES;
	start_id = worker_obj_cnt - RP_GROUP_COUNT;

	for (i = start_id; i < worker_obj_cnt; i++) {
		ae_mask = hw_data->uof_get_ae_mask(accel_dev, i);
		rp_group = hw_data->get_rp_group(accel_dev, ae_mask);
		if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0)
			return 0;

		switch (hw_data->uof_get_obj_type(accel_dev, i)) {
		case ADF_FW_SYM_OBJ:
			rps[rp_group] = SYM;
			break;
		case ADF_FW_ASYM_OBJ:
			rps[rp_group] = ASYM;
			break;
		case ADF_FW_DC_OBJ:
			rps[rp_group] = COMP;
			break;
		default:
			rps[rp_group] = 0;
			break;
		}
	}

set_mask:
	ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
			  rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
			  rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
			  rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;

	return ring_to_svc_map;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_ring_to_svc_map);

/*
 * adf_gen4_bank_quiesce_coal_timer() - quiesce bank coalesced interrupt timer
 * @accel_dev: Pointer to the device structure
 * @bank_idx: Offset to the bank within this device
 * @timeout_ms: Timeout in milliseconds for the operation
 *
 * This function tries to quiesce the coalesced interrupt timer of a bank if
 * it has been enabled and triggered.
 *
 * Returns 0 on success, error code otherwise
 *
 */
int adf_gen4_bank_quiesce_coal_timer(struct adf_accel_dev *accel_dev,
				     u32 bank_idx, int timeout_ms)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	void __iomem *csr_misc = adf_get_pmisc_base(accel_dev);
	void __iomem *csr_etr = adf_get_etr_base(accel_dev);
	u32 int_col_ctl, int_col_mask, int_col_en;
	u32 e_stat, intsrc;
	u64 wait_us;
	int ret;

	if (timeout_ms < 0)
		return -EINVAL;

	int_col_ctl = csr_ops->read_csr_int_col_ctl(csr_etr, bank_idx);
	int_col_mask = csr_ops->get_int_col_ctl_enable_mask();
	if (!(int_col_ctl & int_col_mask))
		return 0;

	int_col_en = csr_ops->read_csr_int_col_en(csr_etr, bank_idx);
	int_col_en &= BIT(ADF_WQM_CSR_RP_IDX_RX);

	e_stat = csr_ops->read_csr_e_stat(csr_etr, bank_idx);
	if (!(~e_stat & int_col_en))
		return 0;

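	/*
	 * Estimate how long the coalesced timer may still take to fire. The
	 * timer value programmed in the coalescing control register (enable
	 * bit masked off) is shifted by 8, which appears to convert timer
	 * ticks into clock cycles, doubled for margin, and then converted to
	 * microseconds using the device clock frequency. As a rough example,
	 * assuming a programmed value of 0x1FF and a 500 MHz clock:
	 * 2 * 0x1FF * 256 / 500000000 s, i.e. roughly 523 us.
	 */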
	wait_us = 2 * ((int_col_ctl & ~int_col_mask) << 8) * USEC_PER_SEC;
	do_div(wait_us, hw_data->clock_frequency);
	wait_us = min(wait_us, (u64)timeout_ms * USEC_PER_MSEC);
	dev_dbg(&GET_DEV(accel_dev),
		"wait for bank %d - coalesced timer expires in %llu us (max=%u ms estat=0x%x intcolen=0x%x)\n",
		bank_idx, wait_us, timeout_ms, e_stat, int_col_en);

	ret = read_poll_timeout(ADF_CSR_RD, intsrc, intsrc,
				ADF_COALESCED_POLL_DELAY_US, wait_us, true,
				csr_misc, ADF_WQM_CSR_RPINTSOU(bank_idx));
	if (ret)
		dev_warn(&GET_DEV(accel_dev),
			 "coalesced timer for bank %d expired (%llu us)\n",
			 bank_idx, wait_us);

	return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_bank_quiesce_coal_timer);

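/*
 * Request a drain of all rings in a bank by writing the drain bit of the
 * rpresetctl register, then poll rpresetsts for completion in the same way
 * as for a ring pair reset. The caller clears the status bit afterwards via
 * adf_gen4_bank_drain_finish().
 */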
static int drain_bank(void __iomem *csr, u32 bank_number, int timeout_us)
{
	u32 status;

	ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
		   ADF_WQM_CSR_RPRESETCTL_DRAIN);

	return read_poll_timeout(ADF_CSR_RD, status,
				status & ADF_WQM_CSR_RPRESETSTS_STATUS,
				ADF_RPRESET_POLL_DELAY_US, timeout_us, true,
				csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
}

void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev,
				u32 bank_number)
{
	void __iomem *csr = adf_get_etr_base(accel_dev);

	ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
		   ADF_WQM_CSR_RPRESETSTS_STATUS);
}

int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev,
			      u32 bank_number, int timeout_us)
{
	void __iomem *csr = adf_get_etr_base(accel_dev);
	int ret;

	dev_dbg(&GET_DEV(accel_dev), "Drain bank %d\n", bank_number);

	ret = drain_bank(csr, bank_number, timeout_us);
	if (ret)
		dev_err(&GET_DEV(accel_dev), "Bank drain failed (timeout)\n");
	else
		dev_dbg(&GET_DEV(accel_dev), "Bank drain successful\n");

	return ret;
}

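/*
 * Populate the compression slice configuration words of a firmware request
 * template. Only deflate with dynamic Huffman coding is handled here; any
 * other algorithm is rejected with -EINVAL.
 */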
static int adf_gen4_build_comp_block(void *ctx, enum adf_dc_algo algo)
{
	struct icp_qat_fw_comp_req *req_tmpl = ctx;
	struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_hw_comp_20_config_csr_upper hw_comp_upper_csr = { };
	struct icp_qat_hw_comp_20_config_csr_lower hw_comp_lower_csr = { };
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	u32 upper_val;
	u32 lower_val;

	switch (algo) {
	case QAT_DEFLATE:
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
		break;
	default:
		return -EINVAL;
	}

	hw_comp_lower_csr.skip_ctrl = ICP_QAT_HW_COMP_20_BYTE_SKIP_3BYTE_LITERAL;
	hw_comp_lower_csr.algo = ICP_QAT_HW_COMP_20_HW_COMP_FORMAT_ILZ77;
	hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_20_LLLBD_CTRL_LLLBD_ENABLED;
	hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_20_SEARCH_DEPTH_LEVEL_1;
	hw_comp_lower_csr.hash_update = ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
	hw_comp_lower_csr.edmm = ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED;
	hw_comp_upper_csr.nice = ICP_QAT_HW_COMP_20_CONFIG_CSR_NICE_PARAM_DEFAULT_VAL;
	hw_comp_upper_csr.lazy = ICP_QAT_HW_COMP_20_CONFIG_CSR_LAZY_PARAM_DEFAULT_VAL;

	upper_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_UPPER(hw_comp_upper_csr);
	lower_val = ICP_QAT_FW_COMP_20_BUILD_CONFIG_LOWER(hw_comp_lower_csr);

	cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
	cd_pars->u.sl.comp_slice_cfg_word[1] = upper_val;

	return 0;
}

static int adf_gen4_build_decomp_block(void *ctx, enum adf_dc_algo algo)
{
	struct icp_qat_fw_comp_req *req_tmpl = ctx;
	struct icp_qat_hw_decomp_20_config_csr_lower hw_decomp_lower_csr = { };
	struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	u32 lower_val;

	switch (algo) {
	case QAT_DEFLATE:
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
		break;
	default:
		return -EINVAL;
	}

	hw_decomp_lower_csr.algo = ICP_QAT_HW_DECOMP_20_HW_DECOMP_FORMAT_DEFLATE;
	lower_val = ICP_QAT_FW_DECOMP_20_BUILD_CONFIG_LOWER(hw_decomp_lower_csr);

	cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
	cd_pars->u.sl.comp_slice_cfg_word[1] = 0;

	return 0;
}

void adf_gen4_init_dc_ops(struct adf_dc_ops *dc_ops)
{
	dc_ops->build_comp_block = adf_gen4_build_comp_block;
	dc_ops->build_decomp_block = adf_gen4_build_decomp_block;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_dc_ops);

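/*
 * Initialize the per-service AE counts used by the rate limiting code. Every
 * base service is assigned ae_cnt - 1, presumably to exclude the AE reserved
 * for the admin firmware; the unsupported decompression service is set to 0.
 */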
void adf_gen4_init_num_svc_aes(struct adf_rl_hw_data *device_data)
{
	struct adf_hw_device_data *hw_data;
	unsigned int i;
	u32 ae_cnt;

	hw_data = container_of(device_data, struct adf_hw_device_data, rl_data);
	ae_cnt = hweight32(hw_data->get_ae_mask(hw_data));
	if (!ae_cnt)
		return;

	for (i = 0; i < SVC_BASE_COUNT; i++)
		device_data->svc_ae_mask[i] = ae_cnt - 1;

	/*
	 * The decompression service is not supported on QAT GEN4 devices.
	 * Therefore, set svc_ae_mask to 0.
	 */
	device_data->svc_ae_mask[SVC_DECOMP] = 0;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_num_svc_aes);

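/*
 * Report the number of hardware slices backing a base service, as recorded
 * in the rate limiting data: cipher slices for sym, PKE slices for asym and
 * compression slices for dc. Unknown services report zero.
 */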
u32 adf_gen4_get_svc_slice_cnt(struct adf_accel_dev *accel_dev,
			       enum adf_base_services svc)
{
	struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;

	switch (svc) {
	case SVC_SYM:
		return device_data->slices.cph_cnt;
	case SVC_ASYM:
		return device_data->slices.pke_cnt;
	case SVC_DC:
		return device_data->slices.dcpr_cnt;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(adf_gen4_get_svc_slice_cnt);
601