// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include <linux/bitops.h>
#include <linux/iopoll.h>
#include <asm/div64.h>
#include "adf_accel_devices.h"
#include "adf_cfg_services.h"
#include "adf_common_drv.h"
#include "adf_fw_config.h"
#include "adf_gen4_hw_data.h"
#include "adf_gen4_pm.h"

u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self)
{
	return ADF_GEN4_ACCELERATORS_MASK;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_accel_mask);

u32 adf_gen4_get_num_accels(struct adf_hw_device_data *self)
{
	return ADF_GEN4_MAX_ACCELERATORS;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_num_accels);

u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self)
{
	if (!self || !self->ae_mask)
		return 0;

	return hweight32(self->ae_mask);
}
EXPORT_SYMBOL_GPL(adf_gen4_get_num_aes);

u32 adf_gen4_get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_GEN4_PMISC_BAR;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_misc_bar_id);

u32 adf_gen4_get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_GEN4_ETR_BAR;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_etr_bar_id);

u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self)
{
	return ADF_GEN4_SRAM_BAR;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_sram_bar_id);

enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self)
{
	return DEV_SKU_1;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_sku);

void adf_gen4_get_arb_info(struct arb_info *arb_info)
{
	arb_info->arb_cfg = ADF_GEN4_ARB_CONFIG;
	arb_info->arb_offset = ADF_GEN4_ARB_OFFSET;
	arb_info->wt2sam_offset = ADF_GEN4_ARB_WRK_2_SER_MAP_OFFSET;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_arb_info);

void adf_gen4_get_admin_info(struct admin_info *admin_csrs_info)
{
	admin_csrs_info->mailbox_offset = ADF_GEN4_MAILBOX_BASE_OFFSET;
	admin_csrs_info->admin_msg_ur = ADF_GEN4_ADMINMSGUR_OFFSET;
	admin_csrs_info->admin_msg_lr = ADF_GEN4_ADMINMSGLR_OFFSET;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_admin_info);

u32 adf_gen4_get_heartbeat_clock(struct adf_hw_device_data *self)
{
	/* GEN4 uses the KPT counter for heartbeat */
	return ADF_GEN4_KPT_COUNTER_FREQ;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_heartbeat_clock);

void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR];
	void __iomem *csr = misc_bar->virt_addr;

	/* Enable all in errsou3 except VFLR notification on host */
	ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
}
EXPORT_SYMBOL_GPL(adf_gen4_enable_error_correction);

void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;

	addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;

	/* Enable bundle interrupts */
	ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET, 0);
	ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X1_MASK_OFFSET, 0);

	/* Enable misc interrupts */
	ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_MASK_OFFSET, 0);
}
EXPORT_SYMBOL_GPL(adf_gen4_enable_ints);

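/*
 * adf_gen4_init_device() - power up the device
 * @accel_dev: Pointer to the device structure
 *
 * Temporarily mask the power management interrupt, then request power up
 * by setting the DRV_ACTIVE bit and poll the PM status register until
 * the device reports that it reached the init state.
 *
 * Returns 0 on success, error code otherwise
 */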
int adf_gen4_init_device(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;
	u32 status;
	u32 csr;
	int ret;

	addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;

	/* Temporarily mask PM interrupt */
	csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
	csr |= ADF_GEN4_PM_SOU;
	ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);

	/* Set DRV_ACTIVE bit to power up the device */
	ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);

	/* Poll status register to make sure the device is powered up */
	ret = read_poll_timeout(ADF_CSR_RD, status,
				status & ADF_GEN4_PM_INIT_STATE,
				ADF_GEN4_PM_POLL_DELAY_US,
				ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
				ADF_GEN4_PM_STATUS);
	if (ret)
		dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");

	return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_device);

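/*
 * adf_gen4_set_ssm_wdtimer() - enable the SSM watchdog timers
 * @accel_dev: Pointer to the device structure
 *
 * Program the watchdog expiry value for the symmetric crypto and
 * compression slices and, with a separate default, for the PKE slices.
 */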
void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
	u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;

	/* Enable watchdog timer for sym and dc */
	ADF_CSR_WR64_LO_HI(pmisc_addr, ADF_SSMWDTL_OFFSET, ADF_SSMWDTH_OFFSET, timer_val);

	/* Enable watchdog timer for pke */
	ADF_CSR_WR64_LO_HI(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ADF_SSMWDTPKEH_OFFSET,
			   timer_val_pke);
}
EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);

/*
 * The vector routing table is used to select the MSI-X entry to use for each
 * interrupt source.
 * The first ADF_GEN4_ETR_MAX_BANKS entries correspond to ring interrupts.
 * The final entry corresponds to VF2PF or error interrupts.
 * This vector table could be used to configure one MSI-X entry to be shared
 * between multiple interrupt sources.
 *
 * The default routing is set to have a one to one correspondence between the
 * interrupt source and the MSI-X entry used.
 */
void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev)
{
	void __iomem *csr;
	int i;

	csr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;
	for (i = 0; i <= ADF_GEN4_ETR_MAX_BANKS; i++)
		ADF_CSR_WR(csr, ADF_GEN4_MSIX_RTTABLE_OFFSET(i), i);
}
EXPORT_SYMBOL_GPL(adf_gen4_set_msix_default_rttable);

int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
{
	return 0;
}
EXPORT_SYMBOL_GPL(adf_pfvf_comms_disabled);

static int reset_ring_pair(void __iomem *csr, u32 bank_number)
{
	u32 status;
	int ret;

	/*
	 * Write rpresetctl register BIT(0) as 1.
	 * Since rpresetctl registers have no RW fields, there is no need to
	 * preserve values for other bits. Just write directly.
	 */
	ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
		   ADF_WQM_CSR_RPRESETCTL_RESET);

	/* Read rpresetsts register and wait for rp reset to complete */
	ret = read_poll_timeout(ADF_CSR_RD, status,
				status & ADF_WQM_CSR_RPRESETSTS_STATUS,
				ADF_RPRESET_POLL_DELAY_US,
				ADF_RPRESET_POLL_TIMEOUT_US, true,
				csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
	if (!ret) {
		/* When rp reset is done, clear rpresetsts */
		ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
			   ADF_WQM_CSR_RPRESETSTS_STATUS);
	}

	return ret;
}

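/*
 * adf_gen4_ring_pair_reset() - reset a single ring pair
 * @accel_dev: Pointer to the device structure
 * @bank_number: Bank to reset
 *
 * Triggers a ring pair reset through the rpresetctl register and waits
 * for the hardware to report completion in rpresetsts.
 *
 * Returns 0 on success, error code otherwise
 */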
int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *csr = adf_get_etr_base(accel_dev);
	int ret;

	if (bank_number >= hw_data->num_banks)
		return -EINVAL;

	dev_dbg(&GET_DEV(accel_dev),
		"ring pair reset for bank:%d\n", bank_number);

	ret = reset_ring_pair(csr, bank_number);
	if (ret)
		dev_err(&GET_DEV(accel_dev),
			"ring pair reset failed (timeout)\n");
	else
		dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n");

	return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset);

static const u32 thrd_to_arb_map_dcc[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x0
};

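/*
 * Arbiter mask for each ring pair group, used as one nibble per AE
 * thread in the thread-to-arbiter map. Bits 0 and 2 (0x5) select
 * RP_GROUP_0 and bits 1 and 3 (0xA) select RP_GROUP_1; single-service
 * configurations OR both masks so every thread serves both groups.
 */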
static const u16 rp_group_to_arb_mask[] = {
	[RP_GROUP_0] = 0x5,
	[RP_GROUP_1] = 0xA,
};

static bool is_single_service(int service_id)
{
	switch (service_id) {
	case SVC_DC:
	case SVC_SYM:
	case SVC_ASYM:
		return true;
	default:
		return false;
	}
}

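/*
 * adf_gen4_services_supported() - check if a set of services is supported
 * @mask: Bitmask of requested services
 *
 * A single service is always supported; a combination of two services is
 * supported unless one of them is dcc. Anything else is rejected.
 *
 * Returns true if the service combination is supported, false otherwise
 */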
bool adf_gen4_services_supported(unsigned long mask)
{
	unsigned long num_svc = hweight_long(mask);

	if (mask >= BIT(SVC_BASE_COUNT))
		return false;

	switch (num_svc) {
	case ADF_ONE_SERVICE:
		return true;
	case ADF_TWO_SERVICES:
		return !test_bit(SVC_DCC, &mask);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(adf_gen4_services_supported);

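/*
 * adf_gen4_init_thd2arb_map() - initialize the thread-to-arbiter map
 * @accel_dev: Pointer to the device structure
 *
 * Builds one 32-bit map entry per accel engine, with a 4-bit arbiter
 * mask for each of the engine's threads. For the dcc configuration a
 * fixed table is copied; otherwise the mask for each worker object is
 * derived from its ring pair group and its enabled thread mask.
 *
 * Returns 0 on success, error code otherwise
 */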
int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	u32 *thd2arb_map = hw_data->thd_to_arb_map;
	unsigned int ae_cnt, worker_obj_cnt, i, j;
	unsigned long ae_mask, thds_mask;
	int srv_id, rp_group;
	u32 thd2arb_map_base;
	u16 arb_mask;

	if (!hw_data->get_rp_group || !hw_data->get_ena_thd_mask ||
	    !hw_data->get_num_aes || !hw_data->uof_get_num_objs ||
	    !hw_data->uof_get_ae_mask)
		return -EFAULT;

	srv_id = adf_get_service_enabled(accel_dev);
	if (srv_id < 0)
		return srv_id;

	ae_cnt = hw_data->get_num_aes(hw_data);
	worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) -
			 ADF_GEN4_ADMIN_ACCELENGINES;

	if (srv_id == SVC_DCC) {
		if (ae_cnt > ICP_QAT_HW_AE_DELIMITER)
			return -EINVAL;

		memcpy(thd2arb_map, thrd_to_arb_map_dcc,
		       array_size(sizeof(*thd2arb_map), ae_cnt));
		return 0;
	}

	for (i = 0; i < worker_obj_cnt; i++) {
		ae_mask = hw_data->uof_get_ae_mask(accel_dev, i);
		rp_group = hw_data->get_rp_group(accel_dev, ae_mask);
		thds_mask = hw_data->get_ena_thd_mask(accel_dev, i);
		thd2arb_map_base = 0;

		if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0)
			return -EINVAL;

		if (thds_mask == ADF_GEN4_ENA_THD_MASK_ERROR)
			return -EINVAL;

		if (is_single_service(srv_id))
			arb_mask = rp_group_to_arb_mask[RP_GROUP_0] |
				   rp_group_to_arb_mask[RP_GROUP_1];
		else
			arb_mask = rp_group_to_arb_mask[rp_group];

		for_each_set_bit(j, &thds_mask, ADF_NUM_THREADS_PER_AE)
			thd2arb_map_base |= arb_mask << (j * 4);

		for_each_set_bit(j, &ae_mask, ae_cnt)
			thd2arb_map[j] = thd2arb_map_base;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_thd2arb_map);

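/*
 * adf_gen4_get_ring_to_svc_map() - derive the ring pair to service map
 * @accel_dev: Pointer to the device structure
 *
 * Determines which service each ring pair group handles by inspecting
 * the type of the last RP_GROUP_COUNT worker objects, then packs the
 * result with ring pairs 0 and 2 taken from RP_GROUP_0 and ring pairs
 * 1 and 3 taken from RP_GROUP_1. For the dcc configuration all ring
 * pairs handle compression requests.
 *
 * Returns the ring to service map, or 0 on error
 */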
u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { };
	unsigned int ae_mask, start_id, worker_obj_cnt, i;
	u16 ring_to_svc_map;
	int rp_group;

	if (!hw_data->get_rp_group || !hw_data->uof_get_ae_mask ||
	    !hw_data->uof_get_obj_type || !hw_data->uof_get_num_objs)
		return 0;

	/* If dcc, all rings handle compression requests */
	if (adf_get_service_enabled(accel_dev) == SVC_DCC) {
		for (i = 0; i < RP_GROUP_COUNT; i++)
			rps[i] = COMP;
		goto set_mask;
	}

	worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) -
			 ADF_GEN4_ADMIN_ACCELENGINES;
	start_id = worker_obj_cnt - RP_GROUP_COUNT;

	for (i = start_id; i < worker_obj_cnt; i++) {
		ae_mask = hw_data->uof_get_ae_mask(accel_dev, i);
		rp_group = hw_data->get_rp_group(accel_dev, ae_mask);
		if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0)
			return 0;

		switch (hw_data->uof_get_obj_type(accel_dev, i)) {
		case ADF_FW_SYM_OBJ:
			rps[rp_group] = SYM;
			break;
		case ADF_FW_ASYM_OBJ:
			rps[rp_group] = ASYM;
			break;
		case ADF_FW_DC_OBJ:
			rps[rp_group] = COMP;
			break;
		default:
			rps[rp_group] = 0;
			break;
		}
	}

set_mask:
	ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
			  rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
			  rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
			  rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;

	return ring_to_svc_map;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_ring_to_svc_map);

/*
 * adf_gen4_bank_quiesce_coal_timer() - quiesce bank coalesced interrupt timer
 * @accel_dev: Pointer to the device structure
 * @bank_idx: Offset to the bank within this device
 * @timeout_ms: Timeout in milliseconds for the operation
 *
 * This function tries to quiesce the coalesced interrupt timer of a bank if
 * it has been enabled and triggered.
 *
 * Returns 0 on success, error code otherwise
 */
int adf_gen4_bank_quiesce_coal_timer(struct adf_accel_dev *accel_dev,
				     u32 bank_idx, int timeout_ms)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	void __iomem *csr_misc = adf_get_pmisc_base(accel_dev);
	void __iomem *csr_etr = adf_get_etr_base(accel_dev);
	u32 int_col_ctl, int_col_mask, int_col_en;
	u32 e_stat, intsrc;
	u64 wait_us;
	int ret;

	if (timeout_ms < 0)
		return -EINVAL;

	int_col_ctl = csr_ops->read_csr_int_col_ctl(csr_etr, bank_idx);
	int_col_mask = csr_ops->get_int_col_ctl_enable_mask();
	if (!(int_col_ctl & int_col_mask))
		return 0;

	int_col_en = csr_ops->read_csr_int_col_en(csr_etr, bank_idx);
	int_col_en &= BIT(ADF_WQM_CSR_RP_IDX_RX);

	e_stat = csr_ops->read_csr_e_stat(csr_etr, bank_idx);
	if (!(~e_stat & int_col_en))
		return 0;

	wait_us = 2 * ((int_col_ctl & ~int_col_mask) << 8) * USEC_PER_SEC;
	do_div(wait_us, hw_data->clock_frequency);
	wait_us = min(wait_us, (u64)timeout_ms * USEC_PER_MSEC);
	dev_dbg(&GET_DEV(accel_dev),
		"wait for bank %d - coalesced timer expires in %llu us (max=%u ms estat=0x%x intcolen=0x%x)\n",
		bank_idx, wait_us, timeout_ms, e_stat, int_col_en);

	ret = read_poll_timeout(ADF_CSR_RD, intsrc, intsrc,
				ADF_COALESCED_POLL_DELAY_US, wait_us, true,
				csr_misc, ADF_WQM_CSR_RPINTSOU(bank_idx));
	if (ret)
		dev_warn(&GET_DEV(accel_dev),
			 "coalesced timer for bank %d expired (%llu us)\n",
			 bank_idx, wait_us);

	return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_bank_quiesce_coal_timer);

static int drain_bank(void __iomem *csr, u32 bank_number, int timeout_us)
{
	u32 status;

	ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
		   ADF_WQM_CSR_RPRESETCTL_DRAIN);

	return read_poll_timeout(ADF_CSR_RD, status,
				 status & ADF_WQM_CSR_RPRESETSTS_STATUS,
				 ADF_RPRESET_POLL_DELAY_US, timeout_us, true,
				 csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
}

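/*
 * adf_gen4_bank_drain_finish() - complete a bank drain operation
 * @accel_dev: Pointer to the device structure
 * @bank_number: Bank that was drained
 *
 * Clears the drain status flag once the drain started with
 * adf_gen4_bank_drain_start() has completed.
 */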
void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev,
				u32 bank_number)
{
	void __iomem *csr = adf_get_etr_base(accel_dev);

	ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
		   ADF_WQM_CSR_RPRESETSTS_STATUS);
}

int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev,
			      u32 bank_number, int timeout_us)
{
	void __iomem *csr = adf_get_etr_base(accel_dev);
	int ret;

	dev_dbg(&GET_DEV(accel_dev), "Drain bank %d\n", bank_number);

	ret = drain_bank(csr, bank_number, timeout_us);
	if (ret)
		dev_err(&GET_DEV(accel_dev), "Bank drain failed (timeout)\n");
	else
		dev_dbg(&GET_DEV(accel_dev), "Bank drain successful\n");

	return ret;
}

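/*
 * bank_state_save() - snapshot the CSR state of a bank
 *
 * Reads all bank-level status and interrupt control registers, plus the
 * head, tail, config and base registers of each ring, into @state so
 * that the bank can later be recreated by bank_state_restore().
 */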
static void bank_state_save(struct adf_hw_csr_ops *ops, void __iomem *base,
			    u32 bank, struct bank_state *state, u32 num_rings)
{
	u32 i;

	state->ringstat0 = ops->read_csr_stat(base, bank);
	state->ringuostat = ops->read_csr_uo_stat(base, bank);
	state->ringestat = ops->read_csr_e_stat(base, bank);
	state->ringnestat = ops->read_csr_ne_stat(base, bank);
	state->ringnfstat = ops->read_csr_nf_stat(base, bank);
	state->ringfstat = ops->read_csr_f_stat(base, bank);
	state->ringcstat0 = ops->read_csr_c_stat(base, bank);
	state->iaintflagen = ops->read_csr_int_en(base, bank);
	state->iaintflagreg = ops->read_csr_int_flag(base, bank);
	state->iaintflagsrcsel0 = ops->read_csr_int_srcsel(base, bank);
	state->iaintcolen = ops->read_csr_int_col_en(base, bank);
	state->iaintcolctl = ops->read_csr_int_col_ctl(base, bank);
	state->iaintflagandcolen = ops->read_csr_int_flag_and_col(base, bank);
	state->ringexpstat = ops->read_csr_exp_stat(base, bank);
	state->ringexpintenable = ops->read_csr_exp_int_en(base, bank);
	state->ringsrvarben = ops->read_csr_ring_srv_arb_en(base, bank);

	for (i = 0; i < num_rings; i++) {
		state->rings[i].head = ops->read_csr_ring_head(base, bank, i);
		state->rings[i].tail = ops->read_csr_ring_tail(base, bank, i);
		state->rings[i].config = ops->read_csr_ring_config(base, bank, i);
		state->rings[i].base = ops->read_csr_ring_base(base, bank, i);
	}
}

#define CHECK_STAT(op, expect_val, name, args...) \
({ \
	u32 __expect_val = (expect_val); \
	u32 actual_val = op(args); \
	(__expect_val == actual_val) ? 0 : \
		(pr_err("QAT: Failed to restore %s register. Expected 0x%x, actual 0x%x\n", \
			name, __expect_val, actual_val), -EINVAL); \
})

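/*
 * bank_state_restore() - recreate a previously saved bank state
 *
 * Restores the ring base and config registers first, then replays the
 * head and tail pointers of each TX/RX ring pair, re-arming the
 * interrupt source select edges so the hardware flags match the saved
 * state. Finally the interrupt control registers are restored and each
 * status register is compared against the snapshot via CHECK_STAT().
 */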
static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base,
			      u32 bank, struct bank_state *state, u32 num_rings,
			      int tx_rx_gap)
{
	u32 val, tmp_val, i;
	int ret;

	for (i = 0; i < num_rings; i++)
		ops->write_csr_ring_base(base, bank, i, state->rings[i].base);

	for (i = 0; i < num_rings; i++)
		ops->write_csr_ring_config(base, bank, i, state->rings[i].config);

	for (i = 0; i < num_rings / 2; i++) {
		int tx = i * (tx_rx_gap + 1);
		int rx = tx + tx_rx_gap;

		ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
		ops->write_csr_ring_tail(base, bank, tx, state->rings[tx].tail);

		/*
		 * The TX ring head needs to be updated again to make sure that
		 * the HW will not consider the ring as full when it is empty
		 * and the correct state flags are set to match the recovered state.
		 */
		if (state->ringestat & BIT(tx)) {
			val = ops->read_csr_int_srcsel(base, bank);
			val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK;
			ops->write_csr_int_srcsel_w_val(base, bank, val);
			ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
		}

		ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
		val = ops->read_csr_int_srcsel(base, bank);
		val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
		ops->write_csr_int_srcsel_w_val(base, bank, val);

		ops->write_csr_ring_head(base, bank, rx, state->rings[rx].head);
		val = ops->read_csr_int_srcsel(base, bank);
		val |= ADF_RP_INT_SRC_SEL_F_FALL_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
		ops->write_csr_int_srcsel_w_val(base, bank, val);

		/*
		 * The RX ring tail needs to be updated again to make sure that
		 * the HW will not consider the ring as empty when it is full
		 * and the correct state flags are set to match the recovered state.
		 */
		if (state->ringfstat & BIT(rx))
			ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
	}

	ops->write_csr_int_flag_and_col(base, bank, state->iaintflagandcolen);
	ops->write_csr_int_en(base, bank, state->iaintflagen);
	ops->write_csr_int_col_en(base, bank, state->iaintcolen);
	ops->write_csr_int_srcsel_w_val(base, bank, state->iaintflagsrcsel0);
	ops->write_csr_exp_int_en(base, bank, state->ringexpintenable);
	ops->write_csr_int_col_ctl(base, bank, state->iaintcolctl);
	ops->write_csr_ring_srv_arb_en(base, bank, state->ringsrvarben);

	/* Check that all ring statuses match the saved state. */
	ret = CHECK_STAT(ops->read_csr_stat, state->ringstat0, "ringstat",
			 base, bank);
	if (ret)
		return ret;

	ret = CHECK_STAT(ops->read_csr_e_stat, state->ringestat, "ringestat",
			 base, bank);
	if (ret)
		return ret;

	ret = CHECK_STAT(ops->read_csr_ne_stat, state->ringnestat, "ringnestat",
			 base, bank);
	if (ret)
		return ret;

	ret = CHECK_STAT(ops->read_csr_nf_stat, state->ringnfstat, "ringnfstat",
			 base, bank);
	if (ret)
		return ret;

	ret = CHECK_STAT(ops->read_csr_f_stat, state->ringfstat, "ringfstat",
			 base, bank);
	if (ret)
		return ret;

	ret = CHECK_STAT(ops->read_csr_c_stat, state->ringcstat0, "ringcstat",
			 base, bank);
	if (ret)
		return ret;

	/* Fail if an exception is raised now that was not in the saved state */
	tmp_val = ops->read_csr_exp_stat(base, bank);
	val = state->ringexpstat;
	if (tmp_val && !val) {
		pr_err("QAT: Bank was restored with exception: 0x%x\n", tmp_val);
		return -EINVAL;
	}

	return 0;
}

int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
			     struct bank_state *state)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	void __iomem *csr_base = adf_get_etr_base(accel_dev);

	if (bank_number >= hw_data->num_banks || !state)
		return -EINVAL;

	dev_dbg(&GET_DEV(accel_dev), "Saving state of bank %d\n", bank_number);

	bank_state_save(csr_ops, csr_base, bank_number, state,
			hw_data->num_rings_per_bank);

	return 0;
}
EXPORT_SYMBOL_GPL(adf_gen4_bank_state_save);

int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number,
				struct bank_state *state)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	void __iomem *csr_base = adf_get_etr_base(accel_dev);
	int ret;

	if (bank_number >= hw_data->num_banks || !state)
		return -EINVAL;

	dev_dbg(&GET_DEV(accel_dev), "Restoring state of bank %d\n", bank_number);

	ret = bank_state_restore(csr_ops, csr_base, bank_number, state,
				 hw_data->num_rings_per_bank, hw_data->tx_rx_gap);
	if (ret)
		dev_err(&GET_DEV(accel_dev),
			"Unable to restore state of bank %d\n", bank_number);

	return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_bank_state_restore);