1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2014 - 2020 Intel Corporation */
3 #include <linux/slab.h>
4 #include <linux/delay.h>
5 #include <linux/pci_ids.h>
6
7 #include "adf_accel_devices.h"
8 #include "adf_common_drv.h"
9 #include "icp_qat_hal.h"
10 #include "icp_qat_uclo.h"
11
12 #define BAD_REGADDR 0xffff
13 #define MAX_RETRY_TIMES 10000
14 #define INIT_CTX_ARB_VALUE 0x0
15 #define INIT_CTX_ENABLE_VALUE 0x0
16 #define INIT_PC_VALUE 0x0
17 #define INIT_WAKEUP_EVENTS_VALUE 0x1
18 #define INIT_SIG_EVENTS_VALUE 0x1
19 #define INIT_CCENABLE_VALUE 0x2000
20 #define RST_CSR_QAT_LSB 20
21 #define RST_CSR_AE_LSB 0
22 #define MC_TIMESTAMP_ENABLE (0x1 << 7)
23 #define MIN_RESET_DELAY_US 3
24
25 #define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
26 (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
27 (~(1 << CE_REG_PAR_ERR_BITPOS)))
28 #define INSERT_IMMED_GPRA_CONST(inst, const_val) \
29 (inst = ((inst & 0xFFFF00C03FFull) | \
30 ((((const_val) << 12) & 0x0FF00000ull) | \
31 (((const_val) << 10) & 0x0003FC00ull))))
32 #define INSERT_IMMED_GPRB_CONST(inst, const_val) \
33 (inst = ((inst & 0xFFFF00FFF00ull) | \
34 ((((const_val) << 12) & 0x0FF00000ull) | \
35 (((const_val) << 0) & 0x000000FFull))))
36
37 #define AE(handle, ae) ((handle)->hal_handle->aes[ae])
38
/*
 * Hand-assembled AE microwords (4-byte access variant).
 * NOTE(review): not referenced in this part of the file; presumably used
 * by memory-initialization paths elsewhere in the driver — confirm.
 */
static const u64 inst_4b[] = {
	0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
	0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
	0x0A021000000ull
};
44
/*
 * Hand-assembled AE microcode program written into each AE's ustore by
 * qat_hal_clear_gpr() below to reset GPR and transfer-register state.
 */
static const u64 inst[] = {
	0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
	0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
	0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
	0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
	0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
	0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
	0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
	0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
	0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
	0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
	0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
	0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
	0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
	0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
	0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
	0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
	0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
	0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
	0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
	0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
	0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
	0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
};
69
qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned int ctx_mask)70 void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
71 unsigned char ae, unsigned int ctx_mask)
72 {
73 AE(handle, ae).live_ctx_mask = ctx_mask;
74 }
75
#define CSR_RETRY_TIMES 500
/*
 * Read a per-AE local CSR, retrying while the local CSR interface flags
 * the access as failed (LCS_STATUS set in LOCAL_CSR_STATUS).
 * Returns the CSR value on success, or 0 after CSR_RETRY_TIMES
 * unsuccessful attempts (callers cannot distinguish a real 0).
 */
static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
			     unsigned char ae, unsigned int csr)
{
	unsigned int iterations = CSR_RETRY_TIMES;
	int value;

	do {
		value = GET_AE_CSR(handle, ae, csr);
		if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
			return value;
	} while (iterations--);

	pr_err("QAT: Read CSR timeout\n");
	return 0;
}
92
/*
 * Write @value to a per-AE local CSR, retrying while the local CSR
 * interface flags the access as failed (LCS_STATUS set).
 * Return 0 on success, -EFAULT after CSR_RETRY_TIMES failed attempts.
 */
static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
			     unsigned char ae, unsigned int csr,
			     unsigned int value)
{
	unsigned int iterations = CSR_RETRY_TIMES;

	do {
		SET_AE_CSR(handle, ae, csr, value);
		if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
			return 0;
	} while (iterations--);

	pr_err("QAT: Write CSR Timeout\n");
	return -EFAULT;
}
108
qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned char ctx,unsigned int * events)109 static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
110 unsigned char ae, unsigned char ctx,
111 unsigned int *events)
112 {
113 unsigned int cur_ctx;
114
115 cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
116 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
117 *events = qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT);
118 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
119 }
120
/*
 * Busy-wait until at least @cycles AE clock cycles have elapsed, as
 * measured by the 16-bit PROFILE_COUNT CSR (wraparound is handled).
 * If @chk_inactive is set, additionally require the AE's ACS_ABO bit to
 * be clear before reporting success.
 * Return 0 on success, -EFAULT after MAX_RETRY_TIMES polls.
 */
static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
			       unsigned char ae, unsigned int cycles,
			       int chk_inactive)
{
	unsigned int base_cnt = 0, cur_cnt = 0;
	unsigned int csr = (1 << ACS_ABO_BITPOS);
	int times = MAX_RETRY_TIMES;
	int elapsed_cycles = 0;

	base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
	base_cnt &= 0xffff;
	while ((int)cycles > elapsed_cycles && times--) {
		if (chk_inactive)
			csr = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);

		cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
		cur_cnt &= 0xffff;
		/* counter is 16 bit wide; correct for wraparound */
		elapsed_cycles = cur_cnt - base_cnt;

		if (elapsed_cycles < 0)
			elapsed_cycles += 0x10000;

		/* ensure at least 8 time cycles elapsed in wait_cycles */
		if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
			return 0;
	}
	if (times < 0) {
		pr_err("QAT: wait_num_cycles time out\n");
		return -EFAULT;
	}
	return 0;
}
153
154 #define CLR_BIT(wrd, bit) ((wrd) & ~(1 << (bit)))
155 #define SET_BIT(wrd, bit) ((wrd) | 1 << (bit))
156
qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned char mode)157 int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
158 unsigned char ae, unsigned char mode)
159 {
160 unsigned int csr, new_csr;
161
162 if (mode != 4 && mode != 8) {
163 pr_err("QAT: bad ctx mode=%d\n", mode);
164 return -EINVAL;
165 }
166
167 /* Sets the acceleration engine context mode to either four or eight */
168 csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
169 csr = IGNORE_W1C_MASK & csr;
170 new_csr = (mode == 4) ?
171 SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
172 CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
173 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
174 return 0;
175 }
176
qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned char mode)177 int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
178 unsigned char ae, unsigned char mode)
179 {
180 unsigned int csr, new_csr;
181
182 csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
183 csr &= IGNORE_W1C_MASK;
184
185 new_csr = (mode) ?
186 SET_BIT(csr, CE_NN_MODE_BITPOS) :
187 CLR_BIT(csr, CE_NN_MODE_BITPOS);
188
189 if (new_csr != csr)
190 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
191
192 return 0;
193 }
194
qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle * handle,unsigned char ae,enum icp_qat_uof_regtype lm_type,unsigned char mode)195 int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
196 unsigned char ae, enum icp_qat_uof_regtype lm_type,
197 unsigned char mode)
198 {
199 unsigned int csr, new_csr;
200
201 csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
202 csr &= IGNORE_W1C_MASK;
203 switch (lm_type) {
204 case ICP_LMEM0:
205 new_csr = (mode) ?
206 SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
207 CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
208 break;
209 case ICP_LMEM1:
210 new_csr = (mode) ?
211 SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
212 CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
213 break;
214 case ICP_LMEM2:
215 new_csr = (mode) ?
216 SET_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS) :
217 CLR_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS);
218 break;
219 case ICP_LMEM3:
220 new_csr = (mode) ?
221 SET_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS) :
222 CLR_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS);
223 break;
224 default:
225 pr_err("QAT: lmType = 0x%x\n", lm_type);
226 return -EINVAL;
227 }
228
229 if (new_csr != csr)
230 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
231 return 0;
232 }
233
qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned char mode)234 void qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle,
235 unsigned char ae, unsigned char mode)
236 {
237 unsigned int csr, new_csr;
238
239 csr = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
240 csr &= IGNORE_W1C_MASK;
241 new_csr = (mode) ?
242 SET_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS) :
243 CLR_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS);
244 if (new_csr != csr)
245 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
246 }
247
/*
 * Encode a register type and register number into the operand address
 * used inside AE microwords. Returns BAD_REGADDR (0xffff) for a type
 * this table does not know.
 */
static unsigned short qat_hal_get_reg_addr(unsigned int type,
					   unsigned short reg_num)
{
	unsigned short reg_addr;

	switch (type) {
	case ICP_GPA_ABS:
	case ICP_GPB_ABS:
		reg_addr = 0x80 | (reg_num & 0x7f);
		break;
	case ICP_GPA_REL:
	case ICP_GPB_REL:
		reg_addr = reg_num & 0x1f;
		break;
	case ICP_SR_RD_REL:
	case ICP_SR_WR_REL:
	case ICP_SR_REL:
		reg_addr = 0x180 | (reg_num & 0x1f);
		break;
	case ICP_SR_ABS:
		reg_addr = 0x140 | ((reg_num & 0x3) << 1);
		break;
	case ICP_DR_RD_REL:
	case ICP_DR_WR_REL:
	case ICP_DR_REL:
		reg_addr = 0x1c0 | (reg_num & 0x1f);
		break;
	case ICP_DR_ABS:
		reg_addr = 0x100 | ((reg_num & 0x3) << 1);
		break;
	case ICP_NEIGH_REL:
		reg_addr = 0x280 | (reg_num & 0x1f);
		break;
	case ICP_LMEM0:
		reg_addr = 0x200;
		break;
	case ICP_LMEM1:
		reg_addr = 0x220;
		break;
	case ICP_LMEM2:
		reg_addr = 0x2c0;
		break;
	case ICP_LMEM3:
		reg_addr = 0x2e0;
		break;
	case ICP_NO_DEST:
		reg_addr = 0x300 | (reg_num & 0xff);
		break;
	default:
		reg_addr = BAD_REGADDR;
		break;
	}
	return reg_addr;
}
302
qat_hal_reset(struct icp_qat_fw_loader_handle * handle)303 void qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
304 {
305 unsigned int reset_mask = handle->chip_info->icp_rst_mask;
306 unsigned int reset_csr = handle->chip_info->icp_rst_csr;
307 unsigned int csr_val;
308
309 csr_val = GET_CAP_CSR(handle, reset_csr);
310 csr_val |= reset_mask;
311 SET_CAP_CSR(handle, reset_csr, csr_val);
312 }
313
/*
 * Write @csr_val to the indirect CSR @ae_csr of every context selected
 * in @ctx_mask, preserving the caller's CSR_CTX_POINTER selection.
 */
static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
				unsigned char ae, unsigned int ctx_mask,
				unsigned int ae_csr, unsigned int csr_val)
{
	unsigned int ctx, cur_ctx;

	cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);

	for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
		if (!(ctx_mask & (1 << ctx)))
			continue;
		qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
		qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
	}

	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
331
qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned char ctx,unsigned int ae_csr)332 static unsigned int qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
333 unsigned char ae, unsigned char ctx,
334 unsigned int ae_csr)
335 {
336 unsigned int cur_ctx, csr_val;
337
338 cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
339 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
340 csr_val = qat_hal_rd_ae_csr(handle, ae, ae_csr);
341 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
342
343 return csr_val;
344 }
345
qat_hal_put_sig_event(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned int ctx_mask,unsigned int events)346 static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
347 unsigned char ae, unsigned int ctx_mask,
348 unsigned int events)
349 {
350 unsigned int ctx, cur_ctx;
351
352 cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
353 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
354 if (!(ctx_mask & (1 << ctx)))
355 continue;
356 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
357 qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
358 }
359 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
360 }
361
qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned int ctx_mask,unsigned int events)362 static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
363 unsigned char ae, unsigned int ctx_mask,
364 unsigned int events)
365 {
366 unsigned int ctx, cur_ctx;
367
368 cur_ctx = qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER);
369 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
370 if (!(ctx_mask & (1 << ctx)))
371 continue;
372 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
373 qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT,
374 events);
375 }
376 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
377 }
378
qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle * handle)379 static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
380 {
381 unsigned long ae_mask = handle->hal_handle->ae_mask;
382 unsigned int base_cnt, cur_cnt;
383 unsigned char ae;
384 int times = MAX_RETRY_TIMES;
385
386 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
387 base_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
388 base_cnt &= 0xffff;
389
390 do {
391 cur_cnt = qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT);
392 cur_cnt &= 0xffff;
393 } while (times-- && (cur_cnt == base_cnt));
394
395 if (times < 0) {
396 pr_err("QAT: AE%d is inactive!!\n", ae);
397 return -EFAULT;
398 }
399 }
400
401 return 0;
402 }
403
qat_hal_check_ae_active(struct icp_qat_fw_loader_handle * handle,unsigned int ae)404 int qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
405 unsigned int ae)
406 {
407 unsigned int enable = 0, active = 0;
408
409 enable = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
410 active = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
411 if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
412 (active & (1 << ACS_ABO_BITPOS)))
413 return 1;
414 else
415 return 0;
416 }
417
/*
 * Zero the per-AE timestamp counters. The timestamp timers are stopped
 * through the misc-control CSR while the counters are cleared, then
 * restarted.
 */
static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
{
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned int misc_ctl_csr, misc_ctl;
	unsigned char ae;

	misc_ctl_csr = handle->chip_info->misc_ctl_csr;
	/* stop the timestamp timers */
	misc_ctl = GET_CAP_CSR(handle, misc_ctl_csr);
	if (misc_ctl & MC_TIMESTAMP_ENABLE)
		SET_CAP_CSR(handle, misc_ctl_csr, misc_ctl &
			    (~MC_TIMESTAMP_ENABLE));

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
	}
	/* start timestamp timers */
	SET_CAP_CSR(handle, misc_ctl_csr, misc_ctl | MC_TIMESTAMP_ENABLE);
}
438
#define ESRAM_AUTO_TINIT	BIT(2)
#define ESRAM_AUTO_TINIT_DONE	BIT(3)
#define ESRAM_AUTO_INIT_USED_CYCLES (1640)
#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
/*
 * Trigger eSRAM auto-initialization and wait for it to complete.
 * Only the DH895xCC device needs this; all other devices return 0
 * immediately. Return 0 on success, -EFAULT on timeout.
 */
static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
{
	void __iomem *csr_addr =
		(void __iomem *)((uintptr_t)handle->hal_ep_csr_addr_v +
				 ESRAM_AUTO_INIT_CSR_OFFSET);
	unsigned int csr_val;
	int times = 30;

	if (handle->pci_dev->device != PCI_DEVICE_ID_INTEL_QAT_DH895XCC)
		return 0;

	/* Already started and finished: nothing to do. */
	csr_val = ADF_CSR_RD(csr_addr, 0);
	if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
		return 0;

	/* NOTE(review): this re-read looks redundant (same CSR read just
	 * above) — kept as-is to preserve the hardware access pattern.
	 */
	csr_val = ADF_CSR_RD(csr_addr, 0);
	csr_val |= ESRAM_AUTO_TINIT;
	ADF_CSR_WR(csr_addr, 0, csr_val);

	do {
		qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
		csr_val = ADF_CSR_RD(csr_addr, 0);
	} while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
	if (times < 0) {
		pr_err("QAT: Fail to init eSram!\n");
		return -EFAULT;
	}
	return 0;
}
472
#define SHRAM_INIT_CYCLES 2060
/*
 * Bring the accelerators and AEs out of reset, enable their clocks and
 * program sane power-up defaults into every enabled AE (contexts
 * disabled, PC zeroed, arbitration/condition-code/event CSRs set to
 * their INIT_* values).
 * Return 0 on success, -EFAULT if the reset bits refuse to clear, an AE
 * fails to start executing, or eSRAM/shared-RAM init times out.
 */
int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int clk_csr = handle->chip_info->glb_clk_enable_csr;
	unsigned int reset_mask = handle->chip_info->icp_rst_mask;
	unsigned int reset_csr = handle->chip_info->icp_rst_csr;
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned char ae = 0;
	unsigned int times = 100;
	unsigned int csr_val;

	/* write to the reset csr */
	csr_val = GET_CAP_CSR(handle, reset_csr);
	csr_val &= ~reset_mask;
	/* Keep writing until the hardware reports all reset bits clear. */
	do {
		SET_CAP_CSR(handle, reset_csr, csr_val);
		if (!(times--))
			goto out_err;
		csr_val = GET_CAP_CSR(handle, reset_csr);
		csr_val &= reset_mask;
	} while (csr_val);
	/* enable clock */
	csr_val = GET_CAP_CSR(handle, clk_csr);
	csr_val |= reset_mask;
	SET_CAP_CSR(handle, clk_csr, csr_val);
	if (qat_hal_check_ae_alive(handle))
		goto out_err;

	/* Set undefined power-up/reset states to reasonable default values */
	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
				  INIT_CTX_ENABLE_VALUE);
		qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
				    CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
				    INIT_PC_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
		qat_hal_put_wakeup_event(handle, ae,
					 ICP_QAT_UCLO_AE_ALL_CTX,
					 INIT_WAKEUP_EVENTS_VALUE);
		qat_hal_put_sig_event(handle, ae,
				      ICP_QAT_UCLO_AE_ALL_CTX,
				      INIT_SIG_EVENTS_VALUE);
	}
	if (qat_hal_init_esram(handle))
		goto out_err;
	if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
		goto out_err;
	qat_hal_reset_timestamp(handle);

	return 0;
out_err:
	pr_err("QAT: failed to get device out of reset\n");
	return -EFAULT;
}
529
qat_hal_disable_ctx(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned int ctx_mask)530 static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
531 unsigned char ae, unsigned int ctx_mask)
532 {
533 unsigned int ctx;
534
535 ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
536 ctx &= IGNORE_W1C_MASK &
537 (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
538 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
539 }
540
/* Return the even parity (0 or 1) of a 64-bit word. */
static u64 qat_hal_parity_64bit(u64 word)
{
	unsigned int shift;

	/* Fold halves together until the parity lands in bit 0. */
	for (shift = 32; shift; shift >>= 1)
		word ^= word >> shift;

	return word & 1;
}
551
/*
 * Recompute the seven ECC bits (bits 44-50) of a microword from the
 * fixed per-bit parity masks. The masks only cover the payload bits
 * (below bit 44), so each ECC bit is independent of the others.
 */
static u64 qat_hal_set_uword_ecc(u64 uword)
{
	static const u64 ecc_masks[] = {
		0xff800007fffULL, 0x1f801ff801fULL, 0xe387e0781e1ULL,
		0x7cb8e388e22ULL, 0xaf5b2c93244ULL, 0xf56d5525488ULL,
		0xdaf69a46910ULL,
	};
	unsigned int i;

	/* clear the ecc bits */
	uword &= ~(0x7fULL << 0x2C);
	for (i = 0; i < sizeof(ecc_masks) / sizeof(ecc_masks[0]); i++)
		uword |= qat_hal_parity_64bit(ecc_masks[i] & uword)
			 << (0x2C + i);

	return uword;
}
570
/*
 * Write @words_num microwords from @uword into an AE's ustore starting
 * at @uaddr, recomputing the ECC bits of each word. The caller's
 * USTORE_ADDRESS value is preserved.
 */
void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
		       unsigned char ae, unsigned int uaddr,
		       unsigned int words_num, u64 *uword)
{
	unsigned int ustore_addr;
	unsigned int i;

	ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
	/* Enable auto-increment so each data write advances the address. */
	uaddr |= UA_ECS;
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	for (i = 0; i < words_num; i++) {
		unsigned int uwrd_lo, uwrd_hi;
		u64 tmp;

		tmp = qat_hal_set_uword_ecc(uword[i]);
		uwrd_lo = (unsigned int)(tmp & 0xffffffff);
		uwrd_hi = (unsigned int)(tmp >> 0x20);
		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
	}
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
}
593
qat_hal_enable_ctx(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned int ctx_mask)594 static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
595 unsigned char ae, unsigned int ctx_mask)
596 {
597 unsigned int ctx;
598
599 ctx = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
600 ctx &= IGNORE_W1C_MASK;
601 ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
602 ctx |= (ctx_mask << CE_ENABLE_BITPOS);
603 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
604 }
605
/*
 * Zero all SR and DR read-transfer registers of every enabled AE via
 * qat_hal_init_rd_xfer().
 */
static void qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle)
{
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned char ae;
	unsigned short reg;

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
			qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
					     reg, 0);
			qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
					     reg, 0);
		}
	}
}
621
/*
 * Run the microcode program in inst[] on every enabled AE, then restore
 * the AEs to their initial (disabled) state with INIT_* defaults.
 * Return 0 on success, -EINVAL if an AE does not go idle in time.
 */
static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
{
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned char ae;
	unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
	int times = MAX_RETRY_TIMES;
	unsigned int csr_val = 0;
	unsigned int savctx = 0;
	int ret = 0;

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		/* Disable shared control store and (if present) set NN mode. */
		csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
		csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
		qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
		csr_val = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
		csr_val &= IGNORE_W1C_MASK;
		if (handle->chip_info->nn)
			csr_val |= CE_NN_MODE;

		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
		/* Load the clearing program and start it from PC 0. */
		qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
				  (u64 *)inst);
		qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
				    INIT_PC_VALUE);
		/* NOTE(review): savctx is overwritten on every loop pass, so
		 * the restore below uses the last AE's value for all AEs —
		 * confirm this is intended.
		 */
		savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
		qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
		qat_hal_wr_indr_csr(handle, ae, ctx_mask,
				    CTX_SIG_EVENTS_INDIRECT, 0);
		qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
		qat_hal_enable_ctx(handle, ae, ctx_mask);
	}
	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		/* wait for AE to finish */
		/* NOTE(review): the retry budget is shared across AEs. */
		do {
			ret = qat_hal_wait_cycles(handle, ae, 20, 1);
		} while (ret && times--);

		if (times < 0) {
			pr_err("QAT: clear GPR of AE %d failed", ae);
			return -EINVAL;
		}
		qat_hal_disable_ctx(handle, ae, ctx_mask);
		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
				  savctx & ACS_ACNO);
		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
				  INIT_CTX_ENABLE_VALUE);
		qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
				    INIT_PC_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
		qat_hal_put_wakeup_event(handle, ae, ctx_mask,
					 INIT_WAKEUP_EVENTS_VALUE);
		qat_hal_put_sig_event(handle, ae, ctx_mask,
				      INIT_SIG_EVENTS_VALUE);
	}
	return 0;
}
682
/*
 * Populate @handle with the chip-specific CSR layout and capability
 * flags derived from the PCI device id, then initialize the per-AE
 * bookkeeping in @handle->hal_handle and enable the ALU_OUT signature
 * CSR on every AE.
 * Return 0 on success, -EINVAL for an unsupported PCI device.
 */
static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle,
			     struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	unsigned int max_en_ae_id = 0;
	struct adf_bar *sram_bar;
	unsigned int csr_val = 0;
	unsigned long ae_mask;
	unsigned char ae = 0;
	int ret = 0;

	handle->pci_dev = pci_info->pci_dev;
	switch (handle->pci_dev->device) {
	/* 4xxx/401xx/402xx/420xx/6xxx family */
	case PCI_DEVICE_ID_INTEL_QAT_4XXX:
	case PCI_DEVICE_ID_INTEL_QAT_401XX:
	case PCI_DEVICE_ID_INTEL_QAT_402XX:
	case PCI_DEVICE_ID_INTEL_QAT_420XX:
	case PCI_DEVICE_ID_INTEL_QAT_6XXX:
		handle->chip_info->mmp_sram_size = 0;
		handle->chip_info->nn = false;
		handle->chip_info->lm2lm3 = true;
		handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X;
		handle->chip_info->icp_rst_csr = ICP_RESET_CPP0;
		if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_420XX)
			handle->chip_info->icp_rst_mask = 0x100155;
		else
			handle->chip_info->icp_rst_mask = 0x100015;
		handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE_CPP0;
		handle->chip_info->misc_ctl_csr = MISC_CONTROL_C4XXX;
		handle->chip_info->wakeup_event_val = 0x80000000;
		handle->chip_info->fw_auth = true;
		handle->chip_info->css_3k = true;
		if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_6XXX) {
			handle->chip_info->dual_sign = true;
			handle->chip_info->reset_delay_us = MIN_RESET_DELAY_US;
		}
		handle->chip_info->tgroup_share_ustore = true;
		handle->chip_info->fcu_ctl_csr = FCU_CONTROL_4XXX;
		handle->chip_info->fcu_sts_csr = FCU_STATUS_4XXX;
		handle->chip_info->fcu_dram_addr_hi = FCU_DRAM_ADDR_HI_4XXX;
		handle->chip_info->fcu_dram_addr_lo = FCU_DRAM_ADDR_LO_4XXX;
		handle->chip_info->fcu_loaded_ae_csr = FCU_AE_LOADED_4XXX;
		handle->chip_info->fcu_loaded_ae_pos = 0;

		handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET_4XXX;
		handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET_4XXX;
		handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET_4XXX;
		handle->hal_cap_ae_local_csr_addr_v =
			(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
			+ LOCAL_TO_XFER_REG_OFFSET);
		break;
	/* C62x / C3xxx family */
	case PCI_DEVICE_ID_INTEL_QAT_C62X:
	case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
		handle->chip_info->mmp_sram_size = 0;
		handle->chip_info->nn = true;
		handle->chip_info->lm2lm3 = false;
		handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG;
		handle->chip_info->icp_rst_csr = ICP_RESET;
		handle->chip_info->icp_rst_mask = (hw_data->ae_mask << RST_CSR_AE_LSB) |
						  (hw_data->accel_mask << RST_CSR_QAT_LSB);
		handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE;
		handle->chip_info->misc_ctl_csr = MISC_CONTROL;
		handle->chip_info->wakeup_event_val = WAKEUP_EVENT;
		handle->chip_info->fw_auth = true;
		handle->chip_info->css_3k = false;
		handle->chip_info->tgroup_share_ustore = false;
		handle->chip_info->fcu_ctl_csr = FCU_CONTROL;
		handle->chip_info->fcu_sts_csr = FCU_STATUS;
		handle->chip_info->fcu_dram_addr_hi = FCU_DRAM_ADDR_HI;
		handle->chip_info->fcu_dram_addr_lo = FCU_DRAM_ADDR_LO;
		handle->chip_info->fcu_loaded_ae_csr = FCU_STATUS;
		handle->chip_info->fcu_loaded_ae_pos = FCU_LOADED_AE_POS;
		handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET;
		handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET;
		handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET;
		handle->hal_cap_ae_local_csr_addr_v =
			(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
			+ LOCAL_TO_XFER_REG_OFFSET);
		break;
	/* DH895xCC: only device without firmware authentication */
	case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
		handle->chip_info->mmp_sram_size = 0x40000;
		handle->chip_info->nn = true;
		handle->chip_info->lm2lm3 = false;
		handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG;
		handle->chip_info->icp_rst_csr = ICP_RESET;
		handle->chip_info->icp_rst_mask = (hw_data->ae_mask << RST_CSR_AE_LSB) |
						  (hw_data->accel_mask << RST_CSR_QAT_LSB);
		handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE;
		handle->chip_info->misc_ctl_csr = MISC_CONTROL;
		handle->chip_info->wakeup_event_val = WAKEUP_EVENT;
		handle->chip_info->fw_auth = false;
		handle->chip_info->css_3k = false;
		handle->chip_info->tgroup_share_ustore = false;
		handle->chip_info->fcu_ctl_csr = 0;
		handle->chip_info->fcu_sts_csr = 0;
		handle->chip_info->fcu_dram_addr_hi = 0;
		handle->chip_info->fcu_dram_addr_lo = 0;
		handle->chip_info->fcu_loaded_ae_csr = 0;
		handle->chip_info->fcu_loaded_ae_pos = 0;
		handle->hal_cap_g_ctl_csr_addr_v = pmisc_addr + ICP_QAT_CAP_OFFSET;
		handle->hal_cap_ae_xfer_csr_addr_v = pmisc_addr + ICP_QAT_AE_OFFSET;
		handle->hal_ep_csr_addr_v = pmisc_addr + ICP_QAT_EP_OFFSET;
		handle->hal_cap_ae_local_csr_addr_v =
			(void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v
			+ LOCAL_TO_XFER_REG_OFFSET);
		break;
	default:
		ret = -EINVAL;
		goto out_err;
	}

	if (handle->chip_info->mmp_sram_size > 0) {
		sram_bar =
			&pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
		handle->hal_sram_addr_v = sram_bar->virt_addr;
	}
	handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
	handle->hal_handle->ae_mask = hw_data->ae_mask;
	handle->hal_handle->admin_ae_mask = hw_data->admin_ae_mask;
	handle->hal_handle->slice_mask = hw_data->accel_mask;
	handle->cfg_ae_mask = ALL_AE_MASK;
	/* create AE objects */
	handle->hal_handle->upc_mask = 0x1ffff;
	handle->hal_handle->max_ustore = 0x4000;

	ae_mask = handle->hal_handle->ae_mask;
	for_each_set_bit(ae, &ae_mask, ICP_QAT_UCLO_MAX_AE) {
		handle->hal_handle->aes[ae].free_addr = 0;
		handle->hal_handle->aes[ae].free_size =
			handle->hal_handle->max_ustore;
		handle->hal_handle->aes[ae].ustore_size =
			handle->hal_handle->max_ustore;
		handle->hal_handle->aes[ae].live_ctx_mask =
			ICP_QAT_UCLO_AE_ALL_CTX;
		max_en_ae_id = ae;
	}
	handle->hal_handle->ae_max_num = max_en_ae_id + 1;

	/* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		csr_val = qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE);
		csr_val |= 0x1;
		qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
	}
out_err:
	return ret;
}
832
/*
 * Allocate and initialize the firmware-loader handle for @accel_dev:
 * chip description, AE reset release, transfer-register clearing and
 * (for devices without firmware authentication) GPR clearing.
 * On success the handle is stored in accel_dev->fw_loader->fw_loader;
 * on failure all allocations are released.
 * Return 0 on success or a negative errno.
 */
int qat_hal_init(struct adf_accel_dev *accel_dev)
{
	struct icp_qat_fw_loader_handle *handle;
	int ret = 0;

	handle = kzalloc_obj(*handle);
	if (!handle)
		return -ENOMEM;

	handle->hal_handle = kzalloc_obj(*handle->hal_handle);
	if (!handle->hal_handle) {
		ret = -ENOMEM;
		goto out_hal_handle;
	}

	handle->chip_info = kzalloc_obj(*handle->chip_info);
	if (!handle->chip_info) {
		ret = -ENOMEM;
		goto out_chip_info;
	}

	ret = qat_hal_chip_init(handle, accel_dev);
	if (ret) {
		dev_err(&GET_DEV(accel_dev), "qat_hal_chip_init error\n");
		goto out_err;
	}

	/* take all AEs out of reset */
	ret = qat_hal_clr_reset(handle);
	if (ret) {
		dev_err(&GET_DEV(accel_dev), "qat_hal_clr_reset error\n");
		goto out_err;
	}

	qat_hal_clear_xfer(handle);
	if (!handle->chip_info->fw_auth) {
		ret = qat_hal_clear_gpr(handle);
		if (ret)
			goto out_err;
	}

	accel_dev->fw_loader->fw_loader = handle;
	return 0;

out_err:
	kfree(handle->chip_info);
out_chip_info:
	kfree(handle->hal_handle);
out_hal_handle:
	kfree(handle);
	return ret;
}
885
qat_hal_deinit(struct icp_qat_fw_loader_handle * handle)886 void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
887 {
888 if (!handle)
889 return;
890 kfree(handle->chip_info);
891 kfree(handle->hal_handle);
892 kfree(handle);
893 }
894
qat_hal_start(struct icp_qat_fw_loader_handle * handle)895 int qat_hal_start(struct icp_qat_fw_loader_handle *handle)
896 {
897 unsigned long ae_mask = handle->hal_handle->ae_mask;
898 u32 wakeup_val = handle->chip_info->wakeup_event_val;
899 u32 fcu_ctl_csr, fcu_sts_csr;
900 unsigned int fcu_sts;
901 unsigned char ae;
902 u32 ae_ctr = 0;
903 int retry = 0;
904
905 if (handle->chip_info->fw_auth) {
906 fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
907 fcu_sts_csr = handle->chip_info->fcu_sts_csr;
908 ae_ctr = hweight32(ae_mask);
909 SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_START);
910 do {
911 msleep(FW_AUTH_WAIT_PERIOD);
912 fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
913 if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1))
914 return ae_ctr;
915 } while (retry++ < FW_AUTH_MAX_RETRY);
916 pr_err("QAT: start error (FCU_STS = 0x%x)\n", fcu_sts);
917 return 0;
918 } else {
919 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
920 qat_hal_put_wakeup_event(handle, ae, 0, wakeup_val);
921 qat_hal_enable_ctx(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX);
922 ae_ctr++;
923 }
924 return ae_ctr;
925 }
926 }
927
qat_hal_stop(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned int ctx_mask)928 void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
929 unsigned int ctx_mask)
930 {
931 if (!handle->chip_info->fw_auth)
932 qat_hal_disable_ctx(handle, ae, ctx_mask);
933 }
934
qat_hal_set_pc(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned int ctx_mask,unsigned int upc)935 void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
936 unsigned char ae, unsigned int ctx_mask, unsigned int upc)
937 {
938 qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
939 handle->hal_handle->upc_mask & upc);
940 }
941
/*
 * qat_hal_get_uwords() - read microwords back from an AE's control store
 * @handle: firmware loader handle
 * @ae: AE number
 * @uaddr: first ustore address to read
 * @words_num: number of 64-bit microwords to read
 * @uword: output array of @words_num microwords
 *
 * Temporarily clears bit 2 of AE_MISC_CONTROL (control-store sharing —
 * see MMC_SHARE_CS_BITPOS usage elsewhere in this file) and drives
 * USTORE_ADDRESS with the ECS bit set so raw words can be read through
 * the USTORE_DATA registers. Both CSRs are restored before returning.
 */
static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
			       unsigned char ae, unsigned int uaddr,
			       unsigned int words_num, u64 *uword)
{
	unsigned int saved_misc, saved_addr;
	unsigned int lo, hi, i;

	saved_misc = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
			  saved_misc & 0xfffffffb);
	saved_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
	uaddr |= UA_ECS;
	for (i = 0; i < words_num; i++, uaddr++) {
		qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
		lo = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER);
		hi = qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER);
		uword[i] = ((u64)hi << 0x20) | lo;
	}
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, saved_misc);
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, saved_addr);
}
965
qat_hal_wr_umem(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned int uaddr,unsigned int words_num,unsigned int * data)966 void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
967 unsigned char ae, unsigned int uaddr,
968 unsigned int words_num, unsigned int *data)
969 {
970 unsigned int i, ustore_addr;
971
972 ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
973 uaddr |= UA_ECS;
974 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
975 for (i = 0; i < words_num; i++) {
976 unsigned int uwrd_lo, uwrd_hi, tmp;
977
978 uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
979 ((data[i] & 0xff00) << 2) |
980 (0x3 << 8) | (data[i] & 0xff);
981 uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
982 uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8;
983 tmp = ((data[i] >> 0x10) & 0xffff);
984 uwrd_hi |= (hweight32(tmp) & 0x1) << 9;
985 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
986 qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
987 }
988 qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
989 }
990
991 #define MAX_EXEC_INST 100
/*
 * qat_hal_exec_micro_inst() - execute an inline micro-program on one context
 * @handle: firmware loader handle
 * @ae: AE to execute on
 * @ctx: context number the snippet runs in
 * @micro_inst: microwords to execute, loaded at ustore address 0
 * @inst_num: number of microwords; must not exceed max_ustore
 * @code_off: non-zero to suppress condition-code updates (CC_ENABLE bit 13
 *	      cleared) while the snippet runs
 * @max_cycle: number of cycles to wait for the snippet to finish
 * @endpc: if non-NULL, receives the context PC after execution
 *
 * Saves every piece of state the snippet may clobber (indirect LM address
 * CSRs, PC, wakeup/signal events, CC enable, active context, arbiter
 * control and — on lm2lm3 chips — the LM2/LM3 and T-index CSRs), loads
 * the snippet at ustore offset 0, runs it on @ctx alone, then restores
 * all saved state. The first MAX_EXEC_INST ustore words are saved and
 * restored only when the snippet fits within that many words.
 *
 * Return: 0 on success, -EINVAL for an invalid snippet, -EFAULT if the
 * snippet did not finish within @max_cycle cycles (state is NOT restored
 * on this path).
 */
static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
				   unsigned char ae, unsigned char ctx,
				   u64 *micro_inst, unsigned int inst_num,
				   int code_off, unsigned int max_cycle,
				   unsigned int *endpc)
{
	unsigned int ind_lm_addr_byte0 = 0, ind_lm_addr_byte1 = 0;
	unsigned int ind_lm_addr_byte2 = 0, ind_lm_addr_byte3 = 0;
	unsigned int ind_t_index = 0, ind_t_index_byte = 0;
	unsigned int ind_lm_addr0 = 0, ind_lm_addr1 = 0;
	unsigned int ind_lm_addr2 = 0, ind_lm_addr3 = 0;
	u64 savuwords[MAX_EXEC_INST];
	unsigned int ind_cnt_sig;
	unsigned int ind_sig, act_sig;
	unsigned int csr_val = 0, newcsr_val;
	unsigned int savctx;
	unsigned int savcc, wakeup_events, savpc;
	unsigned int ctxarb_ctl, ctx_enables;

	if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) {
		pr_err("QAT: invalid instruction num %d\n", inst_num);
		return -EINVAL;
	}
	/* save current context */
	ind_lm_addr0 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT);
	ind_lm_addr1 = qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT);
	ind_lm_addr_byte0 = qat_hal_rd_indr_csr(handle, ae, ctx,
						INDIRECT_LM_ADDR_0_BYTE_INDEX);
	ind_lm_addr_byte1 = qat_hal_rd_indr_csr(handle, ae, ctx,
						INDIRECT_LM_ADDR_1_BYTE_INDEX);
	if (handle->chip_info->lm2lm3) {
		/* newer chips have two extra LM address pairs plus T-index */
		ind_lm_addr2 = qat_hal_rd_indr_csr(handle, ae, ctx,
						   LM_ADDR_2_INDIRECT);
		ind_lm_addr3 = qat_hal_rd_indr_csr(handle, ae, ctx,
						   LM_ADDR_3_INDIRECT);
		ind_lm_addr_byte2 = qat_hal_rd_indr_csr(handle, ae, ctx,
							INDIRECT_LM_ADDR_2_BYTE_INDEX);
		ind_lm_addr_byte3 = qat_hal_rd_indr_csr(handle, ae, ctx,
							INDIRECT_LM_ADDR_3_BYTE_INDEX);
		ind_t_index = qat_hal_rd_indr_csr(handle, ae, ctx,
						  INDIRECT_T_INDEX);
		ind_t_index_byte = qat_hal_rd_indr_csr(handle, ae, ctx,
						       INDIRECT_T_INDEX_BYTE_INDEX);
	}
	/* only save the ustore words the snippet will overwrite, and only
	 * if they fit in the on-stack save buffer */
	if (inst_num <= MAX_EXEC_INST)
		qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
	qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
	savpc = qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT);
	savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
	ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
	ctx_enables &= IGNORE_W1C_MASK;
	savcc = qat_hal_rd_ae_csr(handle, ae, CC_ENABLE);
	savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
	ctxarb_ctl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
	ind_cnt_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
					  FUTURE_COUNT_SIGNAL_INDIRECT);
	ind_sig = qat_hal_rd_indr_csr(handle, ae, ctx,
				      CTX_SIG_EVENTS_INDIRECT);
	act_sig = qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE);
	/* execute micro codes */
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
	qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
	qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
	if (code_off)
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
	qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
	qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
	qat_hal_enable_ctx(handle, ae, (1 << ctx));
	/* wait for micro codes to finish */
	if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
		return -EFAULT;
	if (endpc) {
		unsigned int ctx_status;

		ctx_status = qat_hal_rd_indr_csr(handle, ae, ctx,
						 CTX_STS_INDIRECT);
		*endpc = ctx_status & handle->hal_handle->upc_mask;
	}
	/* restore to saved context */
	qat_hal_disable_ctx(handle, ae, (1 << ctx));
	if (inst_num <= MAX_EXEC_INST)
		qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
	qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
			    handle->hal_handle->upc_mask & savpc);
	csr_val = qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL);
	newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
	qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
	qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
	qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    LM_ADDR_0_INDIRECT, ind_lm_addr0);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    LM_ADDR_1_INDIRECT, ind_lm_addr1);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1);
	if (handle->chip_info->lm2lm3) {
		qat_hal_wr_indr_csr(handle, ae, BIT(ctx), LM_ADDR_2_INDIRECT,
				    ind_lm_addr2);
		qat_hal_wr_indr_csr(handle, ae, BIT(ctx), LM_ADDR_3_INDIRECT,
				    ind_lm_addr3);
		qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
				    INDIRECT_LM_ADDR_2_BYTE_INDEX,
				    ind_lm_addr_byte2);
		qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
				    INDIRECT_LM_ADDR_3_BYTE_INDEX,
				    ind_lm_addr_byte3);
		qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
				    INDIRECT_T_INDEX, ind_t_index);
		qat_hal_wr_indr_csr(handle, ae, BIT(ctx),
				    INDIRECT_T_INDEX_BYTE_INDEX,
				    ind_t_index_byte);
	}
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
			    CTX_SIG_EVENTS_INDIRECT, ind_sig);
	qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);

	return 0;
}
1119
/*
 * qat_hal_rd_rel_reg() - read a context-relative register through ALU_OUT
 * @handle: firmware loader handle
 * @ae: AE number
 * @ctx: context owning the register
 * @reg_type: register bank (ICP_GPA_REL routes via the A-operand field,
 *	      anything else via the B-operand field)
 * @reg_num: register number within the bank
 * @data: output; receives the register value
 *
 * Plants a single ALU microword at ustore address 0 that routes the
 * register onto the ALU output, switches to @ctx, lets the word execute,
 * then reads the value back from the ALU_OUT CSR. The original ustore
 * word, active context and CSRs are restored before returning.
 *
 * Return: 0 on success, -EINVAL for an unmappable register address.
 */
static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
			      unsigned char ae, unsigned char ctx,
			      enum icp_qat_uof_regtype reg_type,
			      unsigned short reg_num, unsigned int *data)
{
	unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
	unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
	unsigned short reg_addr;
	int status = 0;
	u64 insts, savuword;

	reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
	if (reg_addr == BAD_REGADDR) {
		pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
		return -EINVAL;
	}
	/* encode the register address into the operand field the ALU
	 * instruction reads it from */
	switch (reg_type) {
	case ICP_GPA_REL:
		insts = 0xA070000000ull | (reg_addr & 0x3ff);
		break;
	default:
		insts = (u64)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
		break;
	}
	savctx = qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS);
	ctxarb_cntl = qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL);
	ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
	ctx_enables &= IGNORE_W1C_MASK;
	if (ctx != (savctx & ACS_ACNO))
		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
				  ctx & ACS_ACNO);
	/* save the single ustore word we are about to overwrite */
	qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
	ustore_addr = qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS);
	uaddr = UA_ECS;
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	insts = qat_hal_set_uword_ecc(insts);
	uwrd_lo = (unsigned int)(insts & 0xffffffff);
	uwrd_hi = (unsigned int)(insts >> 0x20);
	qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
	qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	/* delay for at least 8 cycles */
	qat_hal_wait_cycles(handle, ae, 0x8, 0);
	/*
	 * read ALU output
	 * the instruction should have been executed
	 * prior to clearing the ECS in putUwords
	 */
	*data = qat_hal_rd_ae_csr(handle, ae, ALU_OUT);
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
	qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
	if (ctx != (savctx & ACS_ACNO))
		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
				  savctx & ACS_ACNO);
	qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);

	return status;
}
1180
/*
 * qat_hal_wr_rel_reg() - write a context-relative register
 * @handle: firmware loader handle
 * @ae: AE number
 * @ctx: context owning the register
 * @reg_type: register bank; ICP_GPA_REL swaps the operand fields used
 * @reg_num: register number within the bank
 * @data: 32-bit value to write
 *
 * Builds a four-word micro-program (two immediate-load words, a nop-style
 * word and a ctx_arb word) that assembles @data from two 16-bit halves
 * and stores it into the target register, then runs it via
 * qat_hal_exec_micro_inst(). The top 8 bits of each half travel in the
 * instruction's immediate field; the low 8 bits are sourced through the
 * ICP_NO_DEST constant-address lookup.
 *
 * Return: 0 on success, -EINVAL for an unmappable register, or the error
 * from qat_hal_exec_micro_inst().
 */
static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
			      unsigned char ae, unsigned char ctx,
			      enum icp_qat_uof_regtype reg_type,
			      unsigned short reg_num, unsigned int data)
{
	unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
	u64 insts[] = {
		0x0F440000000ull,
		0x0F040000000ull,
		0x0F0000C0300ull,
		0x0E000010000ull
	};
	const int num_inst = ARRAY_SIZE(insts), code_off = 1;
	const int imm_w1 = 0, imm_w0 = 1;

	dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
	if (dest_addr == BAD_REGADDR) {
		pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
		return -EINVAL;
	}

	data16lo = 0xffff & data;
	data16hi = 0xffff & (data >> 0x10);
	src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
					  (0xff & data16hi));
	src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
					   (0xff & data16lo));
	/* patch source/destination operand fields; GPA destinations take
	 * the low field, everything else the middle field */
	switch (reg_type) {
	case ICP_GPA_REL:
		insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
		    ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
		insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
		    ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
		break;
	default:
		insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
		    ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);

		insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
		    ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
		break;
	}

	return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst,
				       code_off, num_inst * 0x5, NULL);
}
1227
qat_hal_get_ins_num(void)1228 int qat_hal_get_ins_num(void)
1229 {
1230 return ARRAY_SIZE(inst_4b);
1231 }
1232
/*
 * qat_hal_concat_micro_code() - append one LM-store snippet to a program
 * @micro_inst: micro-program buffer being assembled
 * @inst_num: current number of words already in @micro_inst
 * @size: unused; kept for the established call signature
 * @addr: local-memory address the snippet stores to
 * @value: value array; only value[0] is consumed per call
 *
 * Copies the inst_4b template after the existing words and patches its
 * immediates in place: the LM address into words 0-1 (GPRA) and the two
 * 16-bit halves of the value into words 2-3 (GPRB).
 *
 * Return: number of microwords appended (ARRAY_SIZE(inst_4b)).
 */
static int qat_hal_concat_micro_code(u64 *micro_inst,
				     unsigned int inst_num, unsigned int size,
				     unsigned int addr, unsigned int *value)
{
	unsigned int first = inst_num;
	unsigned int cur_value = value[0];
	unsigned int count = ARRAY_SIZE(inst_4b);
	unsigned int i;

	for (i = 0; i < count; i++)
		micro_inst[first + i] = inst_4b[i];
	INSERT_IMMED_GPRA_CONST(micro_inst[first], (addr));
	INSERT_IMMED_GPRA_CONST(micro_inst[first + 1], 0);
	INSERT_IMMED_GPRB_CONST(micro_inst[first + 2], (cur_value >> 0));
	INSERT_IMMED_GPRB_CONST(micro_inst[first + 3], (cur_value >> 0x10));

	return count;
}
1261
/*
 * qat_hal_exec_micro_init_lm() - run an assembled LM-init micro-program
 * @handle: firmware loader handle
 * @ae: AE to run on
 * @ctx: context to run in
 * @pfirst_exec: in/out flag; on the first call the GPRs the program
 *		 clobbers (GPA 0-2, GPB 0-1) are read and saved, then the
 *		 flag is cleared
 * @micro_inst: micro-program produced by qat_hal_concat_micro_code()
 * @inst_num: number of microwords in @micro_inst
 *
 * NOTE(review): on a call where *pfirst_exec is already 0 the restore
 * below writes the zero-initialized locals into the GPRs. The only caller
 * in this file invokes it once per batch with first_exec = 1, so this
 * path is not hit — confirm before reusing elsewhere.
 *
 * Return: 0 on success, -EFAULT if the micro-program failed to execute.
 */
static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
				      unsigned char ae, unsigned char ctx,
				      int *pfirst_exec, u64 *micro_inst,
				      unsigned int inst_num)
{
	int stat = 0;
	unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
	unsigned int gprb0 = 0, gprb1 = 0;

	if (*pfirst_exec) {
		/* save the GPRs the generated program overwrites */
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
		*pfirst_exec = 0;
	}
	stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1,
				       inst_num * 0x5, NULL);
	if (stat != 0)
		return -EFAULT;
	/* restore the saved GPRs */
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);

	return 0;
}
1291
qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle * handle,unsigned char ae,struct icp_qat_uof_batch_init * lm_init_header)1292 int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
1293 unsigned char ae,
1294 struct icp_qat_uof_batch_init *lm_init_header)
1295 {
1296 struct icp_qat_uof_batch_init *plm_init;
1297 u64 *micro_inst_arry;
1298 int micro_inst_num;
1299 int alloc_inst_size;
1300 int first_exec = 1;
1301 int stat = 0;
1302
1303 plm_init = lm_init_header->next;
1304 alloc_inst_size = lm_init_header->size;
1305 if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
1306 alloc_inst_size = handle->hal_handle->max_ustore;
1307 micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(u64),
1308 GFP_KERNEL);
1309 if (!micro_inst_arry)
1310 return -ENOMEM;
1311 micro_inst_num = 0;
1312 while (plm_init) {
1313 unsigned int addr, *value, size;
1314
1315 ae = plm_init->ae;
1316 addr = plm_init->addr;
1317 value = plm_init->value;
1318 size = plm_init->size;
1319 micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry,
1320 micro_inst_num,
1321 size, addr, value);
1322 plm_init = plm_init->next;
1323 }
1324 /* exec micro codes */
1325 if (micro_inst_arry && micro_inst_num > 0) {
1326 micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
1327 stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec,
1328 micro_inst_arry,
1329 micro_inst_num);
1330 }
1331 kfree(micro_inst_arry);
1332 return stat;
1333 }
1334
qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned char ctx,enum icp_qat_uof_regtype reg_type,unsigned short reg_num,unsigned int val)1335 static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
1336 unsigned char ae, unsigned char ctx,
1337 enum icp_qat_uof_regtype reg_type,
1338 unsigned short reg_num, unsigned int val)
1339 {
1340 int status = 0;
1341 unsigned int reg_addr;
1342 unsigned int ctx_enables;
1343 unsigned short mask;
1344 unsigned short dr_offset = 0x10;
1345
1346 ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
1347 if (CE_INUSE_CONTEXTS & ctx_enables) {
1348 if (ctx & 0x1) {
1349 pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
1350 return -EINVAL;
1351 }
1352 mask = 0x1f;
1353 dr_offset = 0x20;
1354 } else {
1355 mask = 0x0f;
1356 }
1357 if (reg_num & ~mask)
1358 return -EINVAL;
1359 reg_addr = reg_num + (ctx << 0x5);
1360 switch (reg_type) {
1361 case ICP_SR_RD_REL:
1362 case ICP_SR_REL:
1363 SET_AE_XFER(handle, ae, reg_addr, val);
1364 break;
1365 case ICP_DR_RD_REL:
1366 case ICP_DR_REL:
1367 SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
1368 break;
1369 default:
1370 status = -EINVAL;
1371 break;
1372 }
1373 return status;
1374 }
1375
/*
 * qat_hal_put_rel_wr_xfer() - write a context-relative write-transfer reg
 * @handle: firmware loader handle
 * @ae: AE number
 * @ctx: owning context (must be even in 4-context mode)
 * @reg_type: transfer register bank
 * @reg_num: register number within the bank
 * @data: 32-bit value to write
 *
 * Write-transfer registers cannot be stored directly, so a five-word
 * micro-program is built: it assembles @data in GPB register 0 (two
 * immediate loads) and then moves it into the transfer register. GPB 0
 * is saved first and restored after the program runs.
 *
 * Return: 0 on success, -EINVAL for a bad context/register, or the error
 * from reading GPB 0 or executing the micro-program.
 */
static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
				   unsigned char ae, unsigned char ctx,
				   enum icp_qat_uof_regtype reg_type,
				   unsigned short reg_num, unsigned int data)
{
	unsigned int gprval, ctx_enables;
	unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
	    data16low;
	unsigned short reg_mask;
	int status = 0;
	u64 micro_inst[] = {
		0x0F440000000ull,
		0x0F040000000ull,
		0x0A000000000ull,
		0x0F0000C0300ull,
		0x0E000010000ull
	};
	const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
	const unsigned short gprnum = 0, dly = num_inst * 0x5;

	ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
	if (CE_INUSE_CONTEXTS & ctx_enables) {
		/* 4-context mode: only even contexts are valid */
		if (ctx & 0x1) {
			pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
			return -EINVAL;
		}
		reg_mask = (unsigned short)~0x1f;
	} else {
		reg_mask = (unsigned short)~0xf;
	}
	if (reg_num & reg_mask)
		return -EINVAL;
	xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
	if (xfr_addr == BAD_REGADDR) {
		pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
		return -EINVAL;
	}
	/* save the scratch GPR the program clobbers */
	status = qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
	if (status) {
		pr_err("QAT: failed to read register");
		return status;
	}
	gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
	data16low = 0xffff & data;
	data16hi = 0xffff & (data >> 0x10);
	src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
					  (unsigned short)(0xff & data16hi));
	src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
					   (unsigned short)(0xff & data16low));
	/* patch immediates and operand addresses into the template words */
	micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
	    ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
	micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
	    ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
	micro_inst[0x2] = micro_inst[0x2] |
	    ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10);
	status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst,
					 code_off, dly, NULL);
	/* restore the scratch GPR */
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
	return status;
}
1436
qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned char ctx,unsigned short nn,unsigned int val)1437 static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
1438 unsigned char ae, unsigned char ctx,
1439 unsigned short nn, unsigned int val)
1440 {
1441 unsigned int ctx_enables;
1442 int stat = 0;
1443
1444 ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
1445 ctx_enables &= IGNORE_W1C_MASK;
1446 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
1447
1448 stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
1449 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
1450 return stat;
1451 }
1452
qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned short absreg_num,unsigned short * relreg,unsigned char * ctx)1453 static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
1454 *handle, unsigned char ae,
1455 unsigned short absreg_num,
1456 unsigned short *relreg,
1457 unsigned char *ctx)
1458 {
1459 unsigned int ctx_enables;
1460
1461 ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
1462 if (ctx_enables & CE_INUSE_CONTEXTS) {
1463 /* 4-ctx mode */
1464 *relreg = absreg_num & 0x1F;
1465 *ctx = (absreg_num >> 0x4) & 0x6;
1466 } else {
1467 /* 8-ctx mode */
1468 *relreg = absreg_num & 0x0F;
1469 *ctx = (absreg_num >> 0x4) & 0x7;
1470 }
1471 return 0;
1472 }
1473
/*
 * qat_hal_init_gpr() - initialize a GPR on one AE
 * @handle: firmware loader handle
 * @ae: AE number
 * @ctx_mask: contexts to write; 0 selects absolute addressing, where
 *	      @reg_num itself encodes the owning context
 * @reg_type: register bank; with ctx_mask == 0 the ABS type is converted
 *	      to its REL counterpart via reg_type - 1 (assumes the enum
 *	      places each REL value directly before its ABS twin — TODO
 *	      confirm against icp_qat_uclo.h)
 * @reg_num: register number
 * @regdata: value to write
 *
 * Return: 0 on success, -EINVAL if @reg_num is out of range or a write
 * fails.
 */
int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
		     unsigned char ae, unsigned long ctx_mask,
		     enum icp_qat_uof_regtype reg_type,
		     unsigned short reg_num, unsigned int regdata)
{
	int stat = 0;
	unsigned short reg;
	unsigned char ctx = 0;
	enum icp_qat_uof_regtype type;

	if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
		return -EINVAL;

	do {
		if (ctx_mask == 0) {
			/* absolute mode: derive ctx and relative number;
			 * body runs exactly once (loop condition is 0) */
			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
						   &ctx);
			type = reg_type - 1;
		} else {
			reg = reg_num;
			type = reg_type;
			/* continue jumps to the condition, so ctx still
			 * advances for unselected contexts */
			if (!test_bit(ctx, &ctx_mask))
				continue;
		}
		stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
		if (stat) {
			pr_err("QAT: write gpr fail\n");
			return -EINVAL;
		}
	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));

	return 0;
}
1507
/*
 * qat_hal_init_wr_xfer() - initialize a write-transfer register on one AE
 * @handle: firmware loader handle
 * @ae: AE number
 * @ctx_mask: contexts to write; 0 selects absolute addressing, where
 *	      @reg_num itself encodes the owning context
 * @reg_type: register bank; with ctx_mask == 0 the ABS type is converted
 *	      to its REL counterpart via reg_type - 3 (assumes the enum
 *	      spacing between REL and ABS xfer types — TODO confirm
 *	      against icp_qat_uclo.h)
 * @reg_num: register number
 * @regdata: value to write
 *
 * Return: 0 on success, -EINVAL if @reg_num is out of range or a write
 * fails.
 */
int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
			 unsigned char ae, unsigned long ctx_mask,
			 enum icp_qat_uof_regtype reg_type,
			 unsigned short reg_num, unsigned int regdata)
{
	int stat = 0;
	unsigned short reg;
	unsigned char ctx = 0;
	enum icp_qat_uof_regtype type;

	if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
		return -EINVAL;

	do {
		if (ctx_mask == 0) {
			/* absolute mode: derive ctx and relative number;
			 * body runs exactly once (loop condition is 0) */
			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
						   &ctx);
			type = reg_type - 3;
		} else {
			reg = reg_num;
			type = reg_type;
			if (!test_bit(ctx, &ctx_mask))
				continue;
		}
		stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg,
					       regdata);
		if (stat) {
			pr_err("QAT: write wr xfer fail\n");
			return -EINVAL;
		}
	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));

	return 0;
}
1542
/*
 * qat_hal_init_rd_xfer() - initialize a read-transfer register on one AE
 * @handle: firmware loader handle
 * @ae: AE number
 * @ctx_mask: contexts to write; 0 selects absolute addressing, where
 *	      @reg_num itself encodes the owning context
 * @reg_type: register bank; with ctx_mask == 0 the ABS type is converted
 *	      to its REL counterpart via reg_type - 3 (assumes the enum
 *	      spacing between REL and ABS xfer types — TODO confirm
 *	      against icp_qat_uclo.h)
 * @reg_num: register number
 * @regdata: value to write
 *
 * Return: 0 on success, -EINVAL if @reg_num is out of range or a write
 * fails.
 */
int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
			 unsigned char ae, unsigned long ctx_mask,
			 enum icp_qat_uof_regtype reg_type,
			 unsigned short reg_num, unsigned int regdata)
{
	int stat = 0;
	unsigned short reg;
	unsigned char ctx = 0;
	enum icp_qat_uof_regtype type;

	if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
		return -EINVAL;

	do {
		if (ctx_mask == 0) {
			/* absolute mode: derive ctx and relative number;
			 * body runs exactly once (loop condition is 0) */
			qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
						   &ctx);
			type = reg_type - 3;
		} else {
			reg = reg_num;
			type = reg_type;
			if (!test_bit(ctx, &ctx_mask))
				continue;
		}
		stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg,
					       regdata);
		if (stat) {
			pr_err("QAT: write rd xfer fail\n");
			return -EINVAL;
		}
	} while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));

	return 0;
}
1577
qat_hal_init_nn(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned long ctx_mask,unsigned short reg_num,unsigned int regdata)1578 int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
1579 unsigned char ae, unsigned long ctx_mask,
1580 unsigned short reg_num, unsigned int regdata)
1581 {
1582 int stat = 0;
1583 unsigned char ctx;
1584 if (!handle->chip_info->nn) {
1585 dev_err(&handle->pci_dev->dev, "QAT: No next neigh in 0x%x\n",
1586 handle->pci_dev->device);
1587 return -EINVAL;
1588 }
1589
1590 if (ctx_mask == 0)
1591 return -EINVAL;
1592
1593 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
1594 if (!test_bit(ctx, &ctx_mask))
1595 continue;
1596 stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
1597 if (stat) {
1598 pr_err("QAT: write neigh error\n");
1599 return -EINVAL;
1600 }
1601 }
1602
1603 return 0;
1604 }
1605