1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2023 Intel Corporation */
3 #include <linux/iopoll.h>
4 #include <adf_accel_devices.h>
5 #include <adf_admin.h>
6 #include <adf_cfg.h>
7 #include <adf_cfg_services.h>
8 #include <adf_clock.h>
9 #include <adf_common_drv.h>
10 #include <adf_fw_config.h>
11 #include <adf_gen4_config.h>
12 #include <adf_gen4_dc.h>
13 #include <adf_gen4_hw_data.h>
14 #include <adf_gen4_pfvf.h>
15 #include <adf_gen4_pm.h>
16 #include <adf_gen4_ras.h>
17 #include <adf_gen4_timer.h>
18 #include <adf_gen4_tl.h>
19 #include "adf_420xx_hw_data.h"
20 #include "icp_qat_hw.h"
21
22 #define ADF_AE_GROUP_0 GENMASK(3, 0)
23 #define ADF_AE_GROUP_1 GENMASK(7, 4)
24 #define ADF_AE_GROUP_2 GENMASK(11, 8)
25 #define ADF_AE_GROUP_3 GENMASK(15, 12)
26 #define ADF_AE_GROUP_4 BIT(16)
27
28 #define ENA_THD_MASK_ASYM GENMASK(1, 0)
29 #define ENA_THD_MASK_SYM GENMASK(3, 0)
30 #define ENA_THD_MASK_DC GENMASK(1, 0)
31
/* Map from generic firmware object ids to 420xx firmware image names. */
static const char * const adf_420xx_fw_objs[] = {
	[ADF_FW_SYM_OBJ] =  ADF_420XX_SYM_OBJ,
	[ADF_FW_ASYM_OBJ] =  ADF_420XX_ASYM_OBJ,
	[ADF_FW_DC_OBJ] =  ADF_420XX_DC_OBJ,
	[ADF_FW_ADMIN_OBJ] = ADF_420XX_ADMIN_OBJ,
};
38
/*
 * Firmware load configurations: each entry assigns a firmware object to a
 * group of accel engines. The admin object always runs on AE group 4.
 */

/* Symmetric + asymmetric crypto (cy) service */
static const struct adf_fw_config adf_fw_cy_config[] = {
	{ADF_AE_GROUP_3, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
};

/* Compression only (dc) service */
static const struct adf_fw_config adf_fw_dc_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
};

/* Symmetric crypto only (sym) service */
static const struct adf_fw_config adf_fw_sym_config[] = {
	{ADF_AE_GROUP_3, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
};

/* Asymmetric crypto only (asym) service */
static const struct adf_fw_config adf_fw_asym_config[] = {
	{ADF_AE_GROUP_3, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
};

/* Asymmetric crypto + compression (asym;dc) service */
static const struct adf_fw_config adf_fw_asym_dc_config[] = {
	{ADF_AE_GROUP_3, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
};

/* Symmetric crypto + compression (sym;dc) service */
static const struct adf_fw_config adf_fw_sym_dc_config[] = {
	{ADF_AE_GROUP_2, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
};

/* Chained compression (dcc): sym capabilities loaded but no sym instances */
static const struct adf_fw_config adf_fw_dcc_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ},
};
89
90
/*
 * Device class descriptor shared by all 420xx devices; 'instances' counts
 * the probed devices of this class (incremented in adf_init_hw_data_420xx(),
 * decremented in adf_clean_hw_data_420xx()).
 */
static struct adf_hw_device_class adf_420xx_class = {
	.name = ADF_420XX_DEVICE_NAME,
	.type = DEV_420XX,
	.instances = 0,
};
96
get_ae_mask(struct adf_hw_device_data * self)97 static u32 get_ae_mask(struct adf_hw_device_data *self)
98 {
99 u32 me_disable = self->fuses;
100
101 return ~me_disable & ADF_420XX_ACCELENGINES_MASK;
102 }
103
uof_get_num_objs(struct adf_accel_dev * accel_dev)104 static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
105 {
106 switch (adf_get_service_enabled(accel_dev)) {
107 case SVC_CY:
108 case SVC_CY2:
109 return ARRAY_SIZE(adf_fw_cy_config);
110 case SVC_DC:
111 return ARRAY_SIZE(adf_fw_dc_config);
112 case SVC_DCC:
113 return ARRAY_SIZE(adf_fw_dcc_config);
114 case SVC_SYM:
115 return ARRAY_SIZE(adf_fw_sym_config);
116 case SVC_ASYM:
117 return ARRAY_SIZE(adf_fw_asym_config);
118 case SVC_ASYM_DC:
119 case SVC_DC_ASYM:
120 return ARRAY_SIZE(adf_fw_asym_dc_config);
121 case SVC_SYM_DC:
122 case SVC_DC_SYM:
123 return ARRAY_SIZE(adf_fw_sym_dc_config);
124 default:
125 return 0;
126 }
127 }
128
get_fw_config(struct adf_accel_dev * accel_dev)129 static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
130 {
131 switch (adf_get_service_enabled(accel_dev)) {
132 case SVC_CY:
133 case SVC_CY2:
134 return adf_fw_cy_config;
135 case SVC_DC:
136 return adf_fw_dc_config;
137 case SVC_DCC:
138 return adf_fw_dcc_config;
139 case SVC_SYM:
140 return adf_fw_sym_config;
141 case SVC_ASYM:
142 return adf_fw_asym_config;
143 case SVC_ASYM_DC:
144 case SVC_DC_ASYM:
145 return adf_fw_asym_dc_config;
146 case SVC_SYM_DC:
147 case SVC_DC_SYM:
148 return adf_fw_sym_dc_config;
149 default:
150 return NULL;
151 }
152 }
153
update_ae_mask(struct adf_accel_dev * accel_dev)154 static void update_ae_mask(struct adf_accel_dev *accel_dev)
155 {
156 struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
157 const struct adf_fw_config *fw_config;
158 u32 config_ae_mask = 0;
159 u32 ae_mask, num_objs;
160 int i;
161
162 ae_mask = get_ae_mask(hw_data);
163
164 /* Modify the AE mask based on the firmware configuration loaded */
165 fw_config = get_fw_config(accel_dev);
166 num_objs = uof_get_num_objs(accel_dev);
167
168 config_ae_mask |= ADF_420XX_ADMIN_AE_MASK;
169 for (i = 0; i < num_objs; i++)
170 config_ae_mask |= fw_config[i].ae_mask;
171
172 hw_data->ae_mask = ae_mask & config_ae_mask;
173 }
174
/*
 * Compute the accelerator capability mask for the enabled service.
 *
 * Starts from the full sym/asym/dc capability sets, clears capabilities
 * whose hardware slices are fused off in FUSECTL1, then returns the
 * union of the sets matching the configured service.
 */
static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
{
	u32 capabilities_sym, capabilities_asym, capabilities_dc;
	struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
	u32 capabilities_dcc;
	u32 fusectl1;

	/* As a side effect, update ae_mask based on configuration */
	update_ae_mask(accel_dev);

	/* Read accelerator capabilities mask */
	pci_read_config_dword(pdev, ADF_GEN4_FUSECTL1_OFFSET, &fusectl1);

	/* Full symmetric crypto capability set */
	capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
			  ICP_ACCEL_CAPABILITIES_CIPHER |
			  ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
			  ICP_ACCEL_CAPABILITIES_SHA3 |
			  ICP_ACCEL_CAPABILITIES_SHA3_EXT |
			  ICP_ACCEL_CAPABILITIES_HKDF |
			  ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
			  ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
			  ICP_ACCEL_CAPABILITIES_SM3 |
			  ICP_ACCEL_CAPABILITIES_SM4 |
			  ICP_ACCEL_CAPABILITIES_AES_V2 |
			  ICP_ACCEL_CAPABILITIES_ZUC |
			  ICP_ACCEL_CAPABILITIES_ZUC_256 |
			  ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT |
			  ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN;

	/* A set bit in fusectl1 means the feature is OFF in this SKU */
	if (fusectl1 & ICP_ACCEL_GEN4_MASK_CIPHER_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_UCS_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_AUTH_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_SMX_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4;
	}

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT;
	}

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;
	}

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE)
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256;

	/* Full asymmetric crypto capability set */
	capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
			  ICP_ACCEL_CAPABILITIES_SM2 |
			  ICP_ACCEL_CAPABILITIES_ECEDMONT;

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_PKE_SLICE) {
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
	}

	/* Full compression capability set */
	capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
			  ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
			  ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
			  ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE) {
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
	}

	/* Combine the per-domain sets according to the enabled service */
	switch (adf_get_service_enabled(accel_dev)) {
	case SVC_CY:
	case SVC_CY2:
		return capabilities_sym | capabilities_asym;
	case SVC_DC:
		return capabilities_dc;
	case SVC_DCC:
		/*
		 * Sym capabilities are available for chaining operations,
		 * but sym crypto instances cannot be supported
		 */
		capabilities_dcc = capabilities_dc | capabilities_sym;
		capabilities_dcc &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
		return capabilities_dcc;
	case SVC_SYM:
		return capabilities_sym;
	case SVC_ASYM:
		return capabilities_asym;
	case SVC_ASYM_DC:
	case SVC_DC_ASYM:
		return capabilities_asym | capabilities_dc;
	case SVC_SYM_DC:
	case SVC_DC_SYM:
		return capabilities_sym | capabilities_dc;
	default:
		return 0;
	}
}
294
adf_get_arbiter_mapping(struct adf_accel_dev * accel_dev)295 static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
296 {
297 if (adf_gen4_init_thd2arb_map(accel_dev))
298 dev_warn(&GET_DEV(accel_dev),
299 "Generate of the thread to arbiter map failed");
300
301 return GET_HW_DATA(accel_dev)->thd_to_arb_map;
302 }
303
adf_init_rl_data(struct adf_rl_hw_data * rl_data)304 static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
305 {
306 rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET;
307 rl_data->pciin_tb_offset = ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET;
308 rl_data->r2l_offset = ADF_GEN4_RL_R2L_OFFSET;
309 rl_data->l2c_offset = ADF_GEN4_RL_L2C_OFFSET;
310 rl_data->c2s_offset = ADF_GEN4_RL_C2S_OFFSET;
311
312 rl_data->pcie_scale_div = ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV;
313 rl_data->pcie_scale_mul = ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL;
314 rl_data->dcpr_correction = ADF_420XX_RL_DCPR_CORRECTION;
315 rl_data->max_tp[ADF_SVC_ASYM] = ADF_420XX_RL_MAX_TP_ASYM;
316 rl_data->max_tp[ADF_SVC_SYM] = ADF_420XX_RL_MAX_TP_SYM;
317 rl_data->max_tp[ADF_SVC_DC] = ADF_420XX_RL_MAX_TP_DC;
318 rl_data->scan_interval = ADF_420XX_RL_SCANS_PER_SEC;
319 rl_data->scale_ref = ADF_420XX_RL_SLICE_REF;
320 }
321
/*
 * Map an AE group mask to its ring pair group. AE group 2 belongs to ring
 * pair group 0 only for the cy configuration; otherwise to group 1.
 * Returns -EINVAL for masks that do not match a known AE group.
 */
static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask)
{
	if (ae_mask == ADF_AE_GROUP_0)
		return RP_GROUP_0;

	if (ae_mask == ADF_AE_GROUP_1 || ae_mask == ADF_AE_GROUP_3)
		return RP_GROUP_1;

	if (ae_mask == ADF_AE_GROUP_2)
		return get_fw_config(accel_dev) == adf_fw_cy_config ?
		       RP_GROUP_0 : RP_GROUP_1;

	dev_dbg(&GET_DEV(accel_dev), "ae_mask not recognized");
	return -EINVAL;
}
340
/*
 * Return the enabled-thread mask for firmware object @obj_num, or
 * ADF_GEN4_ENA_THD_MASK_ERROR when the object cannot be resolved.
 */
static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	const struct adf_fw_config *fw_config = get_fw_config(accel_dev);

	if (!fw_config || obj_num >= uof_get_num_objs(accel_dev))
		return ADF_GEN4_ENA_THD_MASK_ERROR;

	if (fw_config[obj_num].obj == ADF_FW_ASYM_OBJ)
		return ENA_THD_MASK_ASYM;
	if (fw_config[obj_num].obj == ADF_FW_SYM_OBJ)
		return ENA_THD_MASK_SYM;
	if (fw_config[obj_num].obj == ADF_FW_DC_OBJ)
		return ENA_THD_MASK_DC;

	return ADF_GEN4_ENA_THD_MASK_ERROR;
}
363
/*
 * Build the ring-pair to service map from the loaded fw configuration.
 *
 * Only the first RP_GROUP_COUNT entries of the fw configuration table are
 * examined; each entry must target AE group 0 or group 1, which select
 * ring pair group 0 and 1 respectively. Configurations whose leading
 * entries use other AE groups (e.g. the cy/sym/asym tables starting with
 * ADF_AE_GROUP_3) take the default branch and make this function return 0.
 *
 * NOTE(review): returning 0 presumably makes the caller fall back to the
 * default ring-to-service map - confirm this is intended for those
 * configurations.
 */
static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
{
	enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { };
	const struct adf_fw_config *fw_config;
	u16 ring_to_svc_map;
	int i, j;

	fw_config = get_fw_config(accel_dev);
	if (!fw_config)
		return 0;

	for (i = 0; i < RP_GROUP_COUNT; i++) {
		/* Resolve which ring pair group this fw entry serves */
		switch (fw_config[i].ae_mask) {
		case ADF_AE_GROUP_0:
			j = RP_GROUP_0;
			break;
		case ADF_AE_GROUP_1:
			j = RP_GROUP_1;
			break;
		default:
			return 0;
		}

		/* Record the service type provided by that group */
		switch (fw_config[i].obj) {
		case ADF_FW_SYM_OBJ:
			rps[j] = SYM;
			break;
		case ADF_FW_ASYM_OBJ:
			rps[j] = ASYM;
			break;
		case ADF_FW_DC_OBJ:
			rps[j] = COMP;
			break;
		default:
			rps[j] = 0;
			break;
		}
	}

	/* Ring pairs 0/2 follow group 0, ring pairs 1/3 follow group 1 */
	ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
			  rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
			  rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
			  rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;

	return ring_to_svc_map;
}
410
/*
 * Resolve the firmware image name for object number @obj_num from the
 * @fw_objs name table of @num_objs entries. Returns NULL when no fw
 * configuration is active or the resolved object id is out of range.
 */
static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
				const char * const fw_objs[], int num_objs)
{
	const struct adf_fw_config *fw_config;
	int id;

	fw_config = get_fw_config(accel_dev);
	if (fw_config)
		id = fw_config[obj_num].obj;
	else
		id = -EINVAL;

	/*
	 * fw_objs[] holds num_objs entries, so the valid index range is
	 * [0, num_objs - 1]. The previous check used 'id > num_objs',
	 * which allowed an out-of-bounds read at id == num_objs.
	 */
	if (id < 0 || id >= num_objs)
		return NULL;

	return fw_objs[id];
}
428
/* Resolve a firmware image name using the 420xx object name table. */
static const char *uof_get_name_420xx(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	return uof_get_name(accel_dev, obj_num, adf_420xx_fw_objs,
			    ARRAY_SIZE(adf_420xx_fw_objs));
}
435
/*
 * AE mask assigned to firmware object @obj_num, or 0 when no fw
 * configuration is active.
 */
static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	const struct adf_fw_config *fw_config = get_fw_config(accel_dev);

	return fw_config ? fw_config[obj_num].ae_mask : 0;
}
446
adf_gen4_set_err_mask(struct adf_dev_err_mask * dev_err_mask)447 static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask)
448 {
449 dev_err_mask->cppagentcmdpar_mask = ADF_420XX_HICPPAGENTCMDPARERRLOG_MASK;
450 dev_err_mask->parerr_ath_cph_mask = ADF_420XX_PARITYERRORMASK_ATH_CPH_MASK;
451 dev_err_mask->parerr_cpr_xlt_mask = ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK;
452 dev_err_mask->parerr_dcpr_ucs_mask = ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK;
453 dev_err_mask->parerr_pke_mask = ADF_420XX_PARITYERRORMASK_PKE_MASK;
454 dev_err_mask->ssmfeatren_mask = ADF_420XX_SSMFEATREN_MASK;
455 }
456
/*
 * adf_init_hw_data_420xx() - populate hw_data with the 420xx device
 * constants and operation callbacks.
 * @hw_data: structure to fill in
 * @dev_id: PCI device id (not referenced in this function)
 *
 * Registers the device in the 420xx class (incrementing the instance
 * counter) and wires up the GEN4-common and 420xx-specific ops.
 */
void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id)
{
	hw_data->dev_class = &adf_420xx_class;
	hw_data->instance_id = adf_420xx_class.instances++;
	/* Topology constants */
	hw_data->num_banks = ADF_GEN4_ETR_MAX_BANKS;
	hw_data->num_banks_per_vf = ADF_GEN4_NUM_BANKS_PER_VF;
	hw_data->num_rings_per_bank = ADF_GEN4_NUM_RINGS_PER_BANK;
	hw_data->num_accel = ADF_GEN4_MAX_ACCELERATORS;
	hw_data->num_engines = ADF_420XX_MAX_ACCELENGINES;
	hw_data->num_logical_accel = 1;
	hw_data->tx_rx_gap = ADF_GEN4_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_GEN4_TX_RINGS_MASK;
	hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
	/* IRQ and error handling ops */
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_gen4_enable_error_correction;
	/* Device query ops (masks, BARs, capabilities) */
	hw_data->get_accel_mask = adf_gen4_get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = adf_gen4_get_num_accels;
	hw_data->get_num_aes = adf_gen4_get_num_aes;
	hw_data->get_sram_bar_id = adf_gen4_get_sram_bar_id;
	hw_data->get_etr_bar_id = adf_gen4_get_etr_bar_id;
	hw_data->get_misc_bar_id = adf_gen4_get_misc_bar_id;
	hw_data->get_arb_info = adf_gen4_get_arb_info;
	hw_data->get_admin_info = adf_gen4_get_admin_info;
	hw_data->get_accel_cap = get_accel_cap;
	hw_data->get_sku = adf_gen4_get_sku;
	/* Admin interface and arbiter ops */
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->send_admin_init = adf_send_admin_init;
	hw_data->init_arb = adf_init_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_gen4_enable_ints;
	hw_data->init_device = adf_gen4_init_device;
	hw_data->reset_device = adf_reset_flr;
	hw_data->admin_ae_mask = ADF_420XX_ADMIN_AE_MASK;
	hw_data->num_rps = ADF_GEN4_MAX_RPS;
	/* Firmware names and object resolution ops */
	hw_data->fw_name = ADF_420XX_FW;
	hw_data->fw_mmp_name = ADF_420XX_MMP;
	hw_data->uof_get_name = uof_get_name_420xx;
	hw_data->uof_get_num_objs = uof_get_num_objs;
	hw_data->uof_get_ae_mask = uof_get_ae_mask;
	hw_data->get_rp_group = get_rp_group;
	hw_data->get_ena_thd_mask = get_ena_thd_mask;
	hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable;
	hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
	hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
	/* Power management and timer/heartbeat ops */
	hw_data->enable_pm = adf_gen4_enable_pm;
	hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
	hw_data->dev_config = adf_gen4_dev_config;
	hw_data->start_timer = adf_gen4_timer_start;
	hw_data->stop_timer = adf_gen4_timer_stop;
	hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock;
	hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
	hw_data->clock_frequency = ADF_420XX_AE_FREQ;

	/* Initialize the GEN4-common sub-structures */
	adf_gen4_set_err_mask(&hw_data->dev_err_mask);
	adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
	adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
	adf_gen4_init_dc_ops(&hw_data->dc_ops);
	adf_gen4_init_ras_ops(&hw_data->ras_ops);
	adf_gen4_init_tl_data(&hw_data->tl_data);
	adf_init_rl_data(&hw_data->rl_data);
}
524
adf_clean_hw_data_420xx(struct adf_hw_device_data * hw_data)525 void adf_clean_hw_data_420xx(struct adf_hw_device_data *hw_data)
526 {
527 hw_data->dev_class->instances--;
528 }
529