// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 - 2021 Intel Corporation */
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_admin.h>
#include <adf_bank_state.h>
#include <adf_cfg.h>
#include <adf_cfg_services.h>
#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_fw_config.h>
#include <adf_gen4_config.h>
#include <adf_gen4_hw_csr_data.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include "adf_gen4_ras.h"
#include <adf_gen4_tl.h>
#include <adf_gen4_vf_mig.h>
#include <adf_timer.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"

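/*
 * AE (Acceleration Engine) group masks, and the thread masks enabled for
 * each firmware object type.
 */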
#define ADF_AE_GROUP_0		GENMASK(3, 0)
#define ADF_AE_GROUP_1		GENMASK(7, 4)
#define ADF_AE_GROUP_2		BIT(8)

#define ENA_THD_MASK_ASYM	GENMASK(1, 0)
#define ENA_THD_MASK_ASYM_401XX	GENMASK(5, 0)
#define ENA_THD_MASK_SYM	GENMASK(6, 0)
#define ENA_THD_MASK_DC		GENMASK(1, 0)

static const char * const adf_4xxx_fw_objs[] = {
	[ADF_FW_SYM_OBJ] = ADF_4XXX_SYM_OBJ,
	[ADF_FW_ASYM_OBJ] = ADF_4XXX_ASYM_OBJ,
	[ADF_FW_DC_OBJ] = ADF_4XXX_DC_OBJ,
	[ADF_FW_ADMIN_OBJ] = ADF_4XXX_ADMIN_OBJ,
};

static const char * const adf_402xx_fw_objs[] = {
	[ADF_FW_SYM_OBJ] = ADF_402XX_SYM_OBJ,
	[ADF_FW_ASYM_OBJ] = ADF_402XX_ASYM_OBJ,
	[ADF_FW_DC_OBJ] = ADF_402XX_DC_OBJ,
	[ADF_FW_ADMIN_OBJ] = ADF_402XX_ADMIN_OBJ,
};

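/*
 * Firmware object to AE group assignments for each supported service
 * configuration. Every table loads the admin object on group 2 and the
 * service objects on groups 0 and 1.
 */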
static const struct adf_fw_config adf_fw_cy_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_dc_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_sym_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_asym_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_asym_dc_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_sym_dc_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_dcc_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dc_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config));

static struct adf_hw_device_class adf_4xxx_class = {
	.name = ADF_4XXX_DEVICE_NAME,
	.type = DEV_4XXX,
};

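/* A set FUSECTL4 fuse bit disables an engine; return the mask of enabled AEs */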
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
	u32 me_disable = self->fuses[ADF_FUSECTL4];

	return ~me_disable & ADF_4XXX_ACCELENGINES_MASK;
}

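/*
 * Build the device capability mask: start from the full set of GEN4
 * capabilities, clear those whose slices are fused off in FUSECTL1, then
 * reduce the result to the services enabled in the device configuration.
 */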
static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
	u32 capabilities_sym, capabilities_asym, capabilities_dc;
	u32 capabilities_dcc;
	u32 fusectl1;

	/* Read accelerator capabilities mask */
	pci_read_config_dword(pdev, ADF_GEN4_FUSECTL1_OFFSET, &fusectl1);

	capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
			  ICP_ACCEL_CAPABILITIES_CIPHER |
			  ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
			  ICP_ACCEL_CAPABILITIES_SHA3 |
			  ICP_ACCEL_CAPABILITIES_SHA3_EXT |
			  ICP_ACCEL_CAPABILITIES_HKDF |
			  ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
			  ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
			  ICP_ACCEL_CAPABILITIES_SM3 |
			  ICP_ACCEL_CAPABILITIES_SM4 |
			  ICP_ACCEL_CAPABILITIES_AES_V2;

	/* A set bit in fusectl1 means the feature is OFF in this SKU */
	if (fusectl1 & ICP_ACCEL_GEN4_MASK_CIPHER_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_UCS_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_AUTH_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_SMX_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4;
	}

	capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
			  ICP_ACCEL_CAPABILITIES_CIPHER |
			  ICP_ACCEL_CAPABILITIES_SM2 |
			  ICP_ACCEL_CAPABILITIES_ECEDMONT;

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_PKE_SLICE) {
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
	}

	capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
			  ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
			  ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
			  ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE) {
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
	}

	switch (adf_get_service_enabled(accel_dev)) {
	case SVC_SYM_ASYM:
		return capabilities_sym | capabilities_asym;
	case SVC_DC:
		return capabilities_dc;
	case SVC_DCC:
		/*
		 * Sym capabilities are available for chaining operations,
		 * but sym crypto instances cannot be supported
		 */
		capabilities_dcc = capabilities_dc | capabilities_sym;
		capabilities_dcc &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
		return capabilities_dcc;
	case SVC_SYM:
		return capabilities_sym;
	case SVC_ASYM:
		return capabilities_asym;
	case SVC_ASYM_DC:
		return capabilities_asym | capabilities_dc;
	case SVC_SYM_DC:
		return capabilities_sym | capabilities_dc;
	default:
		return 0;
	}
}

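/* Generate the thread-to-arbiter mapping and return a pointer to the table */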
static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
	if (adf_gen4_init_thd2arb_map(accel_dev))
		dev_warn(&GET_DEV(accel_dev),
			 "Failed to generate thread to arbiter mapping");

	return GET_HW_DATA(accel_dev)->thd_to_arb_map;
}

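/* Fill in the rate limiting parameters with 4xxx-specific values */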
static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
{
	rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET;
	rl_data->pciin_tb_offset = ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET;
	rl_data->r2l_offset = ADF_GEN4_RL_R2L_OFFSET;
	rl_data->l2c_offset = ADF_GEN4_RL_L2C_OFFSET;
	rl_data->c2s_offset = ADF_GEN4_RL_C2S_OFFSET;

	rl_data->pcie_scale_div = ADF_4XXX_RL_PCIE_SCALE_FACTOR_DIV;
	rl_data->pcie_scale_mul = ADF_4XXX_RL_PCIE_SCALE_FACTOR_MUL;
	rl_data->dcpr_correction = ADF_4XXX_RL_DCPR_CORRECTION;
	rl_data->max_tp[SVC_ASYM] = ADF_4XXX_RL_MAX_TP_ASYM;
	rl_data->max_tp[SVC_SYM] = ADF_4XXX_RL_MAX_TP_SYM;
	rl_data->max_tp[SVC_DC] = ADF_4XXX_RL_MAX_TP_DC;
	rl_data->scan_interval = ADF_4XXX_RL_SCANS_PER_SEC;
	rl_data->scale_ref = ADF_4XXX_RL_SLICE_REF;

	adf_gen4_init_num_svc_aes(rl_data);
}

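/*
 * All firmware configuration tables have the same number of entries, as
 * enforced by the static_asserts above, so any of them can be used here.
 */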
static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
{
	return ARRAY_SIZE(adf_fw_cy_config);
}

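/* Select the firmware configuration table that matches the enabled services */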
static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
{
	switch (adf_get_service_enabled(accel_dev)) {
	case SVC_SYM_ASYM:
		return adf_fw_cy_config;
	case SVC_DC:
		return adf_fw_dc_config;
	case SVC_DCC:
		return adf_fw_dcc_config;
	case SVC_SYM:
		return adf_fw_sym_config;
	case SVC_ASYM:
		return adf_fw_asym_config;
	case SVC_ASYM_DC:
		return adf_fw_asym_dc_config;
	case SVC_SYM_DC:
		return adf_fw_sym_dc_config;
	default:
		return NULL;
	}
}

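/* Map an AE group mask to its ring pair group */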
static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask)
{
	switch (ae_mask) {
	case ADF_AE_GROUP_0:
		return RP_GROUP_0;
	case ADF_AE_GROUP_1:
		return RP_GROUP_1;
	default:
		dev_dbg(&GET_DEV(accel_dev), "ae_mask not recognized");
		return -EINVAL;
	}
}

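/* Return the mask of enabled threads for the firmware object at obj_num */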
static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	const struct adf_fw_config *fw_config;

	if (obj_num >= uof_get_num_objs(accel_dev))
		return ADF_GEN4_ENA_THD_MASK_ERROR;

	fw_config = get_fw_config(accel_dev);
	if (!fw_config)
		return ADF_GEN4_ENA_THD_MASK_ERROR;

	switch (fw_config[obj_num].obj) {
	case ADF_FW_ASYM_OBJ:
		return ENA_THD_MASK_ASYM;
	case ADF_FW_SYM_OBJ:
		return ENA_THD_MASK_SYM;
	case ADF_FW_DC_OBJ:
		return ENA_THD_MASK_DC;
	default:
		return ADF_GEN4_ENA_THD_MASK_ERROR;
	}
}

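/* As get_ena_thd_mask(), but using the wider asym thread mask of 401xx devices */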
static u32 get_ena_thd_mask_401xx(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	const struct adf_fw_config *fw_config;

	if (obj_num >= uof_get_num_objs(accel_dev))
		return ADF_GEN4_ENA_THD_MASK_ERROR;

	fw_config = get_fw_config(accel_dev);
	if (!fw_config)
		return ADF_GEN4_ENA_THD_MASK_ERROR;

	switch (fw_config[obj_num].obj) {
	case ADF_FW_ASYM_OBJ:
		return ENA_THD_MASK_ASYM_401XX;
	case ADF_FW_SYM_OBJ:
		return ENA_THD_MASK_SYM;
	case ADF_FW_DC_OBJ:
		return ENA_THD_MASK_DC;
	default:
		return ADF_GEN4_ENA_THD_MASK_ERROR;
	}
}

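/* Look up the firmware object name for obj_num in the given per-SKU name table */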
static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
				const char * const fw_objs[], int num_objs)
{
	const struct adf_fw_config *fw_config;
	int id;

	fw_config = get_fw_config(accel_dev);
	if (fw_config)
		id = fw_config[obj_num].obj;
	else
		id = -EINVAL;

	if (id < 0 || id >= num_objs)
		return NULL;

	return fw_objs[id];
}

static const char *uof_get_name_4xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	int num_fw_objs = ARRAY_SIZE(adf_4xxx_fw_objs);

	return uof_get_name(accel_dev, obj_num, adf_4xxx_fw_objs, num_fw_objs);
}

static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	int num_fw_objs = ARRAY_SIZE(adf_402xx_fw_objs);

	return uof_get_name(accel_dev, obj_num, adf_402xx_fw_objs, num_fw_objs);
}

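/* Return the firmware object type for obj_num, or -EINVAL if out of range */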
static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	const struct adf_fw_config *fw_config;

	if (obj_num >= uof_get_num_objs(accel_dev))
		return -EINVAL;

	fw_config = get_fw_config(accel_dev);
	if (!fw_config)
		return -EINVAL;

	return fw_config[obj_num].obj;
}

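/* Return the AE mask on which the firmware object at obj_num is loaded */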
static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	const struct adf_fw_config *fw_config;

	fw_config = get_fw_config(accel_dev);
	if (!fw_config)
		return 0;

	return fw_config[obj_num].ae_mask;
}

static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask)
{
	dev_err_mask->cppagentcmdpar_mask = ADF_4XXX_HICPPAGENTCMDPARERRLOG_MASK;
	dev_err_mask->parerr_ath_cph_mask = ADF_4XXX_PARITYERRORMASK_ATH_CPH_MASK;
	dev_err_mask->parerr_cpr_xlt_mask = ADF_4XXX_PARITYERRORMASK_CPR_XLT_MASK;
	dev_err_mask->parerr_dcpr_ucs_mask = ADF_4XXX_PARITYERRORMASK_DCPR_UCS_MASK;
	dev_err_mask->parerr_pke_mask = ADF_4XXX_PARITYERRORMASK_PKE_MASK;
	dev_err_mask->ssmfeatren_mask = ADF_4XXX_SSMFEATREN_MASK;
}

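/* Initialize hw_data callbacks and constants for 4xxx, 401xx and 402xx devices */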
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
{
	hw_data->dev_class = &adf_4xxx_class;
	hw_data->instance_id = adf_4xxx_class.instances++;
	hw_data->num_banks = ADF_GEN4_ETR_MAX_BANKS;
	hw_data->num_banks_per_vf = ADF_GEN4_NUM_BANKS_PER_VF;
	hw_data->num_rings_per_bank = ADF_GEN4_NUM_RINGS_PER_BANK;
	hw_data->num_accel = ADF_GEN4_MAX_ACCELERATORS;
	hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
	hw_data->num_logical_accel = 1;
	hw_data->tx_rx_gap = ADF_GEN4_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_GEN4_TX_RINGS_MASK;
	hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_gen4_enable_error_correction;
	hw_data->get_accel_mask = adf_gen4_get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = adf_gen4_get_num_accels;
	hw_data->get_num_aes = adf_gen4_get_num_aes;
	hw_data->get_sram_bar_id = adf_gen4_get_sram_bar_id;
	hw_data->get_etr_bar_id = adf_gen4_get_etr_bar_id;
	hw_data->get_misc_bar_id = adf_gen4_get_misc_bar_id;
	hw_data->get_arb_info = adf_gen4_get_arb_info;
	hw_data->get_admin_info = adf_gen4_get_admin_info;
	hw_data->get_accel_cap = get_accel_cap;
	hw_data->get_sku = adf_gen4_get_sku;
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->send_admin_init = adf_send_admin_init;
	hw_data->init_arb = adf_init_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_gen4_enable_ints;
	hw_data->init_device = adf_gen4_init_device;
	hw_data->reset_device = adf_reset_flr;
	hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
	hw_data->num_rps = ADF_GEN4_MAX_RPS;
	switch (dev_id) {
	case PCI_DEVICE_ID_INTEL_QAT_402XX:
		hw_data->fw_name = ADF_402XX_FW;
		hw_data->fw_mmp_name = ADF_402XX_MMP;
		hw_data->uof_get_name = uof_get_name_402xx;
		hw_data->get_ena_thd_mask = get_ena_thd_mask;
		break;
	case PCI_DEVICE_ID_INTEL_QAT_401XX:
		hw_data->fw_name = ADF_4XXX_FW;
		hw_data->fw_mmp_name = ADF_4XXX_MMP;
		hw_data->uof_get_name = uof_get_name_4xxx;
		hw_data->get_ena_thd_mask = get_ena_thd_mask_401xx;
		break;
	default:
		hw_data->fw_name = ADF_4XXX_FW;
		hw_data->fw_mmp_name = ADF_4XXX_MMP;
		hw_data->uof_get_name = uof_get_name_4xxx;
		hw_data->get_ena_thd_mask = get_ena_thd_mask;
		break;
	}
	hw_data->uof_get_num_objs = uof_get_num_objs;
	hw_data->uof_get_obj_type = uof_get_obj_type;
	hw_data->uof_get_ae_mask = uof_get_ae_mask;
	hw_data->get_rp_group = get_rp_group;
	hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable;
	hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
	hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
	hw_data->bank_state_save = adf_bank_state_save;
	hw_data->bank_state_restore = adf_bank_state_restore;
	hw_data->enable_pm = adf_gen4_enable_pm;
	hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
	hw_data->dev_config = adf_gen4_dev_config;
	hw_data->start_timer = adf_timer_start;
	hw_data->stop_timer = adf_timer_stop;
	hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock;
	hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
	hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
	hw_data->services_supported = adf_gen4_services_supported;
	hw_data->get_svc_slice_cnt = adf_gen4_get_svc_slice_cnt;

	adf_gen4_set_err_mask(&hw_data->dev_err_mask);
	adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
	adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
	adf_gen4_init_dc_ops(&hw_data->dc_ops);
	adf_gen4_init_ras_ops(&hw_data->ras_ops);
	adf_gen4_init_tl_data(&hw_data->tl_data);
	adf_gen4_init_vf_mig_ops(&hw_data->vfmig_ops);
	adf_init_rl_data(&hw_data->rl_data);
}

void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}