/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_ACCEL_DEVICES_H_
#define ADF_ACCEL_DEVICES_H_
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/types.h>
#include <linux/qat/qat_mig_dev.h>
#include <linux/wordpart.h>
#include "adf_cfg_common.h"
#include "adf_rl.h"
#include "adf_telemetry.h"
#include "adf_pfvf_msg.h"
#include "icp_qat_hw.h"

#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
#define ADF_C62X_DEVICE_NAME "c6xx"
#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
#define ADF_C3XXX_DEVICE_NAME "c3xxx"
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_4XXX_DEVICE_NAME "4xxx"
#define ADF_420XX_DEVICE_NAME "420xx"
#define ADF_4XXX_PCI_DEVICE_ID 0x4940
#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
#define ADF_401XX_PCI_DEVICE_ID 0x4942
#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
#define ADF_402XX_PCI_DEVICE_ID 0x4944
#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945
#define ADF_420XX_PCI_DEVICE_ID 0x4946
#define ADF_420XXIOV_PCI_DEVICE_ID 0x4947
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
#define ADF_MAX_MSIX_VECTOR_NAME 48
#define ADF_DEVICE_NAME_PREFIX "qat_"

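/* Bit flags describing the offload services an accelerator device can provide */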
enum adf_accel_capabilities {
	ADF_ACCEL_CAPABILITIES_NULL = 0,
	ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
	ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
	ADF_ACCEL_CAPABILITIES_CIPHER = 4,
	ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
	ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
	ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64,
	ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
};

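/* Indices into the fuses[] array in struct adf_hw_device_data */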
enum adf_fuses {
	ADF_FUSECTL0,
	ADF_FUSECTL1,
	ADF_FUSECTL2,
	ADF_FUSECTL3,
	ADF_FUSECTL4,
	ADF_FUSECTL5,
	ADF_MAX_FUSES
};

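/* PCI BAR descriptor: bus address, kernel mapping and region size */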
struct adf_bar {
	resource_size_t base_addr;
	void __iomem *virt_addr;
	resource_size_t size;
};

struct adf_irq {
	bool enabled;
	char name[ADF_MAX_MSIX_VECTOR_NAME];
};

struct adf_accel_msix {
	struct adf_irq *irqs;
	u32 num_entries;
};

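/* PCI specific state of an accelerator device: handle, MSI-X entries and BARs */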
struct adf_accel_pci {
	struct pci_dev *pci_dev;
	struct adf_accel_msix msix_entries;
	struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
	u8 revid;
	u8 sku;
};

enum dev_state {
	DEV_DOWN = 0,
	DEV_UP
};

enum dev_sku_info {
	DEV_SKU_1 = 0,
	DEV_SKU_2,
	DEV_SKU_3,
	DEV_SKU_4,
	DEV_SKU_VF,
	DEV_SKU_UNKNOWN,
};

enum ras_errors {
	ADF_RAS_CORR,
	ADF_RAS_UNCORR,
	ADF_RAS_FATAL,
	ADF_RAS_ERRORS,
};

struct adf_error_counters {
	atomic_t counter[ADF_RAS_ERRORS];
	bool sysfs_added;
	bool enabled;
};

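/* Map a SKU identifier to a human readable string */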
static inline const char *get_sku_info(enum dev_sku_info info)
{
	switch (info) {
	case DEV_SKU_1:
		return "SKU1";
	case DEV_SKU_2:
		return "SKU2";
	case DEV_SKU_3:
		return "SKU3";
	case DEV_SKU_4:
		return "SKU4";
	case DEV_SKU_VF:
		return "SKUVF";
	case DEV_SKU_UNKNOWN:
	default:
		break;
	}
	return "Unknown SKU";
}

struct adf_hw_device_class {
	const char *name;
	const enum adf_device_type type;
	u32 instances;
};

struct arb_info {
	u32 arb_cfg;
	u32 arb_offset;
	u32 wt2sam_offset;
};

struct admin_info {
	u32 admin_msg_ur;
	u32 admin_msg_lr;
	u32 mailbox_offset;
};

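/*
 * Snapshot of per-ring and per-bank CSR state, filled in and consumed by the
 * bank_state_save() and bank_state_restore() callbacks in adf_hw_device_data.
 */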
struct ring_config {
	u64 base;
	u32 config;
	u32 head;
	u32 tail;
	u32 reserved0;
};

struct bank_state {
	u32 ringstat0;
	u32 ringstat1;
	u32 ringuostat;
	u32 ringestat;
	u32 ringnestat;
	u32 ringnfstat;
	u32 ringfstat;
	u32 ringcstat0;
	u32 ringcstat1;
	u32 ringcstat2;
	u32 ringcstat3;
	u32 iaintflagen;
	u32 iaintflagreg;
	u32 iaintflagsrcsel0;
	u32 iaintflagsrcsel1;
	u32 iaintcolen;
	u32 iaintcolctl;
	u32 iaintflagandcolen;
	u32 ringexpstat;
	u32 ringexpintenable;
	u32 ringsrvarben;
	u32 reserved0;
	struct ring_config rings[ADF_ETR_MAX_RINGS_PER_BANK];
};

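/* Generation-specific accessors for the ring and bank CSRs of a device */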
struct adf_hw_csr_ops {
	u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
	u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
				  u32 ring);
	void (*write_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, u32 value);
	u32 (*read_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
				  u32 ring);
	void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, u32 value);
	u32 (*read_csr_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_uo_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_ne_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_nf_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_f_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_c_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_exp_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank,
				     u32 value);
	u32 (*read_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring);
	void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
				      u32 ring, u32 value);
	dma_addr_t (*read_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
					 u32 ring);
	void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, dma_addr_t addr);
	u32 (*read_csr_int_en)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_en)(void __iomem *csr_base_addr, u32 bank,
				 u32 value);
	u32 (*read_csr_int_flag)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank,
				   u32 value);
	u32 (*read_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_srcsel_w_val)(void __iomem *csr_base_addr,
					   u32 bank, u32 value);
	u32 (*read_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank,
				     u32 value);
	u32 (*read_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank,
				      u32 value);
	u32 (*read_csr_int_flag_and_col)(void __iomem *csr_base_addr,
					 u32 bank);
	void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr,
					   u32 bank, u32 value);
	u32 (*read_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank,
					  u32 value);
	u32 (*get_int_col_ctl_enable_mask)(void);
};

struct adf_cfg_device_data;
struct adf_accel_dev;
struct adf_etr_data;
struct adf_etr_ring_data;

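/* RAS callbacks: enable/disable error reporting and handle error interrupts */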
struct adf_ras_ops {
	void (*enable_ras_errors)(struct adf_accel_dev *accel_dev);
	void (*disable_ras_errors)(struct adf_accel_dev *accel_dev);
	bool (*handle_interrupt)(struct adf_accel_dev *accel_dev,
				 bool *reset_required);
};

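/* Callbacks implementing message exchange between the PF and its VFs */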
struct adf_pfvf_ops {
	int (*enable_comms)(struct adf_accel_dev *accel_dev);
	u32 (*get_pf2vf_offset)(u32 i);
	u32 (*get_vf2pf_offset)(u32 i);
	void (*enable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
	void (*disable_all_vf2pf_interrupts)(void __iomem *pmisc_addr);
	u32 (*disable_pending_vf2pf_interrupts)(void __iomem *pmisc_addr);
	int (*send_msg)(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
			u32 pfvf_offset, struct mutex *csr_lock);
	struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
					u32 pfvf_offset, u8 compat_ver);
};

struct adf_dc_ops {
	void (*build_deflate_ctx)(void *ctx);
};

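/* Device migration callbacks operating on a struct qat_mig_dev instance */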
struct qat_migdev_ops {
	int (*init)(struct qat_mig_dev *mdev);
	void (*cleanup)(struct qat_mig_dev *mdev);
	void (*reset)(struct qat_mig_dev *mdev);
	int (*open)(struct qat_mig_dev *mdev);
	void (*close)(struct qat_mig_dev *mdev);
	int (*suspend)(struct qat_mig_dev *mdev);
	int (*resume)(struct qat_mig_dev *mdev);
	int (*save_state)(struct qat_mig_dev *mdev);
	int (*save_setup)(struct qat_mig_dev *mdev);
	int (*load_state)(struct qat_mig_dev *mdev);
	int (*load_setup)(struct qat_mig_dev *mdev, int size);
};

struct adf_dev_err_mask {
	u32 cppagentcmdpar_mask;
	u32 parerr_ath_cph_mask;
	u32 parerr_cpr_xlt_mask;
	u32 parerr_dcpr_ucs_mask;
	u32 parerr_pke_mask;
	u32 parerr_wat_wcp_mask;
	u32 ssmfeatren_mask;
};

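/*
 * Per device-generation description: hardware parameters plus the callback
 * tables (CSR, PF/VF, compression, RAS, rate limiting, telemetry, migration)
 * used by the common driver code.
 */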
struct adf_hw_device_data {
	struct adf_hw_device_class *dev_class;
	u32 (*get_accel_mask)(struct adf_hw_device_data *self);
	u32 (*get_ae_mask)(struct adf_hw_device_data *self);
	u32 (*get_accel_cap)(struct adf_accel_dev *accel_dev);
	u32 (*get_sram_bar_id)(struct adf_hw_device_data *self);
	u32 (*get_misc_bar_id)(struct adf_hw_device_data *self);
	u32 (*get_etr_bar_id)(struct adf_hw_device_data *self);
	u32 (*get_num_aes)(struct adf_hw_device_data *self);
	u32 (*get_num_accels)(struct adf_hw_device_data *self);
	void (*get_arb_info)(struct arb_info *arb_csrs_info);
	void (*get_admin_info)(struct admin_info *admin_csrs_info);
	enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
	u16 (*get_ring_to_svc_map)(struct adf_accel_dev *accel_dev);
	int (*alloc_irq)(struct adf_accel_dev *accel_dev);
	void (*free_irq)(struct adf_accel_dev *accel_dev);
	void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
	int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
	void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
	int (*send_admin_init)(struct adf_accel_dev *accel_dev);
	int (*start_timer)(struct adf_accel_dev *accel_dev);
	void (*stop_timer)(struct adf_accel_dev *accel_dev);
	void (*check_hb_ctrs)(struct adf_accel_dev *accel_dev);
	uint32_t (*get_hb_clock)(struct adf_hw_device_data *self);
	int (*measure_clock)(struct adf_accel_dev *accel_dev);
	int (*init_arb)(struct adf_accel_dev *accel_dev);
	void (*exit_arb)(struct adf_accel_dev *accel_dev);
	const u32 *(*get_arb_mapping)(struct adf_accel_dev *accel_dev);
	int (*init_device)(struct adf_accel_dev *accel_dev);
	int (*enable_pm)(struct adf_accel_dev *accel_dev);
	bool (*handle_pm_interrupt)(struct adf_accel_dev *accel_dev);
	void (*disable_iov)(struct adf_accel_dev *accel_dev);
	void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
				      bool enable);
	void (*enable_ints)(struct adf_accel_dev *accel_dev);
	void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
	int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
	int (*bank_state_save)(struct adf_accel_dev *accel_dev, u32 bank_number,
			       struct bank_state *state);
	int (*bank_state_restore)(struct adf_accel_dev *accel_dev,
				  u32 bank_number, struct bank_state *state);
	void (*reset_device)(struct adf_accel_dev *accel_dev);
	void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
	const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
	u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev);
	int (*uof_get_obj_type)(struct adf_accel_dev *accel_dev, u32 obj_num);
	u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
	int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask);
	u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
	int (*dev_config)(struct adf_accel_dev *accel_dev);
	bool (*services_supported)(unsigned long mask);
	struct adf_pfvf_ops pfvf_ops;
	struct adf_hw_csr_ops csr_ops;
	struct adf_dc_ops dc_ops;
	struct adf_ras_ops ras_ops;
	struct adf_dev_err_mask dev_err_mask;
	struct adf_rl_hw_data rl_data;
	struct adf_tl_hw_data tl_data;
	struct qat_migdev_ops vfmig_ops;
	const char *fw_name;
	const char *fw_mmp_name;
	u32 fuses[ADF_MAX_FUSES];
	u32 straps;
	u32 accel_capabilities_mask;
	u32 extended_dc_capabilities;
	u16 fw_capabilities;
	u32 clock_frequency;
	u32 instance_id;
	u16 accel_mask;
	u32 ae_mask;
	u32 admin_ae_mask;
	u16 tx_rings_mask;
	u16 ring_to_svc_map;
	u32 thd_to_arb_map[ICP_QAT_HW_AE_DELIMITER];
	u8 tx_rx_gap;
	u8 num_banks;
	u16 num_banks_per_vf;
	u8 num_rings_per_bank;
	u8 num_accel;
	u8 num_logical_accel;
	u8 num_engines;
	u32 num_hb_ctrs;
	u8 num_rps;
};

/* CSR write macro */
#define ADF_CSR_WR(csr_base, csr_offset, val) \
	__raw_writel(val, csr_base + csr_offset)
/*
 * CSR write macro to handle cases where the high and low
 * offsets are sparsely located.
 */
#define ADF_CSR_WR64_LO_HI(csr_base, csr_low_offset, csr_high_offset, val) \
do { \
	ADF_CSR_WR(csr_base, csr_low_offset, lower_32_bits(val)); \
	ADF_CSR_WR(csr_base, csr_high_offset, upper_32_bits(val)); \
} while (0)

/* CSR read macro */
#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)

#define ADF_CFG_NUM_SERVICES 4
#define ADF_SRV_TYPE_BIT_LEN 3
#define ADF_SRV_TYPE_MASK 0x7
#define ADF_AE_ADMIN_THREAD 7
#define ADF_NUM_THREADS_PER_AE 8
#define ADF_NUM_PKE_STRAND 2
#define ADF_AE_STRAND0_THREAD 8
#define ADF_AE_STRAND1_THREAD 9

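/* Convenience accessors for fields reachable from a struct adf_accel_dev */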
#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
#define GET_NUM_RINGS_PER_BANK(accel_dev) \
	GET_HW_DATA(accel_dev)->num_rings_per_bank
#define GET_SRV_TYPE(accel_dev, idx) \
	(((GET_HW_DATA(accel_dev)->ring_to_svc_map) >> (ADF_SRV_TYPE_BIT_LEN * (idx))) \
	& ADF_SRV_TYPE_MASK)
#define GET_ERR_MASK(accel_dev) (&GET_HW_DATA(accel_dev)->dev_err_mask)
#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
#define GET_VFMIG_OPS(accel_dev) (&(accel_dev)->hw_device->vfmig_ops)
#define GET_TL_DATA(accel_dev) GET_HW_DATA(accel_dev)->tl_data
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev

struct adf_admin_comms;
struct icp_qat_fw_loader_handle;
struct adf_fw_loader_data {
	struct icp_qat_fw_loader_handle *fw_loader;
	const struct firmware *uof_fw;
	const struct firmware *mmp_fw;
};

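/* Per-VF state tracked by the PF when SR-IOV is enabled */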
struct adf_accel_vf_info {
	struct adf_accel_dev *accel_dev;
	struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
	struct mutex pfvf_mig_lock; /* protects PFVF state for migration */
	struct ratelimit_state vf2pf_ratelimit;
	u32 vf_nr;
	bool init;
	bool restarting;
	u8 vf_compat_ver;
	/*
	 * Private area used for device migration.
	 * Memory allocation and free is managed by migration driver.
	 */
	void *mig_priv;
};

struct adf_dc_data {
	u8 *ovf_buff;
	size_t ovf_buff_sz;
	dma_addr_t ovf_buff_p;
};

struct adf_pm {
	struct dentry *debugfs_pm_status;
	bool present;
	int idle_irq_counters;
	int throttle_irq_counters;
	int fw_irq_counters;
	int host_ack_counter;
	int host_nack_counter;
	ssize_t (*print_pm_status)(struct adf_accel_dev *accel_dev,
				   char __user *buf, size_t count, loff_t *pos);
};

struct adf_sysfs {
	int ring_num;
	struct rw_semaphore lock; /* protects access to the fields in this struct */
};

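/*
 * Top-level per-device structure; the pf/vf union below is selected by the
 * is_vf flag at the end of the structure.
 */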
struct adf_accel_dev {
	struct adf_etr_data *transport;
	struct adf_hw_device_data *hw_device;
	struct adf_cfg_device_data *cfg;
	struct adf_fw_loader_data *fw_loader;
	struct adf_admin_comms *admin;
	struct adf_telemetry *telemetry;
	struct adf_dc_data *dc_data;
	struct adf_pm power_management;
	struct list_head crypto_list;
	struct list_head compression_list;
	unsigned long status;
	atomic_t ref_count;
	struct dentry *debugfs_dir;
	struct dentry *fw_cntr_dbgfile;
	struct dentry *cnv_dbgfile;
	struct list_head list;
	struct module *owner;
	struct adf_accel_pci accel_pci_dev;
	struct adf_timer *timer;
	struct adf_heartbeat *heartbeat;
	struct adf_rl *rate_limiting;
	struct adf_sysfs sysfs;
	union {
		struct {
			/* protects VF2PF interrupts access */
			spinlock_t vf2pf_ints_lock;
			/* vf_info is non-zero when SR-IOV is init'ed */
			struct adf_accel_vf_info *vf_info;
		} pf;
		struct {
			bool irq_enabled;
			char irq_name[ADF_MAX_MSIX_VECTOR_NAME];
			struct tasklet_struct pf2vf_bh_tasklet;
			struct mutex vf2pf_lock; /* protect CSR access */
			struct completion msg_received;
			struct pfvf_message response; /* temp field holding pf2vf response */
			u8 pf_compat_ver;
		} vf;
	};
	struct adf_error_counters ras_errors;
	struct mutex state_lock; /* protect state of the device */
	bool is_vf;
	bool autoreset_on_error;
	u32 accel_id;
};
#endif