1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
2 /* Copyright(c) 2014 - 2020 Intel Corporation */
3 #ifndef ADF_ACCEL_DEVICES_H_
4 #define ADF_ACCEL_DEVICES_H_
5 #include <linux/interrupt.h>
6 #include <linux/module.h>
7 #include <linux/list.h>
8 #include <linux/io.h>
9 #include <linux/pci.h>
10 #include <linux/ratelimit.h>
11 #include <linux/types.h>
12 #include <linux/qat/qat_mig_dev.h>
13 #include <linux/wordpart.h>
14 #include "adf_anti_rb.h"
15 #include "adf_cfg_common.h"
16 #include "adf_dc.h"
17 #include "adf_rl.h"
18 #include "adf_telemetry.h"
19 #include "adf_pfvf_msg.h"
20 #include "icp_qat_hw.h"
21
22 #define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
23 #define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
24 #define ADF_C62X_DEVICE_NAME "c6xx"
25 #define ADF_C62XVF_DEVICE_NAME "c6xxvf"
26 #define ADF_C3XXX_DEVICE_NAME "c3xxx"
27 #define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
28 #define ADF_4XXX_DEVICE_NAME "4xxx"
29 #define ADF_420XX_DEVICE_NAME "420xx"
30 #define ADF_6XXX_DEVICE_NAME "6xxx"
31 #define PCI_DEVICE_ID_INTEL_QAT_4XXX 0x4940
32 #define PCI_DEVICE_ID_INTEL_QAT_4XXXIOV 0x4941
33 #define PCI_DEVICE_ID_INTEL_QAT_401XX 0x4942
34 #define PCI_DEVICE_ID_INTEL_QAT_401XXIOV 0x4943
35 #define PCI_DEVICE_ID_INTEL_QAT_402XX 0x4944
36 #define PCI_DEVICE_ID_INTEL_QAT_402XXIOV 0x4945
37 #define PCI_DEVICE_ID_INTEL_QAT_420XX 0x4946
38 #define PCI_DEVICE_ID_INTEL_QAT_420XXIOV 0x4947
39 #define PCI_DEVICE_ID_INTEL_QAT_6XXX 0x4948
40 #define PCI_DEVICE_ID_INTEL_QAT_6XXX_IOV 0x4949
41
42 #define ADF_DEVICE_FUSECTL_OFFSET 0x40
43 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
44 #define ADF_DEVICE_FUSECTL_MASK 0x80000000
45 #define ADF_PCI_MAX_BARS 3
46 #define ADF_DEVICE_NAME_LENGTH 32
47 #define ADF_ETR_MAX_RINGS_PER_BANK 16
48 #define ADF_MAX_MSIX_VECTOR_NAME 48
49 #define ADF_DEVICE_NAME_PREFIX "qat_"
50
/*
 * Legacy accelerator capability flags.  These are bit-flag values combined
 * into adf_hw_device_data::accel_capabilities_mask.  Note that bit 4
 * (value 16) is intentionally not defined here.
 */
enum adf_accel_capabilities {
	ADF_ACCEL_CAPABILITIES_NULL = 0,
	ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
	ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
	ADF_ACCEL_CAPABILITIES_CIPHER = 4,
	ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
	ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
	ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64,
	ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
};
61
/*
 * Extended capability flags, kept in a separate mask
 * (adf_hw_device_data::accel_capabilities_ext_mask) from the legacy
 * capabilities above.  Currently covers additional compression formats.
 */
enum adf_accel_capabilities_ext {
	ADF_ACCEL_CAPABILITIES_EXT_ZSTD_LZ4S = BIT(0),
	ADF_ACCEL_CAPABILITIES_EXT_ZSTD = BIT(1),
};
66
/*
 * Indices into adf_hw_device_data::fuses[].  ADF_MAX_FUSES is the array
 * length, not a fuse register itself.
 */
enum adf_fuses {
	ADF_FUSECTL0,
	ADF_FUSECTL1,
	ADF_FUSECTL2,
	ADF_FUSECTL3,
	ADF_FUSECTL4,
	ADF_FUSECTL5,
	ADF_MAX_FUSES
};
76
/* One PCI BAR: bus base address, its kernel ioremap'ed mapping, and size. */
struct adf_bar {
	resource_size_t base_addr;	/* physical/bus address of the BAR */
	void __iomem *virt_addr;	/* CPU-side mapping used for CSR access */
	resource_size_t size;		/* BAR length in bytes */
};
82
/* State for a single MSI-X vector: whether it is requested and its name. */
struct adf_irq {
	bool enabled;				/* true once the IRQ is requested */
	char name[ADF_MAX_MSIX_VECTOR_NAME];	/* name shown in /proc/interrupts */
};
87
/* MSI-X vector table for a device: num_entries elements in irqs. */
struct adf_accel_msix {
	struct adf_irq *irqs;	/* array of per-vector state */
	u32 num_entries;	/* number of elements in irqs */
};
92
/* PCI-level state for one accelerator device. */
struct adf_accel_pci {
	struct pci_dev *pci_dev;		/* underlying PCI device */
	struct adf_accel_msix msix_entries;	/* MSI-X vectors */
	struct adf_bar pci_bars[ADF_PCI_MAX_BARS]; /* mapped BARs */
	u8 revid;				/* PCI revision ID */
	u8 sku;					/* device SKU identifier */
};
100
/* Coarse device lifecycle state. */
enum dev_state {
	DEV_DOWN = 0,
	DEV_UP
};
105
/*
 * Device SKU classification.  PF SKUs are numbered; DEV_SKU_VF marks a
 * virtual function.  Values are dense from 0 (see get_sku_info() below).
 */
enum dev_sku_info {
	DEV_SKU_1 = 0,
	DEV_SKU_2,
	DEV_SKU_3,
	DEV_SKU_4,
	DEV_SKU_VF,
	DEV_SKU_UNKNOWN,
};
114
/*
 * RAS error severity classes.  ADF_RAS_ERRORS is the number of classes and
 * sizes the counter array in struct adf_error_counters.
 */
enum ras_errors {
	ADF_RAS_CORR,	/* correctable */
	ADF_RAS_UNCORR,	/* uncorrectable */
	ADF_RAS_FATAL,	/* fatal */
	ADF_RAS_ERRORS,	/* count — not an error class */
};
121
/* Per-device RAS error counters, one atomic counter per severity class. */
struct adf_error_counters {
	atomic_t counter[ADF_RAS_ERRORS];
	bool sysfs_added;	/* true once the sysfs entries were created */
	bool enabled;		/* true while RAS error reporting is active */
};
127
get_sku_info(enum dev_sku_info info)128 static inline const char *get_sku_info(enum dev_sku_info info)
129 {
130 switch (info) {
131 case DEV_SKU_1:
132 return "SKU1";
133 case DEV_SKU_2:
134 return "SKU2";
135 case DEV_SKU_3:
136 return "SKU3";
137 case DEV_SKU_4:
138 return "SKU4";
139 case DEV_SKU_VF:
140 return "SKUVF";
141 case DEV_SKU_UNKNOWN:
142 default:
143 break;
144 }
145 return "Unknown SKU";
146 }
147
/* Describes a family of devices sharing one driver (name, type, count). */
struct adf_hw_device_class {
	const char *name;		/* device family name, e.g. "c6xx" */
	const enum adf_device_type type;
	u32 instances;			/* number of probed instances of this class */
};
153
/*
 * Arbiter CSR layout reported by get_arb_info().
 * NOTE(review): exact register semantics are defined by the per-generation
 * implementations — confirm against those before relying on them.
 */
struct arb_info {
	u32 arb_cfg;		/* arbiter configuration value */
	u32 arb_offset;		/* arbiter CSR base offset */
	u32 wt2sam_offset;	/* weight-to-SAM table offset */
};
159
/* Admin interface CSR offsets reported by get_admin_info(). */
struct admin_info {
	u32 admin_msg_ur;	/* admin message upper range offset */
	u32 admin_msg_lr;	/* admin message lower range offset */
	u32 mailbox_offset;	/* admin mailbox CSR offset */
};
165
166 struct adf_bank_state;
167
/*
 * Accessors for the ring-bank CSRs (head/tail pointers, status, interrupt
 * control, arbitration).  Each hook takes the mapped CSR base address plus
 * bank (and, where relevant, ring) index so the same transport code can
 * drive different register layouts.
 */
struct adf_hw_csr_ops {
	/* Encode a DMA address + ring size into the ring-base CSR format */
	u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
	/* Ring head/tail pointer access */
	u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
				  u32 ring);
	void (*write_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, u32 value);
	u32 (*read_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
				  u32 ring);
	void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, u32 value);
	/* Per-bank status registers */
	u32 (*read_csr_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_uo_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_ne_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_nf_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_f_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_c_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_exp_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank,
				     u32 value);
	/* Ring configuration and base-address registers */
	u32 (*read_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring);
	void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
				      u32 ring, u32 value);
	dma_addr_t (*read_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
					 u32 ring);
	void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, dma_addr_t addr);
	/* Interrupt enable/flag/source-select/coalescing control */
	u32 (*read_csr_int_en)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_en)(void __iomem *csr_base_addr, u32 bank,
				 u32 value);
	u32 (*read_csr_int_flag)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank,
				   u32 value);
	u32 (*read_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_srcsel_w_val)(void __iomem *csr_base_addr,
					   u32 bank, u32 value);
	u32 (*read_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank,
				     u32 value);
	u32 (*read_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank,
				      u32 value);
	u32 (*read_csr_int_flag_and_col)(void __iomem *csr_base_addr,
					 u32 bank);
	void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr,
					   u32 bank, u32 value);
	/* Ring service arbitration enable */
	u32 (*read_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank,
					  u32 value);
	u32 (*get_int_col_ctl_enable_mask)(void);
};
222
223 struct adf_cfg_device_data;
224 struct adf_accel_dev;
225 struct adf_etr_data;
226 struct adf_etr_ring_data;
227
/* RAS (error reporting/handling) hooks implemented per device generation. */
struct adf_ras_ops {
	void (*enable_ras_errors)(struct adf_accel_dev *accel_dev);
	void (*disable_ras_errors)(struct adf_accel_dev *accel_dev);
	/*
	 * Handle a RAS interrupt; sets *reset_required when the device
	 * needs a reset.  Return value indicates whether the interrupt
	 * was consumed.
	 */
	bool (*handle_interrupt)(struct adf_accel_dev *accel_dev,
				 bool *reset_required);
};
234
/* PF<->VF communication hooks (CSR offsets, interrupts, message passing). */
struct adf_pfvf_ops {
	int (*enable_comms)(struct adf_accel_dev *accel_dev);
	/* CSR offset of the PF2VF/VF2PF register for VF index i */
	u32 (*get_pf2vf_offset)(u32 i);
	u32 (*get_vf2pf_offset)(u32 i);
	void (*enable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
	void (*disable_all_vf2pf_interrupts)(void __iomem *pmisc_addr);
	/* Returns the mask of interrupts that were pending when disabled */
	u32 (*disable_pending_vf2pf_interrupts)(void __iomem *pmisc_addr);
	/* csr_lock serializes access to the shared PFVF CSR */
	int (*send_msg)(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
			u32 pfvf_offset, struct mutex *csr_lock);
	struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
					u32 pfvf_offset, u8 compat_ver);
};
247
/*
 * Compression-service hooks: build the firmware (de)compression config
 * block in ctx for the given algorithm.
 */
struct adf_dc_ops {
	int (*build_comp_block)(void *ctx, enum adf_dc_algo algo);
	int (*build_decomp_block)(void *ctx, enum adf_dc_algo algo);
};
252
/* VF live-migration hooks operating on a qat_mig_dev handle. */
struct qat_migdev_ops {
	int (*init)(struct qat_mig_dev *mdev);
	void (*cleanup)(struct qat_mig_dev *mdev);
	void (*reset)(struct qat_mig_dev *mdev);
	int (*open)(struct qat_mig_dev *mdev);
	void (*close)(struct qat_mig_dev *mdev);
	int (*suspend)(struct qat_mig_dev *mdev);
	int (*resume)(struct qat_mig_dev *mdev);
	/* save/load of device state and of the setup metadata */
	int (*save_state)(struct qat_mig_dev *mdev);
	int (*save_setup)(struct qat_mig_dev *mdev);
	int (*load_state)(struct qat_mig_dev *mdev);
	int (*load_setup)(struct qat_mig_dev *mdev, int size);
};
266
/*
 * Per-generation masks for hardware error registers; consumed via
 * GET_ERR_MASK() by the RAS handling code.
 */
struct adf_dev_err_mask {
	u32 cppagentcmdpar_mask;	/* CPP agent command parity errors */
	u32 parerr_ath_cph_mask;	/* parity: auth/cipher slices */
	u32 parerr_cpr_xlt_mask;	/* parity: compression/translator slices */
	u32 parerr_dcpr_ucs_mask;	/* parity: decompression/UCS slices */
	u32 parerr_pke_mask;		/* parity: PKE slices */
	u32 parerr_wat_wcp_mask;	/* parity: wat/wcp slices */
	u32 ssmfeatren_mask;		/* SSM feature-enable register mask */
};
276
277 struct adf_hw_device_data {
278 struct adf_hw_device_class *dev_class;
279 u32 (*get_accel_mask)(struct adf_hw_device_data *self);
280 u32 (*get_ae_mask)(struct adf_hw_device_data *self);
281 u32 (*get_accel_cap)(struct adf_accel_dev *accel_dev);
282 u32 (*get_sram_bar_id)(struct adf_hw_device_data *self);
283 u32 (*get_misc_bar_id)(struct adf_hw_device_data *self);
284 u32 (*get_etr_bar_id)(struct adf_hw_device_data *self);
285 u32 (*get_num_aes)(struct adf_hw_device_data *self);
286 u32 (*get_num_accels)(struct adf_hw_device_data *self);
287 void (*get_arb_info)(struct arb_info *arb_csrs_info);
288 void (*get_admin_info)(struct admin_info *admin_csrs_info);
289 enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
290 u16 (*get_ring_to_svc_map)(struct adf_accel_dev *accel_dev);
291 int (*alloc_irq)(struct adf_accel_dev *accel_dev);
292 void (*free_irq)(struct adf_accel_dev *accel_dev);
293 void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
294 int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
295 void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
296 int (*send_admin_init)(struct adf_accel_dev *accel_dev);
297 int (*start_timer)(struct adf_accel_dev *accel_dev);
298 void (*stop_timer)(struct adf_accel_dev *accel_dev);
299 void (*check_hb_ctrs)(struct adf_accel_dev *accel_dev);
300 uint32_t (*get_hb_clock)(struct adf_hw_device_data *self);
301 int (*measure_clock)(struct adf_accel_dev *accel_dev);
302 int (*init_arb)(struct adf_accel_dev *accel_dev);
303 void (*exit_arb)(struct adf_accel_dev *accel_dev);
304 const u32 *(*get_arb_mapping)(struct adf_accel_dev *accel_dev);
305 int (*init_device)(struct adf_accel_dev *accel_dev);
306 int (*enable_pm)(struct adf_accel_dev *accel_dev);
307 bool (*handle_pm_interrupt)(struct adf_accel_dev *accel_dev);
308 void (*disable_iov)(struct adf_accel_dev *accel_dev);
309 void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
310 bool enable);
311 void (*enable_ints)(struct adf_accel_dev *accel_dev);
312 void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
313 int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
314 int (*bank_state_save)(struct adf_accel_dev *accel_dev, u32 bank_number,
315 struct adf_bank_state *state);
316 int (*bank_state_restore)(struct adf_accel_dev *accel_dev,
317 u32 bank_number, struct adf_bank_state *state);
318 void (*reset_device)(struct adf_accel_dev *accel_dev);
319 void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
320 const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
321 u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev);
322 int (*uof_get_obj_type)(struct adf_accel_dev *accel_dev, u32 obj_num);
323 u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
324 int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask);
325 u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
326 int (*dev_config)(struct adf_accel_dev *accel_dev);
327 bool (*services_supported)(unsigned long mask);
328 u32 (*get_svc_slice_cnt)(struct adf_accel_dev *accel_dev,
329 enum adf_base_services svc);
330 struct adf_pfvf_ops pfvf_ops;
331 struct adf_hw_csr_ops csr_ops;
332 struct adf_dc_ops dc_ops;
333 struct adf_ras_ops ras_ops;
334 struct adf_dev_err_mask dev_err_mask;
335 struct adf_rl_hw_data rl_data;
336 struct adf_tl_hw_data tl_data;
337 struct adf_anti_rb_hw_data anti_rb_data;
338 struct qat_migdev_ops vfmig_ops;
339 const char *fw_name;
340 const char *fw_mmp_name;
341 u32 fuses[ADF_MAX_FUSES];
342 u32 straps;
343 u32 accel_capabilities_mask;
344 u32 accel_capabilities_ext_mask;
345 u32 extended_dc_capabilities;
346 u16 fw_capabilities;
347 u32 clock_frequency;
348 u32 instance_id;
349 u16 accel_mask;
350 u32 ae_mask;
351 u32 admin_ae_mask;
352 u16 tx_rings_mask;
353 u16 ring_to_svc_map;
354 u32 thd_to_arb_map[ICP_QAT_HW_AE_DELIMITER];
355 u8 tx_rx_gap;
356 u8 num_banks;
357 u16 num_banks_per_vf;
358 u8 num_rings_per_bank;
359 u8 num_accel;
360 u8 num_logical_accel;
361 u8 num_engines;
362 u32 num_hb_ctrs;
363 u8 num_rps;
364 };
365
/*
 * CSR write macro.  Arguments are parenthesized so expression arguments
 * (e.g. "base + off" or shifted offsets) expand correctly.
 */
#define ADF_CSR_WR(csr_base, csr_offset, val) \
	__raw_writel((val), (csr_base) + (csr_offset))
/*
 * CSR write macro to handle cases where the high and low
 * offsets are sparsely located.
 */
#define ADF_CSR_WR64_LO_HI(csr_base, csr_low_offset, csr_high_offset, val) \
do { \
	ADF_CSR_WR(csr_base, csr_low_offset, lower_32_bits(val)); \
	ADF_CSR_WR(csr_base, csr_high_offset, upper_32_bits(val)); \
} while (0)

/* CSR read macro */
#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl((csr_base) + (csr_offset))
381
/* Ring-to-service map layout: 4 services, 3 bits each */
#define ADF_CFG_NUM_SERVICES 4
#define ADF_SRV_TYPE_BIT_LEN 3
#define ADF_SRV_TYPE_MASK 0x7
#define ADF_AE_ADMIN_THREAD 7
#define ADF_NUM_THREADS_PER_AE 8
#define ADF_NUM_PKE_STRAND 2
#define ADF_AE_STRAND0_THREAD 8
#define ADF_AE_STRAND1_THREAD 9

/*
 * Accessor macros for struct adf_accel_dev.  All arguments and expansions
 * are fully parenthesized (previously GET_HW_DATA and friends were not).
 */
#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
#define GET_HW_DATA(accel_dev) ((accel_dev)->hw_device)
#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
#define GET_NUM_RINGS_PER_BANK(accel_dev) \
	(GET_HW_DATA(accel_dev)->num_rings_per_bank)
/* Extract the 3-bit service type for ring-pair index idx */
#define GET_SRV_TYPE(accel_dev, idx) \
	(((GET_HW_DATA(accel_dev)->ring_to_svc_map) >> (ADF_SRV_TYPE_BIT_LEN * (idx))) \
	& ADF_SRV_TYPE_MASK)
#define GET_ERR_MASK(accel_dev) (&GET_HW_DATA(accel_dev)->dev_err_mask)
#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
#define GET_VFMIG_OPS(accel_dev) (&(accel_dev)->hw_device->vfmig_ops)
#define GET_TL_DATA(accel_dev) (GET_HW_DATA(accel_dev)->tl_data)
#define accel_to_pci_dev(accel_ptr) ((accel_ptr)->accel_pci_dev.pci_dev)
408
struct adf_admin_comms;
struct icp_qat_fw_loader_handle;
/* Firmware loader context plus the requested UOF and MMP firmware images. */
struct adf_fw_loader_data {
	struct icp_qat_fw_loader_handle *fw_loader;
	const struct firmware *uof_fw;	/* main firmware image */
	const struct firmware *mmp_fw;	/* MMP firmware image */
};
416
/* PF-side bookkeeping for one virtual function. */
struct adf_accel_vf_info {
	struct adf_accel_dev *accel_dev;	/* owning PF device */
	struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
	struct mutex pfvf_mig_lock; /* protects PFVF state for migration */
	struct ratelimit_state vf2pf_ratelimit;	/* throttles VF2PF handling */
	u32 vf_nr;				/* VF index */
	bool init;				/* VF completed init handshake */
	bool restarting;			/* VF restart in progress */
	u8 vf_compat_ver;			/* PFVF protocol version of the VF */
	/*
	 * Private area used for device migration.
	 * Memory allocation and free is managed by migration driver.
	 */
	void *mig_priv;
};
432
/*
 * DMA-able buffer used by the compression service.
 * NOTE(review): "ovf" presumably means overflow handling — confirm
 * against the compression service code that allocates this.
 */
struct adf_dc_data {
	u8 *ovf_buff;		/* CPU address of the buffer */
	size_t ovf_buff_sz;	/* buffer size in bytes */
	dma_addr_t ovf_buff_p;	/* DMA address of the buffer */
};
438
/* Power-management state: debugfs hook and interrupt/event counters. */
struct adf_pm {
	struct dentry *debugfs_pm_status;	/* debugfs "pm_status" file */
	bool present;				/* PM supported on this device */
	int idle_irq_counters;
	int throttle_irq_counters;
	int fw_irq_counters;
	int host_ack_counter;
	int host_nack_counter;
	/* Formats the PM status report for the debugfs read path */
	ssize_t (*print_pm_status)(struct adf_accel_dev *accel_dev,
				   char __user *buf, size_t count, loff_t *pos);
};
450
/* State backing the per-device sysfs attributes. */
struct adf_sysfs {
	int ring_num;	/* ring number selected through sysfs */
	struct rw_semaphore lock; /* protects access to the fields in this struct */
};
455
/*
 * Top-level per-device state for one QAT accelerator (PF or VF),
 * discriminated by is_vf for the pf/vf union below.
 */
struct adf_accel_dev {
	struct adf_etr_data *transport;		/* transport-ring state */
	struct adf_hw_device_data *hw_device;	/* per-generation ops/data */
	struct adf_cfg_device_data *cfg;	/* device configuration */
	struct adf_fw_loader_data *fw_loader;
	struct adf_admin_comms *admin;
	struct adf_telemetry *telemetry;
	struct adf_dc_data *dc_data;		/* compression-service buffer */
	struct adf_pm power_management;
	struct list_head crypto_list;		/* registered crypto instances */
	struct list_head compression_list;	/* registered compression instances */
	unsigned long status;
	atomic_t ref_count;
	/* debugfs directory and files */
	struct dentry *debugfs_dir;
	struct dentry *fw_cntr_dbgfile;
	struct dentry *cnv_dbgfile;
	struct list_head list;			/* global accel-device list linkage */
	struct module *owner;
	struct adf_accel_pci accel_pci_dev;
	struct adf_timer *timer;
	struct adf_heartbeat *heartbeat;
	struct adf_rl *rate_limiting;
	struct adf_sysfs sysfs;
	union {
		struct {
			/* protects VF2PF interrupts access */
			spinlock_t vf2pf_ints_lock;
			/* vf_info is non-zero when SR-IOV is init'ed */
			struct adf_accel_vf_info *vf_info;
		} pf;
		struct {
			bool irq_enabled;
			char irq_name[ADF_MAX_MSIX_VECTOR_NAME];
			struct tasklet_struct pf2vf_bh_tasklet;
			struct mutex vf2pf_lock; /* protect CSR access */
			struct completion msg_received;
			struct pfvf_message response; /* temp field holding pf2vf response */
			u8 pf_compat_ver;	/* PFVF protocol version of the PF */
		} vf;
	};
	struct adf_error_counters ras_errors;
	struct mutex state_lock; /* protect state of the device */
	bool is_vf;			/* selects the pf/vf union member */
	bool autoreset_on_error;
	u32 accel_id;			/* unique device index */
};
502 #endif
503