1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
2 /* Copyright(c) 2014 - 2020 Intel Corporation */
3 #ifndef ADF_ACCEL_DEVICES_H_
4 #define ADF_ACCEL_DEVICES_H_
5 #include <linux/interrupt.h>
6 #include <linux/module.h>
7 #include <linux/list.h>
8 #include <linux/io.h>
9 #include <linux/pci.h>
10 #include <linux/ratelimit.h>
11 #include <linux/types.h>
12 #include <linux/qat/qat_mig_dev.h>
13 #include <linux/wordpart.h>
14 #include "adf_cfg_common.h"
15 #include "adf_dc.h"
16 #include "adf_rl.h"
17 #include "adf_telemetry.h"
18 #include "adf_pfvf_msg.h"
19 #include "icp_qat_hw.h"
20
/* Device name strings, used for configuration sections and device naming */
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
#define ADF_C62X_DEVICE_NAME "c6xx"
#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
#define ADF_C3XXX_DEVICE_NAME "c3xxx"
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_4XXX_DEVICE_NAME "4xxx"
#define ADF_420XX_DEVICE_NAME "420xx"
#define ADF_6XXX_DEVICE_NAME "6xxx"

/* PCI device IDs for QAT generations; *IOV/_IOV IDs are the SR-IOV VFs */
#define PCI_DEVICE_ID_INTEL_QAT_4XXX 0x4940
#define PCI_DEVICE_ID_INTEL_QAT_4XXXIOV 0x4941
#define PCI_DEVICE_ID_INTEL_QAT_401XX 0x4942
#define PCI_DEVICE_ID_INTEL_QAT_401XXIOV 0x4943
#define PCI_DEVICE_ID_INTEL_QAT_402XX 0x4944
#define PCI_DEVICE_ID_INTEL_QAT_402XXIOV 0x4945
#define PCI_DEVICE_ID_INTEL_QAT_420XX 0x4946
#define PCI_DEVICE_ID_INTEL_QAT_420XXIOV 0x4947
#define PCI_DEVICE_ID_INTEL_QAT_6XXX 0x4948
#define PCI_DEVICE_ID_INTEL_QAT_6XXX_IOV 0x4949

/* Fuse register offsets in PCI config space and related mask */
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000

/* Driver-wide sizing limits */
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
#define ADF_MAX_MSIX_VECTOR_NAME 48
#define ADF_DEVICE_NAME_PREFIX "qat_"
49
/*
 * Accelerator capability flags. Values are individual bits so that they
 * can be OR-ed together into a capability mask.
 * NOTE(review): bit 16 is unassigned here - presumably reserved; confirm
 * against the firmware capability definitions.
 */
enum adf_accel_capabilities {
	ADF_ACCEL_CAPABILITIES_NULL = 0,
	ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
	ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
	ADF_ACCEL_CAPABILITIES_CIPHER = 4,
	ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
	ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
	ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64,
	ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
};
60
/*
 * Indices into the fuses[] array of struct adf_hw_device_data; one entry
 * per FUSECTL register. ADF_MAX_FUSES is the array length, not a fuse.
 */
enum adf_fuses {
	ADF_FUSECTL0,
	ADF_FUSECTL1,
	ADF_FUSECTL2,
	ADF_FUSECTL3,
	ADF_FUSECTL4,
	ADF_FUSECTL5,
	ADF_MAX_FUSES
};
70
/*
 * struct adf_bar - One mapped PCI BAR of the accelerator.
 * @base_addr: physical base address of the BAR
 * @virt_addr: kernel virtual address of the ioremapped BAR
 * @size: length of the BAR in bytes
 */
struct adf_bar {
	resource_size_t base_addr;
	void __iomem *virt_addr;
	resource_size_t size;
};
76
/*
 * struct adf_irq - State for a single MSI-X interrupt vector.
 * @enabled: true once the vector has been requested
 * @name: vector name passed to request_irq(), sized by
 *        ADF_MAX_MSIX_VECTOR_NAME
 */
struct adf_irq {
	bool enabled;
	char name[ADF_MAX_MSIX_VECTOR_NAME];
};
81
/*
 * struct adf_accel_msix - MSI-X vector table for the device.
 * @irqs: array of per-vector state, @num_entries elements long
 * @num_entries: number of allocated MSI-X vectors
 */
struct adf_accel_msix {
	struct adf_irq *irqs;
	u32 num_entries;
};
86
/*
 * struct adf_accel_pci - PCI-level view of the accelerator.
 * @pci_dev: underlying PCI device
 * @msix_entries: MSI-X vectors allocated for the device
 * @pci_bars: mapped BARs, at most ADF_PCI_MAX_BARS
 * @revid: PCI revision ID
 * @sku: raw SKU identifier read from the hardware
 */
struct adf_accel_pci {
	struct pci_dev *pci_dev;
	struct adf_accel_msix msix_entries;
	struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
	u8 revid;
	u8 sku;
};
94
/* Requested/actual run state of the accelerator device */
enum dev_state {
	DEV_DOWN = 0,
	DEV_UP
};
99
/*
 * Device SKU identifiers; translated to printable names by
 * get_sku_info() below. DEV_SKU_VF marks a virtual function.
 */
enum dev_sku_info {
	DEV_SKU_1 = 0,
	DEV_SKU_2,
	DEV_SKU_3,
	DEV_SKU_4,
	DEV_SKU_VF,
	DEV_SKU_UNKNOWN,
};
108
/*
 * RAS error severities; also used as indices into the counter array of
 * struct adf_error_counters. ADF_RAS_ERRORS is the number of severities.
 */
enum ras_errors {
	ADF_RAS_CORR,
	ADF_RAS_UNCORR,
	ADF_RAS_FATAL,
	ADF_RAS_ERRORS,
};
115
/*
 * struct adf_error_counters - Per-severity RAS error counters.
 * @counter: one atomic counter per ras_errors severity
 * @sysfs_added: true once the counters were exposed through sysfs
 * @enabled: true while RAS error counting is active
 */
struct adf_error_counters {
	atomic_t counter[ADF_RAS_ERRORS];
	bool sysfs_added;
	bool enabled;
};
121
get_sku_info(enum dev_sku_info info)122 static inline const char *get_sku_info(enum dev_sku_info info)
123 {
124 switch (info) {
125 case DEV_SKU_1:
126 return "SKU1";
127 case DEV_SKU_2:
128 return "SKU2";
129 case DEV_SKU_3:
130 return "SKU3";
131 case DEV_SKU_4:
132 return "SKU4";
133 case DEV_SKU_VF:
134 return "SKUVF";
135 case DEV_SKU_UNKNOWN:
136 default:
137 break;
138 }
139 return "Unknown SKU";
140 }
141
/*
 * struct adf_hw_device_class - Identity shared by all devices of one
 * hardware generation.
 * @name: class name, e.g. one of the *_DEVICE_NAME strings
 * @type: device type of this class
 * @instances: number of devices of this class currently registered
 */
struct adf_hw_device_class {
	const char *name;
	const enum adf_device_type type;
	u32 instances;
};
147
/*
 * struct arb_info - CSR locations for the ring arbiter.
 * @arb_cfg: arbiter configuration value
 * @arb_offset: offset of the arbiter CSRs
 * @wt2sam_offset: offset of the worker-thread to SARB mapping CSRs
 */
struct arb_info {
	u32 arb_cfg;
	u32 arb_offset;
	u32 wt2sam_offset;
};
153
/*
 * struct admin_info - CSR locations used for admin messaging with the
 * firmware.
 * @admin_msg_ur: upper admin message register offset
 * @admin_msg_lr: lower admin message register offset
 * @mailbox_offset: offset of the admin mailbox CSRs
 */
struct admin_info {
	u32 admin_msg_ur;
	u32 admin_msg_lr;
	u32 mailbox_offset;
};
159
struct adf_bank_state;

/*
 * struct adf_hw_csr_ops - Generation-specific accessors for the ring
 * (ETR) CSRs. Each op takes the ETR BAR virtual address, a bank number
 * and, where applicable, a ring number within the bank. Implementations
 * are provided per hardware generation and hide CSR layout differences.
 */
struct adf_hw_csr_ops {
	/* Encode a ring DMA base address and size into the CSR format */
	u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
	/* Ring head/tail pointer access */
	u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
				  u32 ring);
	void (*write_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, u32 value);
	u32 (*read_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
				  u32 ring);
	void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, u32 value);
	/* Per-bank status registers (empty/full/exception, etc.) */
	u32 (*read_csr_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_uo_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_ne_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_nf_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_f_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_c_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_exp_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank,
				     u32 value);
	/* Ring configuration and base address */
	u32 (*read_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring);
	void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
				      u32 ring, u32 value);
	dma_addr_t (*read_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
					 u32 ring);
	void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, dma_addr_t addr);
	/* Interrupt enable/flag/source-select/coalescing control */
	u32 (*read_csr_int_en)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_en)(void __iomem *csr_base_addr, u32 bank,
				 u32 value);
	u32 (*read_csr_int_flag)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank,
				   u32 value);
	u32 (*read_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_srcsel_w_val)(void __iomem *csr_base_addr,
					   u32 bank, u32 value);
	u32 (*read_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank,
				     u32 value);
	u32 (*read_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank,
				      u32 value);
	u32 (*read_csr_int_flag_and_col)(void __iomem *csr_base_addr,
					 u32 bank);
	void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr,
					   u32 bank, u32 value);
	/* Ring service arbiter enable bits */
	u32 (*read_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank,
					  u32 value);
	u32 (*get_int_col_ctl_enable_mask)(void);
};
216
struct adf_cfg_device_data;
struct adf_accel_dev;
struct adf_etr_data;
struct adf_etr_ring_data;

/*
 * struct adf_ras_ops - Generation-specific RAS (error reporting) hooks.
 * @enable_ras_errors: enable RAS error reporting on the device
 * @disable_ras_errors: disable RAS error reporting on the device
 * @handle_interrupt: classify a RAS interrupt; returns true if it was a
 *	RAS event and sets *@reset_required when recovery needs a reset
 */
struct adf_ras_ops {
	void (*enable_ras_errors)(struct adf_accel_dev *accel_dev);
	void (*disable_ras_errors)(struct adf_accel_dev *accel_dev);
	bool (*handle_interrupt)(struct adf_accel_dev *accel_dev,
				 bool *reset_required);
};
228
/*
 * struct adf_pfvf_ops - Generation-specific PF<->VF messaging hooks.
 * @enable_comms: enable the PF/VF communication channel
 * @get_pf2vf_offset: CSR offset for PF-to-VF messages of VF @i
 * @get_vf2pf_offset: CSR offset for VF-to-PF messages of VF @i
 * @enable_vf2pf_interrupts: enable VF2PF interrupts for VFs in @vf_mask
 * @disable_all_vf2pf_interrupts: mask VF2PF interrupts from all VFs
 * @disable_pending_vf2pf_interrupts: mask pending VF2PF interrupts and
 *	return the mask of sources that were pending
 * @send_msg: send @msg through the CSR at @pfvf_offset, serialized by
 *	@csr_lock
 * @recv_msg: receive a message from @pfvf_offset using protocol
 *	compatibility version @compat_ver
 */
struct adf_pfvf_ops {
	int (*enable_comms)(struct adf_accel_dev *accel_dev);
	u32 (*get_pf2vf_offset)(u32 i);
	u32 (*get_vf2pf_offset)(u32 i);
	void (*enable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
	void (*disable_all_vf2pf_interrupts)(void __iomem *pmisc_addr);
	u32 (*disable_pending_vf2pf_interrupts)(void __iomem *pmisc_addr);
	int (*send_msg)(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
			u32 pfvf_offset, struct mutex *csr_lock);
	struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
					u32 pfvf_offset, u8 compat_ver);
};
241
/*
 * struct adf_dc_ops - Generation-specific compression setup hooks.
 * @build_comp_block: build the compression configuration block for
 *	algorithm @algo into the (generation-defined) context @ctx
 * @build_decomp_block: same for the decompression direction
 */
struct adf_dc_ops {
	int (*build_comp_block)(void *ctx, enum adf_dc_algo algo);
	int (*build_decomp_block)(void *ctx, enum adf_dc_algo algo);
};
246
/*
 * struct qat_migdev_ops - Hooks used for VF live migration.
 * save_state/load_state transfer device state; save_setup/load_setup
 * transfer the setup portion (load_setup additionally takes its size).
 * Ops returning int report 0 on success, negative errno on failure.
 */
struct qat_migdev_ops {
	int (*init)(struct qat_mig_dev *mdev);
	void (*cleanup)(struct qat_mig_dev *mdev);
	void (*reset)(struct qat_mig_dev *mdev);
	int (*open)(struct qat_mig_dev *mdev);
	void (*close)(struct qat_mig_dev *mdev);
	int (*suspend)(struct qat_mig_dev *mdev);
	int (*resume)(struct qat_mig_dev *mdev);
	int (*save_state)(struct qat_mig_dev *mdev);
	int (*save_setup)(struct qat_mig_dev *mdev);
	int (*load_state)(struct qat_mig_dev *mdev);
	int (*load_setup)(struct qat_mig_dev *mdev, int size);
};
260
/*
 * struct adf_dev_err_mask - Generation-specific masks of valid bits in
 * the various parity/error status registers; used by the RAS handlers
 * to ignore bits not implemented on a given device.
 */
struct adf_dev_err_mask {
	u32 cppagentcmdpar_mask;
	u32 parerr_ath_cph_mask;
	u32 parerr_cpr_xlt_mask;
	u32 parerr_dcpr_ucs_mask;
	u32 parerr_pke_mask;
	u32 parerr_wat_wcp_mask;
	u32 ssmfeatren_mask;
};
270
271 struct adf_hw_device_data {
272 struct adf_hw_device_class *dev_class;
273 u32 (*get_accel_mask)(struct adf_hw_device_data *self);
274 u32 (*get_ae_mask)(struct adf_hw_device_data *self);
275 u32 (*get_accel_cap)(struct adf_accel_dev *accel_dev);
276 u32 (*get_sram_bar_id)(struct adf_hw_device_data *self);
277 u32 (*get_misc_bar_id)(struct adf_hw_device_data *self);
278 u32 (*get_etr_bar_id)(struct adf_hw_device_data *self);
279 u32 (*get_num_aes)(struct adf_hw_device_data *self);
280 u32 (*get_num_accels)(struct adf_hw_device_data *self);
281 void (*get_arb_info)(struct arb_info *arb_csrs_info);
282 void (*get_admin_info)(struct admin_info *admin_csrs_info);
283 enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
284 u16 (*get_ring_to_svc_map)(struct adf_accel_dev *accel_dev);
285 int (*alloc_irq)(struct adf_accel_dev *accel_dev);
286 void (*free_irq)(struct adf_accel_dev *accel_dev);
287 void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
288 int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
289 void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
290 int (*send_admin_init)(struct adf_accel_dev *accel_dev);
291 int (*start_timer)(struct adf_accel_dev *accel_dev);
292 void (*stop_timer)(struct adf_accel_dev *accel_dev);
293 void (*check_hb_ctrs)(struct adf_accel_dev *accel_dev);
294 uint32_t (*get_hb_clock)(struct adf_hw_device_data *self);
295 int (*measure_clock)(struct adf_accel_dev *accel_dev);
296 int (*init_arb)(struct adf_accel_dev *accel_dev);
297 void (*exit_arb)(struct adf_accel_dev *accel_dev);
298 const u32 *(*get_arb_mapping)(struct adf_accel_dev *accel_dev);
299 int (*init_device)(struct adf_accel_dev *accel_dev);
300 int (*enable_pm)(struct adf_accel_dev *accel_dev);
301 bool (*handle_pm_interrupt)(struct adf_accel_dev *accel_dev);
302 void (*disable_iov)(struct adf_accel_dev *accel_dev);
303 void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
304 bool enable);
305 void (*enable_ints)(struct adf_accel_dev *accel_dev);
306 void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
307 int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
308 int (*bank_state_save)(struct adf_accel_dev *accel_dev, u32 bank_number,
309 struct adf_bank_state *state);
310 int (*bank_state_restore)(struct adf_accel_dev *accel_dev,
311 u32 bank_number, struct adf_bank_state *state);
312 void (*reset_device)(struct adf_accel_dev *accel_dev);
313 void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
314 const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
315 u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev);
316 int (*uof_get_obj_type)(struct adf_accel_dev *accel_dev, u32 obj_num);
317 u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
318 int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask);
319 u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
320 int (*dev_config)(struct adf_accel_dev *accel_dev);
321 bool (*services_supported)(unsigned long mask);
322 u32 (*get_svc_slice_cnt)(struct adf_accel_dev *accel_dev,
323 enum adf_base_services svc);
324 struct adf_pfvf_ops pfvf_ops;
325 struct adf_hw_csr_ops csr_ops;
326 struct adf_dc_ops dc_ops;
327 struct adf_ras_ops ras_ops;
328 struct adf_dev_err_mask dev_err_mask;
329 struct adf_rl_hw_data rl_data;
330 struct adf_tl_hw_data tl_data;
331 struct qat_migdev_ops vfmig_ops;
332 const char *fw_name;
333 const char *fw_mmp_name;
334 u32 fuses[ADF_MAX_FUSES];
335 u32 straps;
336 u32 accel_capabilities_mask;
337 u32 extended_dc_capabilities;
338 u16 fw_capabilities;
339 u32 clock_frequency;
340 u32 instance_id;
341 u16 accel_mask;
342 u32 ae_mask;
343 u32 admin_ae_mask;
344 u16 tx_rings_mask;
345 u16 ring_to_svc_map;
346 u32 thd_to_arb_map[ICP_QAT_HW_AE_DELIMITER];
347 u8 tx_rx_gap;
348 u8 num_banks;
349 u16 num_banks_per_vf;
350 u8 num_rings_per_bank;
351 u8 num_accel;
352 u8 num_logical_accel;
353 u8 num_engines;
354 u32 num_hb_ctrs;
355 u8 num_rps;
356 };
357
/* CSR write macro; raw (non-byteswapping, non-ordered) 32-bit MMIO write */
#define ADF_CSR_WR(csr_base, csr_offset, val) \
	__raw_writel(val, csr_base + csr_offset)
/*
 * CSR write macro to handle cases where the high and low
 * offsets are sparsely located.
 */
#define ADF_CSR_WR64_LO_HI(csr_base, csr_low_offset, csr_high_offset, val) \
do { \
	ADF_CSR_WR(csr_base, csr_low_offset, lower_32_bits(val)); \
	ADF_CSR_WR(csr_base, csr_high_offset, upper_32_bits(val)); \
} while (0)

/* CSR read macro; raw 32-bit MMIO read */
#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)
373
/* Ring-to-service mapping: 4 services, 3 bits each in ring_to_svc_map */
#define ADF_CFG_NUM_SERVICES 4
#define ADF_SRV_TYPE_BIT_LEN 3
#define ADF_SRV_TYPE_MASK 0x7
/* Well-known AE thread numbers */
#define ADF_AE_ADMIN_THREAD 7
#define ADF_NUM_THREADS_PER_AE 8
#define ADF_NUM_PKE_STRAND 2
#define ADF_AE_STRAND0_THREAD 8
#define ADF_AE_STRAND1_THREAD 9

/*
 * Accessor macros for struct adf_accel_dev. All macro arguments and
 * expansions are fully parenthesized so the macros stay safe with
 * arbitrary expression arguments (e.g. &dev or ptr + 1); previously
 * GET_HW_DATA, GET_NUM_RINGS_PER_BANK, GET_TL_DATA and
 * accel_to_pci_dev were not.
 */
#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
#define GET_HW_DATA(accel_dev) ((accel_dev)->hw_device)
#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
#define GET_NUM_RINGS_PER_BANK(accel_dev) \
	(GET_HW_DATA(accel_dev)->num_rings_per_bank)
#define GET_SRV_TYPE(accel_dev, idx) \
	(((GET_HW_DATA(accel_dev)->ring_to_svc_map) >> (ADF_SRV_TYPE_BIT_LEN * (idx))) \
	& ADF_SRV_TYPE_MASK)
#define GET_ERR_MASK(accel_dev) (&GET_HW_DATA(accel_dev)->dev_err_mask)
#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
#define GET_VFMIG_OPS(accel_dev) (&(accel_dev)->hw_device->vfmig_ops)
#define GET_TL_DATA(accel_dev) (GET_HW_DATA(accel_dev)->tl_data)
#define accel_to_pci_dev(accel_ptr) ((accel_ptr)->accel_pci_dev.pci_dev)
400
struct adf_admin_comms;
struct icp_qat_fw_loader_handle;

/*
 * struct adf_fw_loader_data - Firmware images and loader handle.
 * @fw_loader: opaque handle used by the firmware loader
 * @uof_fw: main firmware image (UOF)
 * @mmp_fw: MMP firmware image
 */
struct adf_fw_loader_data {
	struct icp_qat_fw_loader_handle *fw_loader;
	const struct firmware *uof_fw;
	const struct firmware *mmp_fw;
};
408
/*
 * struct adf_accel_vf_info - Per-VF state kept by the PF.
 * @accel_dev: owning PF accelerator device
 * @pf2vf_lock: protect CSR access for PF2VF messages
 * @pfvf_mig_lock: protects PFVF state for migration
 * @vf2pf_ratelimit: rate limiter for messages received from this VF
 * @vf_nr: VF number
 * @init: true once the VF has completed init handshaking
 * @restarting: true while the VF is being restarted
 * @vf_compat_ver: PFVF protocol version reported by the VF
 */
struct adf_accel_vf_info {
	struct adf_accel_dev *accel_dev;
	struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
	struct mutex pfvf_mig_lock; /* protects PFVF state for migration */
	struct ratelimit_state vf2pf_ratelimit;
	u32 vf_nr;
	bool init;
	bool restarting;
	u8 vf_compat_ver;
	/*
	 * Private area used for device migration.
	 * Memory allocation and free is managed by migration driver.
	 */
	void *mig_priv;
};
424
/*
 * struct adf_dc_data - Compression overflow buffer.
 * @ovf_buff: kernel virtual address of the overflow buffer
 * @ovf_buff_sz: buffer size in bytes
 * @ovf_buff_p: DMA address of the buffer
 */
struct adf_dc_data {
	u8 *ovf_buff;
	size_t ovf_buff_sz;
	dma_addr_t ovf_buff_p;
};
430
/*
 * struct adf_pm - Power-management state and debug counters.
 * @debugfs_pm_status: debugfs file exposing PM status
 * @present: true if the device supports power management
 * @idle_irq_counters: count of idle PM interrupts
 * @throttle_irq_counters: count of throttle PM interrupts
 * @fw_irq_counters: count of firmware PM interrupts
 * @host_ack_counter: count of host acknowledgments
 * @host_nack_counter: count of host negative acknowledgments
 * @print_pm_status: formats PM status into @buf for the debugfs read
 */
struct adf_pm {
	struct dentry *debugfs_pm_status;
	bool present;
	int idle_irq_counters;
	int throttle_irq_counters;
	int fw_irq_counters;
	int host_ack_counter;
	int host_nack_counter;
	ssize_t (*print_pm_status)(struct adf_accel_dev *accel_dev,
				   char __user *buf, size_t count, loff_t *pos);
};
442
/*
 * struct adf_sysfs - State backing the device's sysfs attributes.
 * @ring_num: ring number selected through sysfs
 * @lock: protects access to the fields in this struct
 */
struct adf_sysfs {
	int ring_num;
	struct rw_semaphore lock; /* protects access to the fields in this struct */
};
447
/*
 * struct adf_accel_dev - Top-level per-device state of a QAT accelerator.
 *
 * Central structure tying together the transport (rings), the
 * generation-specific hw_device data, configuration, firmware loading,
 * admin channel, telemetry, debugfs entries and the PF- or VF-specific
 * state (selected by @is_vf; the pf/vf union members are mutually
 * exclusive).
 */
struct adf_accel_dev {
	struct adf_etr_data *transport;		/* ring/transport data */
	struct adf_hw_device_data *hw_device;	/* generation-specific ops/data */
	struct adf_cfg_device_data *cfg;	/* device configuration */
	struct adf_fw_loader_data *fw_loader;	/* firmware images + loader */
	struct adf_admin_comms *admin;		/* admin message channel */
	struct adf_telemetry *telemetry;
	struct adf_dc_data *dc_data;		/* compression overflow buffer */
	struct adf_pm power_management;
	struct list_head crypto_list;		/* registered crypto instances */
	struct list_head compression_list;	/* registered compression instances */
	unsigned long status;			/* device status bit flags */
	atomic_t ref_count;
	struct dentry *debugfs_dir;
	struct dentry *fw_cntr_dbgfile;
	struct dentry *cnv_dbgfile;
	struct list_head list;			/* entry in the global device list */
	struct module *owner;
	struct adf_accel_pci accel_pci_dev;
	struct adf_timer *timer;
	struct adf_heartbeat *heartbeat;
	struct adf_rl *rate_limiting;
	struct adf_sysfs sysfs;
	union {
		struct {
			/* protects VF2PF interrupts access */
			spinlock_t vf2pf_ints_lock;
			/* vf_info is non-zero when SR-IOV is init'ed */
			struct adf_accel_vf_info *vf_info;
		} pf;
		struct {
			bool irq_enabled;
			char irq_name[ADF_MAX_MSIX_VECTOR_NAME];
			struct tasklet_struct pf2vf_bh_tasklet;
			struct mutex vf2pf_lock; /* protect CSR access */
			struct completion msg_received;
			struct pfvf_message response; /* temp field holding pf2vf response */
			u8 pf_compat_ver;
		} vf;
	};
	struct adf_error_counters ras_errors;
	struct mutex state_lock; /* protect state of the device */
	bool is_vf;				/* true when this is a virtual function */
	bool autoreset_on_error;		/* reset automatically on fatal error */
	u32 accel_id;				/* unique device index */
};
494 #endif
495