// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019 HiSilicon Limited. */
#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/topology.h>
#include <linux/uacce.h>
#include "hpre.h"

#define CAP_FILE_PERMISSION 0444
#define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0)
#define HPRE_CTRL_CNT_CLR_CE 0x301000
#define HPRE_FSM_MAX_CNT 0x301008
#define HPRE_VFG_AXQOS 0x30100c
#define HPRE_VFG_AXCACHE 0x301010
#define HPRE_RDCHN_INI_CFG 0x301014
#define HPRE_AWUSR_FP_CFG 0x301018
#define HPRE_BD_ENDIAN 0x301020
#define HPRE_ECC_BYPASS 0x301024
#define HPRE_RAS_WIDTH_CFG 0x301028
#define HPRE_POISON_BYPASS 0x30102c
#define HPRE_BD_ARUSR_CFG 0x301030
#define HPRE_BD_AWUSR_CFG 0x301034
#define HPRE_TYPES_ENB 0x301038
#define HPRE_RSA_ENB BIT(0)
#define HPRE_ECC_ENB BIT(1)
#define HPRE_DATA_RUSER_CFG 0x30103c
#define HPRE_DATA_WUSER_CFG 0x301040
#define HPRE_INT_MASK 0x301400
#define HPRE_INT_STATUS 0x301800
#define HPRE_HAC_INT_MSK 0x301400
#define HPRE_HAC_RAS_CE_ENB 0x301410
#define HPRE_HAC_RAS_NFE_ENB 0x301414
#define HPRE_HAC_RAS_FE_ENB 0x301418
#define HPRE_HAC_INT_SET 0x301500
#define HPRE_AXI_ERROR_MASK GENMASK(21, 10)
#define HPRE_RNG_TIMEOUT_NUM 0x301A34
#define HPRE_CORE_INT_ENABLE 0
#define HPRE_RDCHN_INI_ST 0x301a00
#define HPRE_CLSTR_BASE 0x302000
#define HPRE_CORE_EN_OFFSET 0x04
#define HPRE_CORE_INI_CFG_OFFSET 0x20
#define HPRE_CORE_INI_STATUS_OFFSET 0x80
#define HPRE_CORE_HTBT_WARN_OFFSET 0x8c
#define HPRE_CORE_IS_SCHD_OFFSET 0x90

#define HPRE_RAS_CE_ENB 0x301410
#define HPRE_RAS_NFE_ENB 0x301414
#define HPRE_RAS_FE_ENB 0x301418
#define HPRE_OOO_SHUTDOWN_SEL 0x301a3c
#define HPRE_HAC_RAS_FE_ENABLE 0

#define HPRE_CORE_ENB (HPRE_CLSTR_BASE + HPRE_CORE_EN_OFFSET)
#define HPRE_CORE_INI_CFG (HPRE_CLSTR_BASE + HPRE_CORE_INI_CFG_OFFSET)
#define HPRE_CORE_INI_STATUS (HPRE_CLSTR_BASE + HPRE_CORE_INI_STATUS_OFFSET)
#define HPRE_HAC_ECC1_CNT 0x301a04
#define HPRE_HAC_ECC2_CNT 0x301a08
#define HPRE_HAC_SOURCE_INT 0x301600
#define HPRE_CLSTR_ADDR_INTRVL 0x1000
#define HPRE_CLUSTER_INQURY 0x100
#define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104
#define HPRE_PASID_EN_BIT 9
#define HPRE_REG_RD_INTVRL_US 10
#define HPRE_REG_RD_TMOUT_US 1000
#define HPRE_DBGFS_VAL_MAX_LEN 20
#define PCI_DEVICE_ID_HUAWEI_HPRE_PF 0xa258
#define HPRE_QM_USR_CFG_MASK GENMASK(31, 1)
#define HPRE_QM_AXI_CFG_MASK GENMASK(15, 0)
#define HPRE_QM_VFG_AX_MASK GENMASK(7, 0)
#define HPRE_BD_USR_MASK GENMASK(1, 0)
#define HPRE_PREFETCH_CFG 0x301130
#define HPRE_SVA_PREFTCH_DFX 0x30115C
#define HPRE_PREFETCH_ENABLE (~(BIT(0) | BIT(30)))
#define HPRE_PREFETCH_DISABLE BIT(30)
#define HPRE_SVA_DISABLE_READY (BIT(4) | BIT(8))
#define HPRE_SVA_PREFTCH_DFX4 0x301144
#define HPRE_WAIT_SVA_READY 500000
#define HPRE_READ_SVA_STATUS_TIMES 3
#define HPRE_WAIT_US_MIN 10
#define HPRE_WAIT_US_MAX 20

/* clock gate */
#define HPRE_CLKGATE_CTL 0x301a10
#define HPRE_PEH_CFG_AUTO_GATE 0x301a2c
#define HPRE_CLUSTER_DYN_CTL 0x302010
#define HPRE_CORE_SHB_CFG 0x302088
#define HPRE_CLKGATE_CTL_EN BIT(0)
#define HPRE_PEH_CFG_AUTO_GATE_EN BIT(0)
#define HPRE_CLUSTER_DYN_CTL_EN BIT(0)
#define HPRE_CORE_GATE_EN (BIT(30) | BIT(31))

#define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044
#define HPRE_AM_OOO_SHUTDOWN_ENABLE BIT(0)
#define HPRE_WR_MSI_PORT BIT(2)

#define HPRE_CORE_ECC_2BIT_ERR BIT(1)
#define HPRE_OOO_ECC_2BIT_ERR BIT(5)

#define HPRE_QM_BME_FLR BIT(7)
#define HPRE_QM_PM_FLR BIT(11)
#define HPRE_QM_SRIOV_FLR BIT(12)

#define HPRE_SHAPER_TYPE_RATE 640
#define HPRE_VIA_MSI_DSM 1
#define HPRE_SQE_MASK_OFFSET 8
#define HPRE_SQE_MASK_LEN 44
#define HPRE_CTX_Q_NUM_DEF 1

#define HPRE_DFX_BASE 0x301000
#define HPRE_DFX_COMMON1 0x301400
#define HPRE_DFX_COMMON2 0x301A00
#define HPRE_DFX_CORE 0x302000
#define HPRE_DFX_BASE_LEN 0x55
#define HPRE_DFX_COMMON1_LEN 0x41
#define HPRE_DFX_COMMON2_LEN 0xE
#define HPRE_DFX_CORE_LEN 0x43

static const char hpre_name[] = "hisi_hpre";
static struct dentry *hpre_debugfs_root;
static const struct pci_device_id hpre_dev_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_PF) },
	{ PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, hpre_dev_ids);

struct hpre_hw_error {
	u32 int_msk;
	const char *msg;
};

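/*
 * The trailing "\n" in each algorithm name is kept on purpose: the QM
 * core concatenates the enabled entries into one newline-separated
 * string describing the device's supported algorithms.
 */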
static const struct qm_dev_alg hpre_dev_algs[] = {
	{
		.alg_msk = BIT(0),
		.alg = "rsa\n"
	}, {
		.alg_msk = BIT(1),
		.alg = "dh\n"
	}, {
		.alg_msk = BIT(2),
		.alg = "ecdh\n"
	}, {
		.alg_msk = BIT(3),
		.alg = "ecdsa\n"
	}, {
		.alg_msk = BIT(4),
		.alg = "sm2\n"
	}, {
		.alg_msk = BIT(5),
		.alg = "x25519\n"
	}, {
		.alg_msk = BIT(6),
		.alg = "x448\n"
	}, {
		/* sentinel */
	}
};

static struct hisi_qm_list hpre_devices = {
	.register_to_crypto = hpre_algs_register,
	.unregister_from_crypto = hpre_algs_unregister,
};

static const char * const hpre_debug_file_name[] = {
	[HPRE_CLEAR_ENABLE] = "rdclr_en",
	[HPRE_CLUSTER_CTRL] = "cluster_ctrl",
};

enum hpre_cap_type {
	HPRE_QM_NFE_MASK_CAP,
	HPRE_QM_RESET_MASK_CAP,
	HPRE_QM_OOO_SHUTDOWN_MASK_CAP,
	HPRE_QM_CE_MASK_CAP,
	HPRE_NFE_MASK_CAP,
	HPRE_RESET_MASK_CAP,
	HPRE_OOO_SHUTDOWN_MASK_CAP,
	HPRE_CE_MASK_CAP,
	HPRE_CLUSTER_NUM_CAP,
	HPRE_CORE_TYPE_NUM_CAP,
	HPRE_CORE_NUM_CAP,
	HPRE_CLUSTER_CORE_NUM_CAP,
	HPRE_CORE_ENABLE_BITMAP_CAP,
	HPRE_DRV_ALG_BITMAP_CAP,
	HPRE_DEV_ALG_BITMAP_CAP,
	HPRE_CORE1_ALG_BITMAP_CAP,
	HPRE_CORE2_ALG_BITMAP_CAP,
	HPRE_CORE3_ALG_BITMAP_CAP,
	HPRE_CORE4_ALG_BITMAP_CAP,
	HPRE_CORE5_ALG_BITMAP_CAP,
	HPRE_CORE6_ALG_BITMAP_CAP,
	HPRE_CORE7_ALG_BITMAP_CAP,
	HPRE_CORE8_ALG_BITMAP_CAP,
	HPRE_CORE9_ALG_BITMAP_CAP,
	HPRE_CORE10_ALG_BITMAP_CAP
};

static const struct hisi_qm_cap_info hpre_basic_info[] = {
	{HPRE_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C37, 0x7C37},
	{HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37},
	{HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37},
	{HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8},
	{HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFC3E},
	{HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFC3E},
	{HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFC3E},
	{HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1},
	{HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1},
	{HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2},
	{HPRE_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x0, 0x8, 0xA},
	{HPRE_CLUSTER_CORE_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x0, 0x2, 0xA},
	{HPRE_CORE_ENABLE_BITMAP_CAP, 0x3140, 0, GENMASK(31, 0), 0x0, 0xF, 0x3FF},
	{HPRE_DRV_ALG_BITMAP_CAP, 0x3144, 0, GENMASK(31, 0), 0x0, 0x03, 0x27},
	{HPRE_DEV_ALG_BITMAP_CAP, 0x3148, 0, GENMASK(31, 0), 0x0, 0x03, 0x7F},
	{HPRE_CORE1_ALG_BITMAP_CAP, 0x314c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE2_ALG_BITMAP_CAP, 0x3150, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE3_ALG_BITMAP_CAP, 0x3154, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE4_ALG_BITMAP_CAP, 0x3158, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE5_ALG_BITMAP_CAP, 0x315c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE6_ALG_BITMAP_CAP, 0x3160, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE7_ALG_BITMAP_CAP, 0x3164, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE8_ALG_BITMAP_CAP, 0x3168, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F},
	{HPRE_CORE9_ALG_BITMAP_CAP, 0x316c, 0, GENMASK(31, 0), 0x0, 0x10, 0x10},
	{HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}
};

static const struct hisi_qm_cap_query_info hpre_cap_query_info[] = {
	{QM_RAS_NFE_TYPE, "QM_RAS_NFE_TYPE ", 0x3124, 0x0, 0x1C37, 0x7C37},
	{QM_RAS_NFE_RESET, "QM_RAS_NFE_RESET ", 0x3128, 0x0, 0xC77, 0x6C77},
	{QM_RAS_CE_TYPE, "QM_RAS_CE_TYPE ", 0x312C, 0x0, 0x8, 0x8},
	{HPRE_RAS_NFE_TYPE, "HPRE_RAS_NFE_TYPE ", 0x3130, 0x0, 0x3FFFFE, 0x1FFFC3E},
	{HPRE_RAS_NFE_RESET, "HPRE_RAS_NFE_RESET ", 0x3134, 0x0, 0x3FFFFE, 0xBFFC3E},
	{HPRE_RAS_CE_TYPE, "HPRE_RAS_CE_TYPE ", 0x3138, 0x0, 0x1, 0x1},
	{HPRE_CORE_INFO, "HPRE_CORE_INFO ", 0x313c, 0x0, 0x420802, 0x120A0A},
	{HPRE_CORE_EN, "HPRE_CORE_EN ", 0x3140, 0x0, 0xF, 0x3FF},
	{HPRE_DRV_ALG_BITMAP, "HPRE_DRV_ALG_BITMAP ", 0x3144, 0x0, 0x03, 0x27},
	{HPRE_ALG_BITMAP, "HPRE_ALG_BITMAP ", 0x3148, 0x0, 0x03, 0x7F},
	{HPRE_CORE1_BITMAP_CAP, "HPRE_CORE1_BITMAP_CAP ", 0x314c, 0x0, 0x7F, 0x7F},
	{HPRE_CORE2_BITMAP_CAP, "HPRE_CORE2_BITMAP_CAP ", 0x3150, 0x0, 0x7F, 0x7F},
	{HPRE_CORE3_BITMAP_CAP, "HPRE_CORE3_BITMAP_CAP ", 0x3154, 0x0, 0x7F, 0x7F},
	{HPRE_CORE4_BITMAP_CAP, "HPRE_CORE4_BITMAP_CAP ", 0x3158, 0x0, 0x7F, 0x7F},
	{HPRE_CORE5_BITMAP_CAP, "HPRE_CORE5_BITMAP_CAP ", 0x315c, 0x0, 0x7F, 0x7F},
	{HPRE_CORE6_BITMAP_CAP, "HPRE_CORE6_BITMAP_CAP ", 0x3160, 0x0, 0x7F, 0x7F},
	{HPRE_CORE7_BITMAP_CAP, "HPRE_CORE7_BITMAP_CAP ", 0x3164, 0x0, 0x7F, 0x7F},
	{HPRE_CORE8_BITMAP_CAP, "HPRE_CORE8_BITMAP_CAP ", 0x3168, 0x0, 0x7F, 0x7F},
	{HPRE_CORE9_BITMAP_CAP, "HPRE_CORE9_BITMAP_CAP ", 0x316c, 0x0, 0x10, 0x10},
	{HPRE_CORE10_BITMAP_CAP, "HPRE_CORE10_BITMAP_CAP ", 0x3170, 0x0, 0x10, 0x10},
};

static const struct hpre_hw_error hpre_hw_errors[] = {
	{
		.int_msk = BIT(0),
		.msg = "core_ecc_1bit_err_int_set"
	}, {
		.int_msk = BIT(1),
		.msg = "core_ecc_2bit_err_int_set"
	}, {
		.int_msk = BIT(2),
		.msg = "dat_wb_poison_int_set"
	}, {
		.int_msk = BIT(3),
		.msg = "dat_rd_poison_int_set"
	}, {
		.int_msk = BIT(4),
		.msg = "bd_rd_poison_int_set"
	}, {
		.int_msk = BIT(5),
		.msg = "ooo_ecc_2bit_err_int_set"
	}, {
		.int_msk = BIT(6),
		.msg = "cluster1_shb_timeout_int_set"
	}, {
		.int_msk = BIT(7),
		.msg = "cluster2_shb_timeout_int_set"
	}, {
		.int_msk = BIT(8),
		.msg = "cluster3_shb_timeout_int_set"
	}, {
		.int_msk = BIT(9),
		.msg = "cluster4_shb_timeout_int_set"
	}, {
		.int_msk = GENMASK(15, 10),
		.msg = "ooo_rdrsp_err_int_set"
	}, {
		.int_msk = GENMASK(21, 16),
		.msg = "ooo_wrrsp_err_int_set"
	}, {
		.int_msk = BIT(22),
		.msg = "pt_rng_timeout_int_set"
	}, {
		.int_msk = BIT(23),
		.msg = "sva_fsm_timeout_int_set"
	}, {
		.int_msk = BIT(24),
		.msg = "sva_int_set"
	}, {
		/* sentinel */
	}
};

static const u64 hpre_cluster_offsets[] = {
	[HPRE_CLUSTER0] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER0 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER1] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER1 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER2] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER2 * HPRE_CLSTR_ADDR_INTRVL,
	[HPRE_CLUSTER3] =
		HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL,
};

static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = {
	{"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET},
	{"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET},
	{"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET},
	{"CORES_HTBT_WARN ", HPRE_CORE_HTBT_WARN_OFFSET},
	{"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET},
};

static const struct debugfs_reg32 hpre_com_dfx_regs[] = {
	{"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE},
	{"AXQOS ", HPRE_VFG_AXQOS},
	{"AWUSR_CFG ", HPRE_AWUSR_FP_CFG},
	{"BD_ENDIAN ", HPRE_BD_ENDIAN},
	{"ECC_CHECK_CTRL ", HPRE_ECC_BYPASS},
	{"RAS_INT_WIDTH ", HPRE_RAS_WIDTH_CFG},
	{"POISON_BYPASS ", HPRE_POISON_BYPASS},
	{"BD_ARUSER ", HPRE_BD_ARUSR_CFG},
	{"BD_AWUSER ", HPRE_BD_AWUSR_CFG},
	{"DATA_ARUSER ", HPRE_DATA_RUSER_CFG},
	{"DATA_AWUSER ", HPRE_DATA_WUSER_CFG},
	{"INT_STATUS ", HPRE_INT_STATUS},
	{"INT_MASK ", HPRE_HAC_INT_MSK},
	{"RAS_CE_ENB ", HPRE_HAC_RAS_CE_ENB},
	{"RAS_NFE_ENB ", HPRE_HAC_RAS_NFE_ENB},
	{"RAS_FE_ENB ", HPRE_HAC_RAS_FE_ENB},
	{"INT_SET ", HPRE_HAC_INT_SET},
	{"RNG_TIMEOUT_NUM ", HPRE_RNG_TIMEOUT_NUM},
};

static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = {
	"send_cnt",
	"recv_cnt",
	"send_fail_cnt",
	"send_busy_cnt",
	"over_thrhld_cnt",
	"overtime_thrhld",
	"invalid_req_cnt"
};

/* define the HPRE's dfx regs region and region length */
static struct dfx_diff_registers hpre_diff_regs[] = {
	{
		.reg_offset = HPRE_DFX_BASE,
		.reg_len = HPRE_DFX_BASE_LEN,
	}, {
		.reg_offset = HPRE_DFX_COMMON1,
		.reg_len = HPRE_DFX_COMMON1_LEN,
	}, {
		.reg_offset = HPRE_DFX_COMMON2,
		.reg_len = HPRE_DFX_COMMON2_LEN,
	}, {
		.reg_offset = HPRE_DFX_CORE,
		.reg_len = HPRE_DFX_CORE_LEN,
	},
};

static const struct hisi_qm_err_ini hpre_err_ini;

bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg)
{
	u32 cap_val;

	cap_val = qm->cap_tables.dev_cap_table[HPRE_DRV_ALG_BITMAP].cap_val;
	if (alg & cap_val)
		return true;

	return false;
}

static int hpre_diff_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;

	hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs,
				   ARRAY_SIZE(hpre_diff_regs));

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(hpre_diff_regs);

static int hpre_com_regs_show(struct seq_file *s, void *unused)
{
	hisi_qm_regs_dump(s, s->private);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(hpre_com_regs);

static int hpre_cluster_regs_show(struct seq_file *s, void *unused)
{
	hisi_qm_regs_dump(s, s->private);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(hpre_cluster_regs);

static const struct kernel_param_ops hpre_uacce_mode_ops = {
	.set = uacce_mode_set,
	.get = param_get_int,
};

/*
 * uacce_mode = 0 means hpre registers only to crypto,
 * uacce_mode = 1 means hpre registers to both crypto and uacce.
 */
static u32 uacce_mode = UACCE_MODE_NOUACCE;
module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444);
MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC);
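/* e.g. "modprobe hisi_hpre uacce_mode=1" also exposes the device via uacce */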

static bool pf_q_num_flag;
static int pf_q_num_set(const char *val, const struct kernel_param *kp)
{
	pf_q_num_flag = true;

	return hisi_qm_q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF);
}

static const struct kernel_param_ops hpre_pf_q_num_ops = {
	.set = pf_q_num_set,
	.get = param_get_int,
};

static u32 pf_q_num = HPRE_PF_DEF_Q_NUM;
module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444);
MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of HPRE(2-1024)");

static const struct kernel_param_ops vfs_num_ops = {
	.set = vfs_num_set,
	.get = param_get_int,
};

static u32 vfs_num;
module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444);
MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)");
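/*
 * All three parameters above are read-only at runtime (0444); e.g.
 * "modprobe hisi_hpre pf_q_num=256 vfs_num=7" (values chosen here only
 * for illustration) sets the PF queue count and enables seven VFs.
 */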

struct hisi_qp *hpre_create_qp(u8 type)
{
	int node = cpu_to_node(raw_smp_processor_id());
	struct hisi_qp *qp = NULL;
	int ret;

	if (type != HPRE_V2_ALG_TYPE && type != HPRE_V3_ECC_ALG_TYPE)
		return NULL;

	/*
	 * type: 0 - RSA/DH algorithms supported in V2,
	 *       1 - ECC algorithm in V3.
	 */
	ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, &type, node, &qp);
	if (!ret)
		return qp;

	return NULL;
}

static int hpre_wait_sva_ready(struct hisi_qm *qm)
{
	u32 val, try_times = 0;
	u8 count = 0;

	/*
	 * Read the register value every 10-20us. If the value is 0 for three
	 * consecutive times, the SVA module is ready.
	 */
	do {
		val = readl(qm->io_base + HPRE_SVA_PREFTCH_DFX4);
		if (val)
			count = 0;
		else if (++count == HPRE_READ_SVA_STATUS_TIMES)
			break;

		usleep_range(HPRE_WAIT_US_MIN, HPRE_WAIT_US_MAX);
	} while (++try_times < HPRE_WAIT_SVA_READY);

	if (try_times == HPRE_WAIT_SVA_READY) {
		pci_err(qm->pdev, "failed to wait sva prefetch ready\n");
		return -ETIMEDOUT;
	}

	return 0;
}

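/*
 * Kunpeng 920 (HW v2) only: mirror the SVA state into the PASID enable
 * bit of the data ARUSER/AWUSER config registers. HW v3 and later
 * return early and do not need this.
 */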
static void hpre_config_pasid(struct hisi_qm *qm)
{
	u32 val1, val2;

	if (qm->ver >= QM_HW_V3)
		return;

	val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG);
	val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG);
	if (qm->use_sva) {
		val1 |= BIT(HPRE_PASID_EN_BIT);
		val2 |= BIT(HPRE_PASID_EN_BIT);
	} else {
		val1 &= ~BIT(HPRE_PASID_EN_BIT);
		val2 &= ~BIT(HPRE_PASID_EN_BIT);
	}
	writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG);
	writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG);
}

static int hpre_cfg_by_dsm(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	union acpi_object *obj;
	guid_t guid;

	if (guid_parse("b06b81ab-0134-4a45-9b0c-483447b95fa7", &guid)) {
		dev_err(dev, "Failed to parse HPRE GUID!\n");
		return -EINVAL;
	}

	/* Switch over to MSI handling due to non-standard PCI implementation */
	obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid,
				0, HPRE_VIA_MSI_DSM, NULL);
	if (!obj) {
		dev_err(dev, "ACPI handle failed!\n");
		return -EIO;
	}

	ACPI_FREE(obj);

	return 0;
}

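/*
 * Enable the cores selected by the capability bitmap in each cluster,
 * kick off cluster initialization, and poll the init status register
 * until every enabled core reports ready or the read times out.
 */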
static int hpre_set_cluster(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 cluster_core_mask;
	unsigned long offset;
	u32 hpre_core_info;
	u8 clusters_num;
	u32 val = 0;
	int ret, i;

	cluster_core_mask = qm->cap_tables.dev_cap_table[HPRE_CORE_EN].cap_val;
	hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
	clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
			hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
	for (i = 0; i < clusters_num; i++) {
		offset = i * HPRE_CLSTR_ADDR_INTRVL;

		/* initialize the cores in this cluster */
		writel(cluster_core_mask,
		       qm->io_base + offset + HPRE_CORE_ENB);
		writel(0x1, qm->io_base + offset + HPRE_CORE_INI_CFG);
		ret = readl_relaxed_poll_timeout(qm->io_base + offset +
					HPRE_CORE_INI_STATUS, val,
					((val & cluster_core_mask) ==
					cluster_core_mask),
					HPRE_REG_RD_INTVRL_US,
					HPRE_REG_RD_TMOUT_US);
		if (ret) {
			dev_err(dev,
				"cluster %d init status timeout!\n", i);
			return -ETIMEDOUT;
		}
	}

	return 0;
}

/*
 * For Kunpeng 920, we should disable FLR triggered by hardware (BME/PM/SRIOV).
 * Otherwise the device may stay in D3 state when we bind and unbind hpre
 * quickly, as the hardware-triggered FLR kicks in.
 */
static void disable_flr_of_bme(struct hisi_qm *qm)
{
	u32 val;

	val = readl(qm->io_base + QM_PEH_AXUSER_CFG);
	val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR);
	val |= HPRE_QM_PM_FLR;
	writel(val, qm->io_base + QM_PEH_AXUSER_CFG);
	writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE);
}

static void hpre_close_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
	val |= HPRE_PREFETCH_DISABLE;
	writel(val, qm->io_base + HPRE_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX,
					 val, !(val & HPRE_SVA_DISABLE_READY),
					 HPRE_REG_RD_INTVRL_US,
					 HPRE_REG_RD_TMOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to close sva prefetch\n");

	(void)hpre_wait_sva_ready(qm);
}

static void hpre_open_sva_prefetch(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps))
		return;

	/* Enable prefetch */
	val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG);
	val &= HPRE_PREFETCH_ENABLE;
	writel(val, qm->io_base + HPRE_PREFETCH_CFG);

	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG,
					 val, !(val & HPRE_PREFETCH_DISABLE),
					 HPRE_REG_RD_INTVRL_US,
					 HPRE_REG_RD_TMOUT_US);
	if (ret) {
		pci_err(qm->pdev, "failed to open sva prefetch\n");
		hpre_close_sva_prefetch(qm);
		return;
	}

	ret = hpre_wait_sva_ready(qm);
	if (ret)
		hpre_close_sva_prefetch(qm);
}

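/*
 * HW v3 and later: turn on dynamic clock gating at the controller level
 * (global gate and PEH auto-gate), then per cluster (cluster dynamic
 * control and the core shb clock gate bits).
 */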
static void hpre_enable_clock_gate(struct hisi_qm *qm)
{
	unsigned long offset;
	u8 clusters_num, i;
	u32 hpre_core_info;
	u32 val;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + HPRE_CLKGATE_CTL);
	val |= HPRE_CLKGATE_CTL_EN;
	writel(val, qm->io_base + HPRE_CLKGATE_CTL);

	val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
	val |= HPRE_PEH_CFG_AUTO_GATE_EN;
	writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);

	hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
	clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
			hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
	for (i = 0; i < clusters_num; i++) {
		offset = (unsigned long)i * HPRE_CLSTR_ADDR_INTRVL;
		val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
		val |= HPRE_CLUSTER_DYN_CTL_EN;
		writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);

		val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG);
		val |= HPRE_CORE_GATE_EN;
		writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG);
	}
}

static void hpre_disable_clock_gate(struct hisi_qm *qm)
{
	unsigned long offset;
	u8 clusters_num, i;
	u32 hpre_core_info;
	u32 val;

	if (qm->ver < QM_HW_V3)
		return;

	val = readl(qm->io_base + HPRE_CLKGATE_CTL);
	val &= ~HPRE_CLKGATE_CTL_EN;
	writel(val, qm->io_base + HPRE_CLKGATE_CTL);

	val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE);
	val &= ~HPRE_PEH_CFG_AUTO_GATE_EN;
	writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE);

	hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
	clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
			hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
	for (i = 0; i < clusters_num; i++) {
		offset = (unsigned long)i * HPRE_CLSTR_ADDR_INTRVL;
		val = readl(qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);
		val &= ~HPRE_CLUSTER_DYN_CTL_EN;
		writel(val, qm->io_base + offset + HPRE_CLUSTER_DYN_CTL);

		val = readl(qm->io_base + offset + HPRE_CORE_SHB_CFG);
		val &= ~HPRE_CORE_GATE_EN;
		writel(val, qm->io_base + offset + HPRE_CORE_SHB_CFG);
	}
}

static int hpre_set_user_domain_and_cache(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 val;
	int ret;

	/* disable the dynamic clock gate before SRAM init */
	hpre_disable_clock_gate(qm);

	writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_ARUSER_M_CFG_ENABLE);
	writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE);
	writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG);

	if (qm->ver >= QM_HW_V3)
		writel(HPRE_RSA_ENB | HPRE_ECC_ENB,
		       qm->io_base + HPRE_TYPES_ENB);
	else
		writel(HPRE_RSA_ENB, qm->io_base + HPRE_TYPES_ENB);

	writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE);
	writel(0x0, qm->io_base + HPRE_BD_ENDIAN);
	writel(0x0, qm->io_base + HPRE_POISON_BYPASS);
	writel(0x0, qm->io_base + HPRE_ECC_BYPASS);

	writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG);
	writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_AWUSR_CFG);
	writel(0x1, qm->io_base + HPRE_RDCHN_INI_CFG);
	ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_RDCHN_INI_ST, val,
					 val & BIT(0),
					 HPRE_REG_RD_INTVRL_US,
					 HPRE_REG_RD_TMOUT_US);
	if (ret) {
		dev_err(dev, "rd channel init status timeout!\n");
		return -ETIMEDOUT;
	}

	ret = hpre_set_cluster(qm);
	if (ret)
		return -ETIMEDOUT;

	/* This setting is only needed by Kunpeng 920. */
	if (qm->ver == QM_HW_V2) {
		ret = hpre_cfg_by_dsm(qm);
		if (ret)
			return ret;

		disable_flr_of_bme(qm);
	}

	/* Config data buffer pasid needed by Kunpeng 920 */
	hpre_config_pasid(qm);
	hpre_open_sva_prefetch(qm);

	hpre_enable_clock_gate(qm);

	return ret;
}

static void hpre_cnt_regs_clear(struct hisi_qm *qm)
{
	unsigned long offset;
	u32 hpre_core_info;
	u8 clusters_num;
	int i;

	/* clear clusterX/cluster_ctrl */
	hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
	clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
			hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
	for (i = 0; i < clusters_num; i++) {
		offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL;
		writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY);
	}

	/* clear rdclr_en */
	writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE);

	hisi_qm_debug_regs_clear(qm);
}

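/*
 * Control whether the AXI master is blocked (out-of-order transactions
 * shut down) when an uncorrected error occurs: program the shutdown
 * source select on HW > v2 and toggle the global shutdown enable bit.
 */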
static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
	u32 val1, val2;

	val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	if (enable) {
		val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
		val2 = qm->err_info.dev_err.shutdown_mask;
	} else {
		val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
		val2 = 0x0;
	}

	if (qm->ver > QM_HW_V2)
		writel(val2, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);

	writel(val1, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}

static void hpre_hw_error_disable(struct hisi_qm *qm)
{
	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;

	/* disable hpre hw error interrupts */
	writel(err_mask, qm->io_base + HPRE_INT_MASK);
	/* disable HPRE block master OOO when nfe occurs on Kunpeng930 */
	hpre_master_ooo_ctrl(qm, false);
}

static void hpre_hw_error_enable(struct hisi_qm *qm)
{
	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;

	/* clear HPRE hw error source if any */
	writel(err_mask, qm->io_base + HPRE_HAC_SOURCE_INT);

	/* configure error type */
	writel(dev_err->ce, qm->io_base + HPRE_RAS_CE_ENB);
	writel(dev_err->nfe, qm->io_base + HPRE_RAS_NFE_ENB);
	writel(dev_err->fe, qm->io_base + HPRE_RAS_FE_ENB);

	/* enable HPRE block master OOO when nfe occurs on Kunpeng930 */
	hpre_master_ooo_ctrl(qm, true);

	/* enable hpre hw error interrupts */
	writel(~err_mask, qm->io_base + HPRE_INT_MASK);
}

static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
{
	struct hpre *hpre = container_of(file->debug, struct hpre, debug);

	return &hpre->qm;
}

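/*
 * "rdclr_en" debugfs file: bit 0 of HPRE_CTRL_CNT_CLR_CE selects whether
 * the DFX counter registers are cleared on read.
 */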
static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);

	return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
	       HPRE_CTRL_CNT_CLR_CE_BIT;
}

static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	u32 tmp;

	if (val != 1 && val != 0)
		return -EINVAL;

	tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) &
	       ~HPRE_CTRL_CNT_CLR_CE_BIT) | val;
	writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE);

	return 0;
}

static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	int cluster_index = file->index - HPRE_CLUSTER_CTRL;
	unsigned long offset = HPRE_CLSTR_BASE +
			       cluster_index * HPRE_CLSTR_ADDR_INTRVL;

	return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT);
}

static void hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = hpre_file_to_qm(file);
	int cluster_index = file->index - HPRE_CLUSTER_CTRL;
	unsigned long offset = HPRE_CLSTR_BASE + cluster_index *
			       HPRE_CLSTR_ADDR_INTRVL;

	writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY);
}

static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf,
				    size_t count, loff_t *pos)
{
	struct hpre_debugfs_file *file = filp->private_data;
	struct hisi_qm *qm = hpre_file_to_qm(file);
	char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
	u32 val;
	int ret;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);
	switch (file->type) {
	case HPRE_CLEAR_ENABLE:
		val = hpre_clear_enable_read(file);
		break;
	case HPRE_CLUSTER_CTRL:
		val = hpre_cluster_inqry_read(file);
		break;
	default:
		goto err_input;
	}
	spin_unlock_irq(&file->lock);

	hisi_qm_put_dfx_access(qm);
	ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return -EINVAL;
}

static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf,
				     size_t count, loff_t *pos)
{
	struct hpre_debugfs_file *file = filp->private_data;
	struct hisi_qm *qm = hpre_file_to_qm(file);
	char tbuf[HPRE_DBGFS_VAL_MAX_LEN];
	unsigned long val;
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= HPRE_DBGFS_VAL_MAX_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, HPRE_DBGFS_VAL_MAX_LEN - 1,
				     pos, buf, count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	ret = hisi_qm_get_dfx_access(qm);
	if (ret)
		return ret;

	spin_lock_irq(&file->lock);
	switch (file->type) {
	case HPRE_CLEAR_ENABLE:
		ret = hpre_clear_enable_write(file, val);
		if (ret)
			goto err_input;
		break;
	case HPRE_CLUSTER_CTRL:
		hpre_cluster_inqry_write(file, val);
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}

	ret = count;

err_input:
	spin_unlock_irq(&file->lock);
	hisi_qm_put_dfx_access(qm);
	return ret;
}

static const struct file_operations hpre_ctrl_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = hpre_ctrl_debug_read,
	.write = hpre_ctrl_debug_write,
};

static int hpre_debugfs_atomic64_get(void *data, u64 *val)
{
	struct hpre_dfx *dfx_item = data;

	*val = atomic64_read(&dfx_item->value);

	return 0;
}

static int hpre_debugfs_atomic64_set(void *data, u64 val)
{
	struct hpre_dfx *dfx_item = data;
	struct hpre_dfx *hpre_dfx = NULL;

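	/*
	 * All DFX files share one array indexed by type. Writing the
	 * overtime threshold also zeroes the over-threshold counter, so
	 * step back to the array base before indexing the counter entry.
	 */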
	if (dfx_item->type == HPRE_OVERTIME_THRHLD) {
		hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD;
		atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0);
	} else if (val) {
		return -EINVAL;
	}

	atomic64_set(&dfx_item->value, val);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get,
			 hpre_debugfs_atomic64_set, "%llu\n");

static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
				    enum hpre_ctrl_dbgfs_file type, int indx)
{
	struct hpre *hpre = container_of(qm, struct hpre, qm);
	struct hpre_debug *dbg = &hpre->debug;
	struct dentry *file_dir;

	if (dir)
		file_dir = dir;
	else
		file_dir = qm->debug.debug_root;

	if (type >= HPRE_DEBUG_FILE_NUM)
		return -EINVAL;

	spin_lock_init(&dbg->files[indx].lock);
	dbg->files[indx].debug = dbg;
	dbg->files[indx].type = type;
	dbg->files[indx].index = indx;
	debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir,
			    dbg->files + indx, &hpre_ctrl_debug_fops);

	return 0;
}

static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	struct debugfs_regset32 *regset;

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = hpre_com_dfx_regs;
	regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs);
	regset->base = qm->io_base;
	regset->dev = dev;

	debugfs_create_file("regs", 0444, qm->debug.debug_root,
			    regset, &hpre_com_regs_fops);

	return 0;
}

static int hpre_cluster_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	char buf[HPRE_DBGFS_VAL_MAX_LEN];
	struct debugfs_regset32 *regset;
	struct dentry *tmp_d;
	u32 hpre_core_info;
	u8 clusters_num;
	int i, ret;

	hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
	clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
			hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
	for (i = 0; i < clusters_num; i++) {
		ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i);
		if (ret >= HPRE_DBGFS_VAL_MAX_LEN)
			return -EINVAL;
		tmp_d = debugfs_create_dir(buf, qm->debug.debug_root);

		regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
		if (!regset)
			return -ENOMEM;

		regset->regs = hpre_cluster_dfx_regs;
		regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs);
		regset->base = qm->io_base + hpre_cluster_offsets[i];
		regset->dev = dev;

		debugfs_create_file("regs", 0444, tmp_d, regset,
				    &hpre_cluster_regs_fops);
		ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL,
					       i + HPRE_CLUSTER_CTRL);
		if (ret)
			return ret;
	}

	return 0;
}

static int hpre_ctrl_debug_init(struct hisi_qm *qm)
{
	int ret;

	ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE,
				       HPRE_CLEAR_ENABLE);
	if (ret)
		return ret;

	ret = hpre_pf_comm_regs_debugfs_init(qm);
	if (ret)
		return ret;

	return hpre_cluster_debugfs_init(qm);
}

static int hpre_cap_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;
	u32 i, size;

	size = qm->cap_tables.qm_cap_size;
	for (i = 0; i < size; i++)
		seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.qm_cap_table[i].name,
			   qm->cap_tables.qm_cap_table[i].cap_val);

	size = qm->cap_tables.dev_cap_size;
	for (i = 0; i < size; i++)
		seq_printf(s, "%s= 0x%08x\n", qm->cap_tables.dev_cap_table[i].name,
			   qm->cap_tables.dev_cap_table[i].cap_val);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(hpre_cap_regs);

static void hpre_dfx_debug_init(struct hisi_qm *qm)
{
	struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs;
	struct hpre *hpre = container_of(qm, struct hpre, qm);
	struct hpre_dfx *dfx = hpre->debug.dfx;
	struct dentry *parent;
	int i;

	parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root);
	for (i = 0; i < HPRE_DFX_FILE_NUM; i++) {
		dfx[i].type = i;
		debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i],
				    &hpre_atomic64_ops);
	}

	if (qm->fun_type == QM_HW_PF && hpre_regs)
		debugfs_create_file("diff_regs", 0444, parent,
				    qm, &hpre_diff_regs_fops);

	debugfs_create_file("cap_regs", CAP_FILE_PERMISSION,
			    qm->debug.debug_root, qm, &hpre_cap_regs_fops);
}

static int hpre_debugfs_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	ret = hisi_qm_regs_debugfs_init(qm, hpre_diff_regs, ARRAY_SIZE(hpre_diff_regs));
	if (ret) {
		dev_warn(dev, "Failed to init HPRE diff regs!\n");
		return ret;
	}

	qm->debug.debug_root = debugfs_create_dir(dev_name(dev),
						  hpre_debugfs_root);
	qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET;
	qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN;

	hisi_qm_debug_init(qm);

	if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) {
		ret = hpre_ctrl_debug_init(qm);
		if (ret)
			goto debugfs_remove;
	}

	hpre_dfx_debug_init(qm);

	return 0;

debugfs_remove:
	debugfs_remove_recursive(qm->debug.debug_root);
	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
	return ret;
}

static void hpre_debugfs_exit(struct hisi_qm *qm)
{
	debugfs_remove_recursive(qm->debug.debug_root);

	hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs));
}

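/*
 * Read every capability register once at probe time and cache the
 * values in qm->cap_tables, so later paths can look them up without
 * re-reading the hardware.
 */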
static int hpre_pre_store_cap_reg(struct hisi_qm *qm)
{
	struct hisi_qm_cap_record *hpre_cap;
	struct device *dev = &qm->pdev->dev;
	u32 hpre_core_info;
	u8 clusters_num;
	size_t i, size;

	size = ARRAY_SIZE(hpre_cap_query_info);
	hpre_cap = devm_kcalloc(dev, size, sizeof(*hpre_cap), GFP_KERNEL);
	if (!hpre_cap)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		hpre_cap[i].type = hpre_cap_query_info[i].type;
		hpre_cap[i].name = hpre_cap_query_info[i].name;
		hpre_cap[i].cap_val = hisi_qm_get_cap_value(qm, hpre_cap_query_info,
							    i, qm->cap_ver);
	}

	hpre_core_info = hpre_cap[HPRE_CORE_INFO].cap_val;
	clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
			hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
	if (clusters_num > HPRE_CLUSTERS_NUM_MAX) {
		dev_err(dev, "Device cluster num %u exceeds the driver maximum %d!\n",
			clusters_num, HPRE_CLUSTERS_NUM_MAX);
		return -EINVAL;
	}

	qm->cap_tables.dev_cap_table = hpre_cap;
	qm->cap_tables.dev_cap_size = size;

	return 0;
}

static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev)
{
	u64 alg_msk;
	int ret;

	if (pdev->revision == QM_HW_V1) {
		pci_warn(pdev, "HPRE version 1 is not supported!\n");
		return -EINVAL;
	}

	qm->mode = uacce_mode;
	qm->pdev = pdev;
	qm->sqe_size = HPRE_SQE_SIZE;
	qm->dev_name = hpre_name;

	qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) ?
			QM_HW_PF : QM_HW_VF;
	if (qm->fun_type == QM_HW_PF) {
		qm->qp_base = HPRE_PF_DEF_Q_BASE;
		qm->qp_num = pf_q_num;
		qm->debug.curr_qm_qp_num = pf_q_num;
		qm->qm_list = &hpre_devices;
		qm->err_ini = &hpre_err_ini;
		if (pf_q_num_flag)
			set_bit(QM_MODULE_PARAM, &qm->misc_ctl);
	}

	ret = hisi_qm_init(qm);
	if (ret) {
		pci_err(pdev, "Failed to init hpre qm configuration!\n");
		return ret;
	}

	/* Fetch and save the value of capability registers */
	ret = hpre_pre_store_cap_reg(qm);
	if (ret) {
		pci_err(pdev, "Failed to pre-store capability registers!\n");
		hisi_qm_uninit(qm);
		return ret;
	}

	alg_msk = qm->cap_tables.dev_cap_table[HPRE_ALG_BITMAP].cap_val;
	ret = hisi_qm_set_algs(qm, alg_msk, hpre_dev_algs, ARRAY_SIZE(hpre_dev_algs));
	if (ret) {
		pci_err(pdev, "Failed to set hpre algs!\n");
		hisi_qm_uninit(qm);
	}

	return ret;
}

static int hpre_show_last_regs_init(struct hisi_qm *qm)
{
	int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs);
	int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
	struct qm_debug *debug = &qm->debug;
	void __iomem *io_base;
	u32 hpre_core_info;
	u8 clusters_num;
	int i, j, idx;

	hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
	clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
			hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
	debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num +
				    com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL);
	if (!debug->last_words)
		return -ENOMEM;

	for (i = 0; i < com_dfx_regs_num; i++)
		debug->last_words[i] = readl_relaxed(qm->io_base +
						     hpre_com_dfx_regs[i].offset);

	for (i = 0; i < clusters_num; i++) {
		io_base = qm->io_base + hpre_cluster_offsets[i];
		for (j = 0; j < cluster_dfx_regs_num; j++) {
			idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j;
			debug->last_words[idx] = readl_relaxed(
				io_base + hpre_cluster_dfx_regs[j].offset);
		}
	}

	return 0;
}

static void hpre_show_last_regs_uninit(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	kfree(debug->last_words);
	debug->last_words = NULL;
}

static void hpre_show_last_dfx_regs(struct hisi_qm *qm)
{
	int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs);
	int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs);
	struct qm_debug *debug = &qm->debug;
	struct pci_dev *pdev = qm->pdev;
	void __iomem *io_base;
	u32 hpre_core_info;
	u8 clusters_num;
	int i, j, idx;
	u32 val;

	if (qm->fun_type == QM_HW_VF || !debug->last_words)
		return;

	/* dump the last words of the debugging registers during controller reset */
	for (i = 0; i < com_dfx_regs_num; i++) {
		val = readl_relaxed(qm->io_base + hpre_com_dfx_regs[i].offset);
		if (debug->last_words[i] != val)
			pci_info(pdev, "Common_core:%s \t= 0x%08x => 0x%08x\n",
				 hpre_com_dfx_regs[i].name, debug->last_words[i], val);
	}

	hpre_core_info = qm->cap_tables.dev_cap_table[HPRE_CORE_INFO].cap_val;
	clusters_num = (hpre_core_info >> hpre_basic_info[HPRE_CLUSTER_NUM_CAP].shift) &
			hpre_basic_info[HPRE_CLUSTER_NUM_CAP].mask;
	for (i = 0; i < clusters_num; i++) {
		io_base = qm->io_base + hpre_cluster_offsets[i];
		for (j = 0; j < cluster_dfx_regs_num; j++) {
			val = readl_relaxed(io_base +
					    hpre_cluster_dfx_regs[j].offset);
			idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j;
			if (debug->last_words[idx] != val)
				pci_info(pdev, "cluster-%d:%s \t= 0x%08x => 0x%08x\n",
					 i, hpre_cluster_dfx_regs[j].name, debug->last_words[idx], val);
		}
	}
}

static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts)
{
	const struct hpre_hw_error *err = hpre_hw_errors;
	struct device *dev = &qm->pdev->dev;

	while (err->msg) {
		if (err->int_msk & err_sts)
			dev_warn(dev, "%s [error status=0x%x] found\n",
				 err->msg, err->int_msk);
		err++;
	}
}

static u32 hpre_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + HPRE_INT_STATUS);
}

static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT);
}

static void hpre_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
	u32 nfe_mask = qm->err_info.dev_err.nfe;

	writel(nfe_mask & (~err_type), qm->io_base + HPRE_RAS_NFE_ENB);
}

static void hpre_enable_error_report(struct hisi_qm *qm)
{
	u32 nfe_mask = qm->err_info.dev_err.nfe;
	u32 ce_mask = qm->err_info.dev_err.ce;

	writel(nfe_mask, qm->io_base + HPRE_RAS_NFE_ENB);
	writel(ce_mask, qm->io_base + HPRE_RAS_CE_ENB);
}

static void hpre_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 value;

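	/* Re-arm master-OOO shutdown: clear the enable bit, then set it again. */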
	value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	writel(value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE,
	       qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
	writel(value | HPRE_AM_OOO_SHUTDOWN_ENABLE,
	       qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
}

static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
{
	u32 err_status;

	err_status = hpre_get_hw_err_status(qm);
	if (err_status) {
		if (err_status & qm->err_info.dev_err.ecc_2bits_mask)
			qm->err_status.is_dev_ecc_mbit = true;
		hpre_log_hw_error(qm, err_status);

		if (err_status & qm->err_info.dev_err.reset_mask) {
			/* Disable the same error reporting until device is recovered. */
			hpre_disable_error_report(qm, err_status);
			return ACC_ERR_NEED_RESET;
		}
		hpre_clear_hw_err_status(qm, err_status);
		/* The firmware may have disabled error reporting; re-enable it. */
		hpre_enable_error_report(qm);
	}

	return ACC_ERR_RECOVERED;
}

static bool hpre_dev_is_abnormal(struct hisi_qm *qm)
{
	u32 err_status;

	err_status = hpre_get_hw_err_status(qm);
	if (err_status & qm->err_info.dev_err.shutdown_mask)
		return true;

	return false;
}

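/*
 * HPRE_AXI_ERROR_MASK covers the ooo_rdrsp/ooo_wrrsp error bits (10-21).
 * While AXI errors are masked off, the remaining RAS interrupts and
 * shutdown sources stay enabled.
 */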
static void hpre_disable_axi_error(struct hisi_qm *qm)
{
	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
	u32 val;

	val = ~(err_mask & (~HPRE_AXI_ERROR_MASK));
	writel(val, qm->io_base + HPRE_INT_MASK);

	if (qm->ver > QM_HW_V2)
		writel(dev_err->shutdown_mask & (~HPRE_AXI_ERROR_MASK),
		       qm->io_base + HPRE_OOO_SHUTDOWN_SEL);
}

static void hpre_enable_axi_error(struct hisi_qm *qm)
{
	struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
	u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;

	/* clear axi error source */
	writel(HPRE_AXI_ERROR_MASK, qm->io_base + HPRE_HAC_SOURCE_INT);

	writel(~err_mask, qm->io_base + HPRE_INT_MASK);

	if (qm->ver > QM_HW_V2)
		writel(dev_err->shutdown_mask, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);
}

static void hpre_err_info_init(struct hisi_qm *qm)
{
	struct hisi_qm_err_info *err_info = &qm->err_info;
	struct hisi_qm_err_mask *qm_err = &err_info->qm_err;
	struct hisi_qm_err_mask *dev_err = &err_info->dev_err;

	qm_err->fe = HPRE_HAC_RAS_FE_ENABLE;
	qm_err->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
	qm_err->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
	qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
						    HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	qm_err->reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
						 HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
	qm_err->ecc_2bits_mask = QM_ECC_MBIT;

	dev_err->fe = HPRE_HAC_RAS_FE_ENABLE;
	dev_err->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
	dev_err->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
	dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
						     HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
	dev_err->reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
						  HPRE_RESET_MASK_CAP, qm->cap_ver);
	dev_err->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;

	err_info->msi_wr_port = HPRE_WR_MSI_PORT;
	err_info->acpi_rst = "HRST";
}

static const struct hisi_qm_err_ini hpre_err_ini = {
	.hw_init		= hpre_set_user_domain_and_cache,
	.hw_err_enable		= hpre_hw_error_enable,
	.hw_err_disable		= hpre_hw_error_disable,
	.get_dev_hw_err_status	= hpre_get_hw_err_status,
	.clear_dev_hw_err_status = hpre_clear_hw_err_status,
	.open_axi_master_ooo	= hpre_open_axi_master_ooo,
	.open_sva_prefetch	= hpre_open_sva_prefetch,
	.close_sva_prefetch	= hpre_close_sva_prefetch,
	.show_last_dfx_regs	= hpre_show_last_dfx_regs,
	.err_info_init		= hpre_err_info_init,
	.get_err_result		= hpre_get_err_result,
	.dev_is_abnormal	= hpre_dev_is_abnormal,
	.disable_axi_error	= hpre_disable_axi_error,
	.enable_axi_error	= hpre_enable_axi_error,
};

static int hpre_pf_probe_init(struct hpre *hpre)
{
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	ret = hpre_set_user_domain_and_cache(qm);
	if (ret)
		return ret;

	hisi_qm_dev_err_init(qm);
	ret = hpre_show_last_regs_init(qm);
	if (ret)
		pci_err(qm->pdev, "Failed to init last word regs!\n");

	return ret;
}

static int hpre_probe_init(struct hpre *hpre)
{
	u32 type_rate = HPRE_SHAPER_TYPE_RATE;
	struct hisi_qm *qm = &hpre->qm;
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = hpre_pf_probe_init(hpre);
		if (ret)
			return ret;
		/* Enable shaper type 0 */
		if (qm->ver >= QM_HW_V3) {
			type_rate |= QM_SHAPER_ENABLE;
			qm->type_rate = type_rate;
		}
	}

	return 0;
}

static void hpre_probe_uninit(struct hisi_qm *qm)
{
	if (qm->fun_type == QM_HW_VF)
		return;

	hpre_cnt_regs_clear(qm);
	qm->debug.curr_qm_qp_num = 0;
	hpre_show_last_regs_uninit(qm);
	hpre_close_sva_prefetch(qm);
	hisi_qm_dev_err_uninit(qm);
}

static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_qm *qm;
	struct hpre *hpre;
	int ret;

	hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL);
	if (!hpre)
		return -ENOMEM;

	qm = &hpre->qm;
	ret = hpre_qm_init(qm, pdev);
	if (ret) {
		pci_err(pdev, "Failed to init HPRE QM (%d)!\n", ret);
		return ret;
	}

	ret = hpre_probe_init(hpre);
	if (ret) {
		pci_err(pdev, "Failed to probe (%d)!\n", ret);
		goto err_with_qm_init;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		goto err_with_probe_init;

	ret = hpre_debugfs_init(qm);
	if (ret)
		dev_warn(&pdev->dev, "Failed to init debugfs!\n");

	hisi_qm_add_list(qm, &hpre_devices);
	ret = hisi_qm_alg_register(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);
	if (ret < 0) {
		pci_err(pdev, "Failed to register algs to crypto!\n");
		goto err_qm_del_list;
	}

	if (qm->uacce) {
		ret = uacce_register(qm->uacce);
		if (ret) {
			pci_err(pdev, "failed to register uacce (%d)!\n", ret);
			goto err_with_alg_register;
		}
	}

	if (qm->fun_type == QM_HW_PF && vfs_num) {
		ret = hisi_qm_sriov_enable(pdev, vfs_num);
		if (ret < 0)
			goto err_with_alg_register;
	}

	hisi_qm_pm_init(qm);

	return 0;

err_with_alg_register:
	hisi_qm_alg_unregister(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);

err_qm_del_list:
	hisi_qm_del_list(qm, &hpre_devices);
	hpre_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);

err_with_probe_init:
	hpre_probe_uninit(qm);

err_with_qm_init:
	hisi_qm_uninit(qm);

	return ret;
}

static void hpre_remove(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_pm_uninit(qm);
	hisi_qm_wait_task_finish(qm, &hpre_devices);
	hisi_qm_alg_unregister(qm, &hpre_devices, HPRE_CTX_Q_NUM_DEF);
	hisi_qm_del_list(qm, &hpre_devices);
	if (qm->fun_type == QM_HW_PF && qm->vfs_num)
		hisi_qm_sriov_disable(pdev, true);

	hpre_debugfs_exit(qm);
	hisi_qm_stop(qm, QM_NORMAL);

	hpre_probe_uninit(qm);
	hisi_qm_uninit(qm);
}

static const struct dev_pm_ops hpre_pm_ops = {
	SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
};

static const struct pci_error_handlers hpre_err_handler = {
	.error_detected		= hisi_qm_dev_err_detected,
	.slot_reset		= hisi_qm_dev_slot_reset,
	.reset_prepare		= hisi_qm_reset_prepare,
	.reset_done		= hisi_qm_reset_done,
};

static struct pci_driver hpre_pci_driver = {
	.name			= hpre_name,
	.id_table		= hpre_dev_ids,
	.probe			= hpre_probe,
	.remove			= hpre_remove,
	.sriov_configure	= IS_ENABLED(CONFIG_PCI_IOV) ?
				  hisi_qm_sriov_configure : NULL,
	.err_handler		= &hpre_err_handler,
	.shutdown		= hisi_qm_dev_shutdown,
	.driver.pm		= &hpre_pm_ops,
};

struct pci_driver *hisi_hpre_get_pf_driver(void)
{
	return &hpre_pci_driver;
}
EXPORT_SYMBOL_GPL(hisi_hpre_get_pf_driver);

static void hpre_register_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL);
}

static void hpre_unregister_debugfs(void)
{
	debugfs_remove_recursive(hpre_debugfs_root);
}

static int __init hpre_init(void)
{
	int ret;

	hisi_qm_init_list(&hpre_devices);
	hpre_register_debugfs();

	ret = pci_register_driver(&hpre_pci_driver);
	if (ret) {
		hpre_unregister_debugfs();
		pr_err("hpre: can't register hisi hpre driver.\n");
	}

	return ret;
}

static void __exit hpre_exit(void)
{
	pci_unregister_driver(&hpre_pci_driver);
	hpre_unregister_debugfs();
}

module_init(hpre_init);
module_exit(hpre_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zaibo Xu <xuzaibo@huawei.com>");
MODULE_AUTHOR("Meng Yu <yumeng18@huawei.com>");
MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");
