Lines matching 0x100008

21 #define QM_VF_AEQ_INT_SOURCE		0x0
22 #define QM_VF_AEQ_INT_MASK 0x4
23 #define QM_VF_EQ_INT_SOURCE 0x8
24 #define QM_VF_EQ_INT_MASK 0xc
26 #define QM_IRQ_VECTOR_MASK GENMASK(15, 0)
27 #define QM_IRQ_TYPE_MASK GENMASK(15, 0)
29 #define QM_ABN_IRQ_TYPE_MASK GENMASK(7, 0)
32 #define QM_MB_PING_ALL_VFS 0xffff
34 #define QM_MB_CMD_DATA_MASK GENMASK(31, 0)
38 #define QM_SQ_HOP_NUM_SHIFT 0
42 #define QM_SQ_PRIORITY_SHIFT 0
45 #define QM_QC_PASID_ENABLE 0x1
48 #define QM_SQ_TYPE_MASK GENMASK(3, 0)
49 #define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc).w11) >> 6) & 0x1)
52 #define QM_CQ_HOP_NUM_SHIFT 0
56 #define QM_CQ_PHASE_SHIFT 0
59 #define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
61 #define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc).w11) >> 6) & 0x1)
67 #define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
68 #define QM_EQE_CQN_MASK GENMASK(15, 0)
70 #define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
72 #define QM_AEQE_TYPE_MASK 0xf
73 #define QM_AEQE_CQN_MASK GENMASK(15, 0)
74 #define QM_CQ_OVERFLOW 0
79 #define QM_XQ_DEPTH_MASK GENMASK(15, 0)
81 #define QM_DOORBELL_CMD_SQ 0
86 #define QM_DOORBELL_BASE_V1 0x340
90 #define QM_PAGE_SIZE 0x0034
91 #define QM_QP_DB_INTERVAL 0x10000
92 #define QM_DB_TIMEOUT_CFG 0x100074
93 #define QM_DB_TIMEOUT_SET 0x1fffff
95 #define QM_MEM_START_INIT 0x100040
96 #define QM_MEM_INIT_DONE 0x100044
97 #define QM_VFT_CFG_RDY 0x10006c
98 #define QM_VFT_CFG_OP_WR 0x100058
99 #define QM_VFT_CFG_TYPE 0x10005c
100 #define QM_VFT_CFG 0x100060
101 #define QM_VFT_CFG_OP_ENABLE 0x100054
102 #define QM_PM_CTRL 0x100148
105 #define QM_VFT_CFG_DATA_L 0x100064
106 #define QM_VFT_CFG_DATA_H 0x100068
119 #define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
121 #define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0)
123 #define QM_ABNORMAL_INT_SOURCE 0x100000
124 #define QM_ABNORMAL_INT_MASK 0x100004
125 #define QM_ABNORMAL_INT_MASK_VALUE 0x7fff
126 #define QM_ABNORMAL_INT_STATUS 0x100008
127 #define QM_ABNORMAL_INT_SET 0x10000c
128 #define QM_ABNORMAL_INF00 0x100010
129 #define QM_FIFO_OVERFLOW_TYPE 0xc0
131 #define QM_FIFO_OVERFLOW_VF 0x3f
133 #define QM_ABNORMAL_INF01 0x100014
134 #define QM_DB_TIMEOUT_TYPE 0xc0
136 #define QM_DB_TIMEOUT_VF 0x3f
138 #define QM_ABNORMAL_INF02 0x100018
140 #define QM_RAS_CE_ENABLE 0x1000ec
141 #define QM_RAS_FE_ENABLE 0x1000f0
142 #define QM_RAS_NFE_ENABLE 0x1000f4
143 #define QM_RAS_CE_THRESHOLD 0x1000f8
145 #define QM_OOO_SHUTDOWN_SEL 0x1040f8
146 #define QM_AXI_RRESP_ERR BIT(0)
152 #define QM_PEH_VENDOR_ID 0x1000d8
153 #define ACC_VENDOR_ID_VALUE 0x5a5a
154 #define QM_PEH_DFX_INFO0 0x1000fc
155 #define QM_PEH_DFX_INFO1 0x100100
156 #define QM_PEH_DFX_MASK (BIT(0) | BIT(2))
159 #define ACC_PEH_MSI_DISABLE GENMASK(31, 0)
160 #define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
162 #define ACC_MASTER_TRANS_RETURN 0x300150
163 #define ACC_MASTER_GLOBAL_CTRL 0x300000
164 #define ACC_AM_CFG_PORT_WR_EN 0x30001c
166 #define ACC_AM_ROB_ECC_INT_STS 0x300104
171 #define QM_IFC_READY_STATUS 0x100128
172 #define QM_IFC_INT_SET_P 0x100130
173 #define QM_IFC_INT_CFG 0x100134
174 #define QM_IFC_INT_SOURCE_P 0x100138
175 #define QM_IFC_INT_SOURCE_V 0x0020
176 #define QM_IFC_INT_MASK 0x0024
177 #define QM_IFC_INT_STATUS 0x0028
178 #define QM_IFC_INT_SET_V 0x002C
179 #define QM_IFC_SEND_ALL_VFS GENMASK(6, 0)
180 #define QM_IFC_INT_SOURCE_CLR GENMASK(63, 0)
181 #define QM_IFC_INT_SOURCE_MASK BIT(0)
182 #define QM_IFC_INT_DISABLE BIT(0)
183 #define QM_IFC_INT_STATUS_MASK BIT(0)
184 #define QM_IFC_INT_SET_MASK BIT(0)
198 #define QM_CACHE_WB_START 0x204
199 #define QM_CACHE_WB_DONE 0x208
200 #define QM_FUNC_CAPS_REG 0x3100
201 #define QM_CAPBILITY_VERSION GENMASK(7, 0)
208 #define QM_PCI_COMMAND_INVALID ~0
219 #define QM_SHAPER_CIR_B_MASK GENMASK(7, 0)
230 #define QM_QOS_TICK 0x300U
231 #define QM_QOS_DIVISOR_CLK 0x1f40U
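
The offsets listed above (including QM_ABNORMAL_INT_STATUS, 0x100008, the value this search is for) are MMIO register offsets relative to the QM's BAR mapping. A minimal sketch of how such an offset is typically accessed, assuming the struct hisi_qm io_base mapping used elsewhere in this listing; the helper names are hypothetical, not taken from the driver:

static u32 example_read_abnormal_status(struct hisi_qm *qm)
{
        /* Plain MMIO read of the abnormal interrupt status register (0x100008). */
        return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
}

static void example_mask_all_abnormal_irqs(struct hisi_qm *qm)
{
        /* Write the full mask value; each set bit masks one abnormal error source. */
        writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
}
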
263 SQC_VFT = 0,
280 QM_PF_FLR_PREPARE = 0x01,
292 QM_TOTAL_QP_NUM_CAP = 0x0,
305 QM_EQ_IRQ_TYPE_CAP_IDX = 0x0,
312 {QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0},
313 {QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1},
314 {QM_SUPPORT_STOP_QP, 0x3100, 0, BIT(9), 0x0, 0x0, 0x1},
315 {QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1},
316 {QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1},
320 {QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1},
324 {QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0},
328 {QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400},
329 {QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400},
330 {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(31, 0), 0x800, 0x4000800, 0x4000800},
331 {QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400},
332 {QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000},
333 {QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001},
334 {QM_ABN_IRQ_TYPE_CAP, 0x3114, 0, GENMASK(31, 0), 0x0, 0x10003, 0x10003},
335 {QM_PF2VF_IRQ_TYPE_CAP, 0x3118, 0, GENMASK(31, 0), 0x0, 0x0, 0x10002},
336 {QM_PF_IRQ_NUM_CAP, 0x311c, 16, GENMASK(15, 0), 0x1, 0x4, 0x4},
337 {QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3},
395 { .int_msk = BIT(0), .msg = "qm_axi_rresp" },
432 {1100, 100000, 0},
473 int delay = 0; in qm_wait_reset_finish()
482 return 0; in qm_wait_reset_finish()
515 ((op) ? 0x1 << QM_MB_OP_SHIFT : 0) | in qm_mb_pre_init()
516 (0x1 << QM_MB_BUSY_SHIFT)); in qm_mb_pre_init()
520 mailbox->rsvd = 0; in qm_mb_pre_init()
523 /* return 0 mailbox ready, -ETIMEDOUT hardware timeout */
530 0x1), POLL_PERIOD, POLL_TIMEOUT); in hisi_qm_wait_mb_ready()
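
The comment and poll above describe hisi_qm_wait_mb_ready()'s contract: 0 once the mailbox busy bit clears, -ETIMEDOUT otherwise. A minimal caller sketch under that contract; the function name and error message are illustrative only:

static int example_issue_mb_cmd(struct hisi_qm *qm)
{
        int ret;

        ret = hisi_qm_wait_mb_ready(qm);        /* 0: mailbox idle, -ETIMEDOUT: hardware stuck */
        if (ret) {
                dev_err(&qm->pdev->dev, "QM mailbox is not ready\n");
                return ret;
        }

        /* ... build the mailbox frame and write it with the 128-bit
         * ldp/stp sequence shown below in qm_mb_write() ...
         */
        return 0;
}
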
540 unsigned long tmp0 = 0, tmp1 = 0; in qm_mb_write()
550 asm volatile("ldp %0, %1, %3\n" in qm_mb_write()
551 "stp %0, %1, %2\n" in qm_mb_write()
587 return 0; in qm_mb_nolock()
610 /* op 0: set xqc information to hardware, 1: get xqc information from hardware. */
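
Per the comment above, the last argument of qm_set_and_get_xqc() selects the direction: 0 programs an XQC (SQC/CQC/EQC/AEQC) into hardware, 1 reads it back. Two illustrative calls mirroring the ones that appear later in this listing (the surrounding qm, sqc and qp_id variables are assumed):

/* Program the SQC for queue qp_id into the hardware (op = 0). */
ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 0);

/* Read the same SQC back from the hardware for inspection (op = 1). */
ret = qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 1);
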
677 u16 randata = 0; in qm_db_v2()
719 writel(0x1, qm->io_base + QM_MEM_START_INIT); in qm_dev_mem_reset()
721 val & BIT(0), POLL_PERIOD, in qm_dev_mem_reset()
730 * @is_read: Whether read from reg, 0: not support read from reg.
773 return 0; in hisi_qm_set_algs()
785 for (i = 0; i < dev_algs_size; i++) in hisi_qm_set_algs()
791 *ptr = '\0'; in hisi_qm_set_algs()
795 return 0; in hisi_qm_set_algs()
813 return 0; in qm_pm_get_sync()
816 if (ret < 0) { in qm_pm_get_sync()
821 return 0; in qm_pm_get_sync()
839 qp->qp_status.cq_head = 0; in qm_cq_head_update()
857 qp->qp_status.cq_head, 0); in qm_poll_req_cb()
876 for (i = eqe_num - 1; i >= 0; i--) { in qm_work_process()
896 u16 cqn, eqe_num = 0; in qm_get_complete_eqe_num()
900 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_get_complete_eqe_num()
917 qm->status.eq_head = 0; in qm_get_complete_eqe_num()
929 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_get_complete_eqe_num()
1049 qm->status.aeq_head = 0; in qm_aeq_thread()
1056 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); in qm_aeq_thread()
1065 qp_status->sq_tail = 0; in qm_init_qp_status()
1066 qp_status->cq_head = 0; in qm_init_qp_status()
1068 atomic_set(&qp_status->used, 0); in qm_init_qp_status()
1074 u32 page_type = 0x0; in qm_init_prefetch()
1081 page_type = 0x0; in qm_init_prefetch()
1084 page_type = 0x1; in qm_init_prefetch()
1087 page_type = 0x2; in qm_init_prefetch()
1118 for (i = 0; i < table_size; i++) { in acc_shaper_calc_cbs_s()
1131 for (i = 0; i < table_size; i++) { in acc_shaper_calc_cir_s()
1136 return 0; in acc_shaper_calc_cir_s()
1148 for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) { in qm_get_shaper_para()
1156 return 0; in qm_get_shaper_para()
1167 u64 tmp = 0; in qm_vft_data_cfg()
1169 if (number > 0) { in qm_vft_data_cfg()
1221 val & BIT(0), POLL_PERIOD, in qm_set_vft_common()
1226 writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR); in qm_set_vft_common()
1235 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); in qm_set_vft_common()
1236 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); in qm_set_vft_common()
1239 val & BIT(0), POLL_PERIOD, in qm_set_vft_common()
1261 return 0; in qm_shaper_init_vft()
1283 return 0; in qm_set_sqc_cqc_vft()
1286 qm_set_vft_common(qm, i, fun_num, 0, 0); in qm_set_sqc_cqc_vft()
1296 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); in qm_get_vft_v2()
1306 return 0; in qm_get_vft_v2()
1370 writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL); in qm_hw_error_uninit_v3()
1380 for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) { in qm_log_hw_error()
1385 dev_err(dev, "%s [error status=0x%x] found\n", in qm_log_hw_error()
1443 qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0); in qm_get_mb_cmd()
1503 int cnt = 0; in qm_wait_vf_prepare_finish()
1504 int ret = 0; in qm_wait_vf_prepare_finish()
1509 return 0; in qm_wait_vf_prepare_finish()
1566 int cnt = 0; in qm_ping_single_vf()
1570 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0); in qm_ping_single_vf()
1603 u64 val = 0; in qm_ping_all_vfs()
1604 int cnt = 0; in qm_ping_all_vfs()
1608 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0); in qm_ping_all_vfs()
1625 return 0; in qm_ping_all_vfs()
1646 int cnt = 0; in qm_ping_pf()
1650 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0); in qm_ping_pf()
1679 return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); in qm_stop_qp()
1688 0); in qm_set_msi()
1694 return 0; in qm_set_msi()
1701 return 0; in qm_set_msi()
1707 u32 cmd = ~0; in qm_wait_msi_finish()
1708 int cnt = 0; in qm_wait_msi_finish()
1753 for (i = 0; i < MAX_WAIT_COUNTS; i++) { in qm_set_msi_v3()
1756 return 0; in qm_set_msi_v3()
1763 ret = 0; in qm_set_msi_v3()
1810 *addr = 0; in hisi_qm_unset_hw_reset()
1831 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC); in qm_create_qp_nolock()
1832 if (qp_id < 0) { in qm_create_qp_nolock()
1841 memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth); in qm_create_qp_nolock()
1903 struct qm_sqc sqc = {0}; in qm_sq_ctx_cfg()
1906 sqc.dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); in qm_sq_ctx_cfg()
1910 sqc.w8 = 0; /* rand_qc */ in qm_sq_ctx_cfg()
1912 sqc.w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type)); in qm_sq_ctx_cfg()
1922 return qm_set_and_get_xqc(qm, QM_MB_CMD_SQC, &sqc, qp_id, 0); in qm_sq_ctx_cfg()
1929 struct qm_cqc cqc = {0}; in qm_cq_ctx_cfg()
1932 cqc.dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, QM_QC_CQE_SIZE)); in qm_cq_ctx_cfg()
1936 cqc.w8 = 0; /* rand_qc */ in qm_cq_ctx_cfg()
1951 return qm_set_and_get_xqc(qm, QM_MB_CMD_CQC, &cqc, qp_id, 0); in qm_cq_ctx_cfg()
1987 return 0; in qm_start_qp_nolock()
1995 * After this function, qp can receive request from user. Return 0 if
2027 for (i = 0; i < qp_used; i++) { in qp_stop_fail_cb()
2047 int ret, i = 0; in qm_drain_qp()
2051 return 0; in qm_drain_qp()
2086 return 0; in qm_drain_qp()
2102 return 0; in qm_stop_qp_nolock()
2117 return 0; in qm_stop_qp_nolock()
2124 * This function is reverse of hisi_qm_start_qp. Return 0 if successful.
2151 * done function should clear used sqe to 0.
2172 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0); in hisi_qp_send()
2176 return 0; in hisi_qp_send()
2187 writel(0x1, qm->io_base + QM_CACHE_WB_START); in hisi_qm_cache_wb()
2189 val, val & BIT(0), POLL_PERIOD, in hisi_qm_cache_wb()
2216 for (i = 0; i < qm->qp_num; i++) in hisi_qm_set_hw_reset()
2226 u8 alg_type = 0; in hisi_qm_uacce_get_queue()
2239 return 0; in hisi_qm_uacce_get_queue()
2288 * dma_mmap_coherent() requires vm_pgoff as 0 in hisi_qm_uacce_mmap()
2292 vma->vm_pgoff = 0; in hisi_qm_uacce_mmap()
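
The comment above records a dma_mmap_coherent() constraint: it interprets vma->vm_pgoff as an offset into the buffer, so a driver that used the page offset for its own region selection must clear it (and can restore it) around the call. A self-contained sketch of that pattern; the helper name and parameters are hypothetical:

static int example_mmap_dus(struct device *dev, struct vm_area_struct *vma,
                            void *va, dma_addr_t dma, size_t size)
{
        unsigned long saved_pgoff = vma->vm_pgoff;
        int ret;

        vma->vm_pgoff = 0;                      /* dma_mmap_coherent() requires vm_pgoff == 0 */
        ret = dma_mmap_coherent(dev, vma, va, dma, size);
        vma->vm_pgoff = saved_pgoff;            /* put the caller's offset back */

        return ret;
}
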
2319 int updated = 0; in hisi_qm_is_q_updated()
2354 if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1) in hisi_qm_uacce_ioctl()
2364 return 0; in hisi_qm_uacce_ioctl()
2378 return 0; in hisi_qm_uacce_ioctl()
2393 u32 count = 0; in qm_hw_err_isolate()
2401 return 0; in qm_hw_err_isolate()
2430 return 0; in qm_hw_err_isolate()
2475 return 0; in hisi_qm_isolate_threshold_write()
2531 if (ret < 0) in qm_alloc_uacce()
2578 return 0; in qm_alloc_uacce()
2591 return 0; in qm_frozen()
2599 return 0; in qm_frozen()
2612 int ret = 0; in qm_try_frozen_vfs()
2668 for (i = num - 1; i >= 0; i--) { in hisi_qp_memory_uninit()
2707 return 0; in hisi_qp_memory_init()
2728 qm->qp_in_used = 0; in hisi_qm_pre_init()
2880 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
2882 * (VF function number 0x2)
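
Expanding the kerneldoc example above into explicit calls. A and B are placeholder queue indexes; function 0 is the PF and 0x2 is the VF's function number, as the comment states:

ret = hisi_qm_set_vft(qm, 0, A, B - A + 1);   /* queues A..B to the PF (function 0) */

ret = hisi_qm_set_vft(qm, 2, A, B - A + 1);   /* queues A..B to the VF with function number 0x2 */
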
2900 status->eq_head = 0; in qm_init_eq_aeq_status()
2901 status->aeq_head = 0; in qm_init_eq_aeq_status()
2909 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); in qm_enable_eq_aeq_interrupts()
2910 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); in qm_enable_eq_aeq_interrupts()
2912 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); in qm_enable_eq_aeq_interrupts()
2913 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK); in qm_enable_eq_aeq_interrupts()
2918 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); in qm_disable_eq_aeq_interrupts()
2919 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); in qm_disable_eq_aeq_interrupts()
2924 struct qm_eqc eqc = {0}; in qm_eq_ctx_cfg()
2932 return qm_set_and_get_xqc(qm, QM_MB_CMD_EQC, &eqc, 0, 0); in qm_eq_ctx_cfg()
2937 struct qm_aeqc aeqc = {0}; in qm_aeq_ctx_cfg()
2943 return qm_set_and_get_xqc(qm, QM_MB_CMD_AEQC, &aeqc, 0, 0); in qm_aeq_ctx_cfg()
2969 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); in __hisi_qm_start()
2978 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); in __hisi_qm_start()
2982 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); in __hisi_qm_start()
2989 return 0; in __hisi_qm_start()
3001 int ret = 0; in hisi_qm_start()
3008 dev_err(dev, "qp_num should not be 0\n"); in hisi_qm_start()
3033 if (ret < 0) in qm_restart()
3037 for (i = 0; i < qm->qp_num; i++) { in qm_restart()
3041 ret = qm_start_qp_nolock(qp, 0); in qm_restart()
3042 if (ret < 0) { in qm_restart()
3053 return 0; in qm_restart()
3063 for (i = 0; i < qm->qp_num; i++) { in qm_stop_started_qp()
3068 if (ret < 0) { in qm_stop_started_qp()
3075 return 0; in qm_stop_started_qp()
3090 for (i = 0; i < qm->qp_num; i++) { in qm_clear_queues()
3093 memset(qp->qdma.va, 0, qp->qdma.size); in qm_clear_queues()
3096 memset(qm->qdma.va, 0, qm->qdma.size); in qm_clear_queues()
3111 int ret = 0; in hisi_qm_stop()
3126 if (ret < 0) { in hisi_qm_stop()
3135 ret = hisi_qm_set_vft(qm, 0, 0, 0); in hisi_qm_stop()
3136 if (ret < 0) { in hisi_qm_stop()
3232 if (!qps || qp_num <= 0) in hisi_qm_free_qps()
3235 for (i = qp_num - 1; i >= 0; i--) in hisi_qm_free_qps()
3263 if (dev_node < 0) in hisi_qm_sort_devices()
3264 dev_node = 0; in hisi_qm_sort_devices()
3282 return 0; in hisi_qm_sort_devices()
3305 if (!qps || !qm_list || qp_num <= 0) in hisi_qm_alloc_qps_node()
3315 for (i = 0; i < qp_num; i++) { in hisi_qm_alloc_qps_node()
3324 ret = 0; in hisi_qm_alloc_qps_node()
3359 for (i = num_vfs; i > 0; i--) { in qm_vf_q_assign()
3366 remain_q_num = 0; in qm_vf_q_assign()
3367 } else if (remain_q_num > 0) { in qm_vf_q_assign()
3378 hisi_qm_set_vft(qm, j, 0, 0); in qm_vf_q_assign()
3384 return 0; in qm_vf_q_assign()
3393 ret = hisi_qm_set_vft(qm, i, 0, 0); in qm_clear_vft_config()
3397 qm->vfs_num = 0; in qm_clear_vft_config()
3399 return 0; in qm_clear_vft_config()
3429 return 0; in qm_func_shaper_enable()
3434 u64 cir_u = 0, cir_b = 0, cir_s = 0; in qm_get_shaper_vft_qos()
3441 val & BIT(0), POLL_PERIOD, in qm_get_shaper_vft_qos()
3444 return 0; in qm_get_shaper_vft_qos()
3446 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR); in qm_get_shaper_vft_qos()
3450 writel(0x0, qm->io_base + QM_VFT_CFG_RDY); in qm_get_shaper_vft_qos()
3451 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); in qm_get_shaper_vft_qos()
3454 val & BIT(0), POLL_PERIOD, in qm_get_shaper_vft_qos()
3457 return 0; in qm_get_shaper_vft_qos()
3476 return 0; in qm_get_shaper_vft_qos()
3503 int cnt = 0; in qm_vf_read_qos()
3507 qm->mb_qos = 0; in qm_vf_read_qos()
3550 ir = qm_get_shaper_vft_qos(qm, 0); in qm_algqos_read()
3575 char tbuf_bdf[QM_DBG_READ_LEN] = {0}; in qm_get_qos_value()
3576 char val_buf[QM_DBG_READ_LEN] = {0}; in qm_get_qos_value()
3586 if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) { in qm_get_qos_value()
3601 return 0; in qm_get_qos_value()
3613 if (*pos != 0) in qm_algqos_write()
3614 return 0; in qm_algqos_write()
3620 if (len < 0) in qm_algqos_write()
3623 tbuf[len] = '\0'; in qm_algqos_write()
3782 return 0; in hisi_qm_sriov_disable()
3791 * Enable SR-IOV according to num_vfs, 0 means disable.
3795 if (num_vfs == 0) in hisi_qm_sriov_configure()
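
hisi_qm_sriov_configure() above follows the standard PCI sriov_configure callback contract: the PCI core passes the value written to sysfs sriov_numvfs, and 0 disables SR-IOV. A hedged sketch of how an accelerator driver could wire it up; everything named example_* is hypothetical:

static struct pci_driver example_acc_pci_driver = {
        .name            = "example_acc",
        .id_table        = example_acc_dev_ids,      /* assumed PCI ID table */
        .probe           = example_acc_probe,
        .remove          = example_acc_remove,
        .sriov_configure = hisi_qm_sriov_configure,  /* num_vfs > 0 enables, 0 disables */
};
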
3881 return 0; in qm_check_req_recv()
3915 for (i = 0; i < MAX_WAIT_COUNTS; i++) { in qm_set_pf_mse()
3918 return 0; in qm_set_pf_mse()
3935 * pci_find_ext_capability cannot return 0, pos does not need to be in qm_set_vf_mse()
3946 for (i = 0; i < MAX_WAIT_COUNTS; i++) { in qm_set_vf_mse()
3950 return 0; in qm_set_vf_mse()
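
The truncated comment above gives the reasoning in qm_set_vf_mse(): once VFs have been enabled, the SR-IOV extended capability must be present, so the offset returned by pci_find_ext_capability() can be used without a zero check. A minimal sketch of toggling VF Memory Space Enable on that basis (error handling omitted, pdev assumed):

u16 sriov_ctrl;
int pos;

pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
sriov_ctrl |= PCI_SRIOV_CTRL_MSE;               /* set VF Memory Space Enable */
pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl);
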
3965 int ret = 0; in qm_vf_reset_prepare()
3995 return 0; in qm_try_stop_vfs()
4049 return 0; in qm_controller_reset_prepare()
4054 u32 nfe_enb = 0; in qm_dev_ecc_mbit_handle()
4126 unsigned long long value = 0; in qm_soft_reset()
4146 return 0; in qm_soft_reset()
4155 int ret = 0; in qm_vf_reset_done()
4184 return 0; in qm_try_start_vfs()
4324 return 0; in qm_controller_reset_done()
4356 return 0; in qm_controller_reset()
4398 u32 delay = 0; in hisi_qm_reset_prepare()
4617 val == BIT(0), QM_VF_RESET_WAIT_US, in qm_wait_pf_reset_finish()
4629 ret = qm_get_mb_cmd(qm, &msg, 0); in qm_wait_pf_reset_finish()
4630 qm_clear_cmd_interrupt(qm, 0); in qm_wait_pf_reset_finish()
4731 qm_handle_cmd_msg(qm, 0); in qm_cmd_process()
4748 return 0; in hisi_qm_alg_register()
4753 return 0; in hisi_qm_alg_register()
4803 return 0; in qm_register_abnormal_irq()
4807 return 0; in qm_register_abnormal_irq()
4810 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm); in qm_register_abnormal_irq()
4838 return 0; in qm_register_mb_cmd_irq()
4841 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm); in qm_register_mb_cmd_irq()
4869 return 0; in qm_register_aeq_irq()
4901 return 0; in qm_register_eq_irq()
4904 ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm); in qm_register_eq_irq()
4939 return 0; in qm_irqs_register()
4961 return 0; in qm_get_qp_num()
4970 return 0; in qm_get_qp_num()
4984 return 0; in qm_get_qp_num()
4998 for (i = 0; i < size; i++) { in qm_pre_store_irq_type_caps()
5006 return 0; in qm_pre_store_irq_type_caps()
5035 for (i = 0; i < size; i++) { in qm_get_hw_caps()
5052 if (ret < 0) { in qm_get_pci_res()
5080 qm->db_interval = 0; in qm_get_pci_res()
5087 return 0; in qm_get_pci_res()
5107 if (ret < 0) { in hisi_qm_pci_init()
5117 if (ret < 0) in hisi_qm_pci_init()
5123 if (ret < 0) { in hisi_qm_pci_init()
5128 return 0; in hisi_qm_pci_init()
5141 for (i = 0; i < qm->qp_num; i++) in hisi_qm_init_work()
5158 return 0; in hisi_qm_init_work()
5183 for (i = 0; i < qm->qp_num; i++) { in hisi_qp_alloc_memory()
5192 return 0; in hisi_qp_alloc_memory()
5204 size_t off = 0; in hisi_qm_alloc_rsv_buf()
5210 } while (0) in hisi_qm_alloc_rsv_buf()
5226 return 0; in hisi_qm_alloc_rsv_buf()
5233 size_t off = 0; in hisi_qm_memory_init()
5242 qm->factor[0].func_qos = QM_QOS_MAX_VAL; in hisi_qm_memory_init()
5249 } while (0) in hisi_qm_memory_init()
5278 return 0; in hisi_qm_memory_init()
5327 if (ret < 0) in hisi_qm_init()
5341 return 0; in hisi_qm_init()
5545 return 0; in hisi_qm_resume()