Lines Matching +full:pd +full:- +full:revision

2  * Copyright (c) 2016-2017 Hisilicon Limited.
14 * - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
98 dseg->lkey = cpu_to_le32(sg->lkey); in set_data_seg_v2()
99 dseg->addr = cpu_to_le64(sg->addr); in set_data_seg_v2()
100 dseg->len = cpu_to_le32(sg->length); in set_data_seg_v2()
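
/*
 * The three stores above define the on-wire scatter/gather element: one
 * little-endian {len, lkey, addr} triple per ib_sge. A self-contained sketch
 * follows; the segment layout is inferred from the assignments shown (not
 * copied from the driver headers), and the __le typedefs are placeholders
 * assuming a little-endian host.
 */
#include <stdint.h>

typedef uint32_t __le32;	/* placeholder for <linux/types.h> */
typedef uint64_t __le64;
#define cpu_to_le32(x) (x)	/* identity on a little-endian host */
#define cpu_to_le64(x) (x)

struct ib_sge { uint64_t addr; uint32_t length; uint32_t lkey; };

struct v2_wqe_data_seg {	/* layout assumed from the stores above */
	__le32 len;
	__le32 lkey;
	__le64 addr;
};

static void set_data_seg(struct v2_wqe_data_seg *dseg, const struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);	/* local memory key */
	dseg->addr = cpu_to_le64(sg->addr);	/* DMA address of the buffer */
	dseg->len  = cpu_to_le32(sg->length);	/* buffer length in bytes */
}
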
104 * mapped-value = 1 + real-value
132 return hns_roce_op_code[ib_opcode] ? hns_roce_op_code[ib_opcode] - 1 : in to_hr_opcode()
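
/*
 * to_hr_opcode() above leans on the "mapped-value = 1 + real-value" comment:
 * the lookup table stores each hardware opcode biased by one, so a zero entry
 * unambiguously means "IB opcode not supported" even when the real hardware
 * opcode is 0. A compilable sketch of the idea; all opcode numbers and the
 * OP_MASK sentinel below are invented for the demo.
 */
#include <stdio.h>

#define OP_MASK 0x1fu			/* demo sentinel for "unsupported" */
#define NUM_IB_OPCODES 4

static const unsigned int op_map[NUM_IB_OPCODES] = {
	[0] = 1 + 0x0,	/* e.g. RDMA write -> hw opcode 0x0 */
	[1] = 1 + 0x4,	/* e.g. send       -> hw opcode 0x4 */
	/* [2] left zero: unsupported IB opcode */
	[3] = 1 + 0x6,
};

static unsigned int to_hw_opcode(unsigned int ib_opcode)
{
	if (ib_opcode >= NUM_IB_OPCODES)
		return OP_MASK;

	return op_map[ib_opcode] ? op_map[ib_opcode] - 1 : OP_MASK;
}

int main(void)
{
	for (unsigned int op = 0; op < 5; op++)
		printf("ib opcode %u -> hw opcode %#x\n", op, to_hw_opcode(op));
	return 0;
}
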
141 struct hns_roce_mr *mr = to_hr_mr(wr->mr); in set_frmr_seg()
145 hr_reg_write_bool(fseg, FRMR_BIND_EN, wr->access & IB_ACCESS_MW_BIND); in set_frmr_seg()
147 wr->access & IB_ACCESS_REMOTE_ATOMIC); in set_frmr_seg()
148 hr_reg_write_bool(fseg, FRMR_RR, wr->access & IB_ACCESS_REMOTE_READ); in set_frmr_seg()
149 hr_reg_write_bool(fseg, FRMR_RW, wr->access & IB_ACCESS_REMOTE_WRITE); in set_frmr_seg()
150 hr_reg_write_bool(fseg, FRMR_LW, wr->access & IB_ACCESS_LOCAL_WRITE); in set_frmr_seg()
153 pbl_ba = mr->pbl_mtr.hem_cfg.root_ba; in set_frmr_seg()
154 rc_sq_wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba)); in set_frmr_seg()
155 rc_sq_wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba)); in set_frmr_seg()
157 rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff); in set_frmr_seg()
158 rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32); in set_frmr_seg()
159 rc_sq_wqe->rkey = cpu_to_le32(wr->key); in set_frmr_seg()
160 rc_sq_wqe->va = cpu_to_le64(wr->mr->iova); in set_frmr_seg()
162 hr_reg_write(fseg, FRMR_PBL_SIZE, mr->npages); in set_frmr_seg()
164 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); in set_frmr_seg()
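
/*
 * One detail worth flagging in set_frmr_seg() above: an FRMR WQE carries no
 * message length or invalidate key, so the driver reuses the rc_sq_wqe
 * msg_len/inv_key words to hold the low and high 32 bits of the PBL base
 * address. A condensed, self-contained restatement; the helper macros stand
 * in for the kernel's lower_32_bits()/upper_32_bits().
 */
#include <stdint.h>

typedef uint32_t __le32;		/* little-endian host assumed */
#define cpu_to_le32(x) (x)
#define lower_32_bits(v) ((uint32_t)((v) & 0xffffffffu))
#define upper_32_bits(v) ((uint32_t)((v) >> 32))

struct rc_send_wqe_frag { __le32 msg_len; __le32 inv_key; /* ... */ };

static void set_frmr_pbl_ba(struct rc_send_wqe_frag *wqe, uint64_t pbl_ba)
{
	wqe->msg_len = cpu_to_le32(lower_32_bits(pbl_ba));	/* BA[31:0] */
	wqe->inv_key = cpu_to_le32(upper_32_bits(pbl_ba));	/* BA[63:32] */
}
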
177 set_data_seg_v2(dseg, wr->sg_list); in set_atomic_seg()
179 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) { in set_atomic_seg()
180 aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap); in set_atomic_seg()
181 aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add); in set_atomic_seg()
183 aseg->fetchadd_swap_data = in set_atomic_seg()
184 cpu_to_le64(atomic_wr(wr)->compare_add); in set_atomic_seg()
185 aseg->cmp_data = 0; in set_atomic_seg()
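
/*
 * The branch above shows how both IB atomic verbs share one WQE segment: the
 * first u64 holds the swap value for compare-and-swap but the add operand for
 * fetch-and-add, and the second u64 (the compare value) only matters for
 * compare-and-swap and is zeroed otherwise. A minimal restatement; the field
 * names come from the fragment, everything else is assumed.
 */
#include <stdint.h>

typedef uint64_t __le64;		/* little-endian host assumed */
#define cpu_to_le64(x) (x)

struct v2_wqe_atomic_seg {
	__le64 fetchadd_swap_data;	/* swap value OR add operand */
	__le64 cmp_data;		/* compare value (CMP_AND_SWP only) */
};

static void fill_atomic_seg(struct v2_wqe_atomic_seg *aseg, int is_cmp_swp,
			    uint64_t swap_or_add, uint64_t compare)
{
	aseg->fetchadd_swap_data = cpu_to_le64(swap_or_add);
	aseg->cmp_data = is_cmp_swp ? cpu_to_le64(compare) : 0;
}
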
195 struct ib_device *ibdev = &(to_hr_dev(qp->ibqp.device))->ib_dev; in fill_ext_sge_inl_data()
203 if (msg_len > qp->sq.ext_sge_cnt * HNS_ROCE_SGE_SIZE) { in fill_ext_sge_inl_data()
206 return -EINVAL; in fill_ext_sge_inl_data()
209 dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1)); in fill_ext_sge_inl_data()
210 left_len_in_pg = hr_hw_page_align((uintptr_t)dseg) - (uintptr_t)dseg; in fill_ext_sge_inl_data()
211 len = wr->sg_list[0].length; in fill_ext_sge_inl_data()
212 addr = (void *)(unsigned long)(wr->sg_list[0].addr); in fill_ext_sge_inl_data()
226 if (i >= wr->num_sge) in fill_ext_sge_inl_data()
229 left_len_in_pg -= len; in fill_ext_sge_inl_data()
230 len = wr->sg_list[i].length; in fill_ext_sge_inl_data()
231 addr = (void *)(unsigned long)(wr->sg_list[i].addr); in fill_ext_sge_inl_data()
236 len -= left_len_in_pg; in fill_ext_sge_inl_data()
240 idx & (qp->sge.sge_cnt - 1)); in fill_ext_sge_inl_data()
257 dseg = hns_roce_get_extend_sge(qp, idx & (qp->sge.sge_cnt - 1)); in set_extend_sge()
258 if (likely(sge->length)) { in set_extend_sge()
261 cnt--; in set_extend_sge()
271 struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device); in check_inl_data_len()
272 int mtu = ib_mtu_enum_to_int(qp->path_mtu); in check_inl_data_len()
274 if (mtu < 0 || len > qp->max_inline_data || len > mtu) { in check_inl_data_len()
275 ibdev_err(&hr_dev->ib_dev, in check_inl_data_len()
277 len, qp->max_inline_data, mtu); in check_inl_data_len()
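
/*
 * Condensed form of check_inl_data_len() above: an inline payload must fit
 * both the QP's configured max_inline_data and the current path MTU, and a
 * negative value from ib_mtu_enum_to_int() means the MTU enum was invalid.
 * A standalone predicate capturing the same condition:
 */
#include <stdbool.h>
#include <stdint.h>

static bool inl_len_ok(uint32_t len, uint32_t max_inline_data, int mtu)
{
	return mtu >= 0 && len <= max_inline_data && len <= (uint32_t)mtu;
}
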
288 struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device); in set_rc_inl()
289 u32 msg_len = le32_to_cpu(rc_sq_wqe->msg_len); in set_rc_inl()
290 struct ib_device *ibdev = &hr_dev->ib_dev; in set_rc_inl()
296 if (unlikely(wr->opcode == IB_WR_RDMA_READ)) { in set_rc_inl()
298 return -EINVAL; in set_rc_inl()
302 return -EINVAL; in set_rc_inl()
309 for (i = 0; i < wr->num_sge; i++) { in set_rc_inl()
310 memcpy(dseg, ((void *)wr->sg_list[i].addr), in set_rc_inl()
311 wr->sg_list[i].length); in set_rc_inl()
312 dseg += wr->sg_list[i].length; in set_rc_inl()
321 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SGE_NUM, curr_idx - *sge_idx); in set_rc_inl()
341 (*sge_ind) & (qp->sge.sge_cnt - 1)); in set_rwqe_data_seg()
344 !!(wr->send_flags & IB_SEND_INLINE)); in set_rwqe_data_seg()
345 if (wr->send_flags & IB_SEND_INLINE) in set_rwqe_data_seg()
349 for (i = 0; i < wr->num_sge; i++) { in set_rwqe_data_seg()
350 if (likely(wr->sg_list[i].length)) { in set_rwqe_data_seg()
351 set_data_seg_v2(dseg, wr->sg_list + i); in set_rwqe_data_seg()
356 for (i = 0; i < wr->num_sge && j < HNS_ROCE_SGE_IN_WQE; i++) { in set_rwqe_data_seg()
357 if (likely(wr->sg_list[i].length)) { in set_rwqe_data_seg()
358 set_data_seg_v2(dseg, wr->sg_list + i); in set_rwqe_data_seg()
364 set_extend_sge(qp, wr->sg_list + i, sge_ind, in set_rwqe_data_seg()
365 valid_num_sge - HNS_ROCE_SGE_IN_WQE); in set_rwqe_data_seg()
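
/*
 * The two loops above implement a split: the first HNS_ROCE_SGE_IN_WQE valid
 * SGEs are written into the WQE itself and the remainder spill into the
 * extended SGE ring, indexed modulo a power-of-two sge_cnt. A standalone demo
 * of the arithmetic; the constants are invented (the real counts come from QP
 * setup), but the masking is the same.
 */
#include <stdio.h>

#define SGE_IN_WQE 2u		/* demo stand-in for HNS_ROCE_SGE_IN_WQE */

int main(void)
{
	unsigned int valid_num_sge = 5, sge_idx = 62, sge_cnt = 64;
	unsigned int in_wqe = valid_num_sge < SGE_IN_WQE ?
			      valid_num_sge : SGE_IN_WQE;

	printf("%u SGEs in the WQE, %u in the extension ring\n",
	       in_wqe, valid_num_sge - in_wqe);
	for (unsigned int i = in_wqe; i < valid_num_sge; i++, sge_idx++)
		printf("SGE %u -> ext slot %u\n",
		       i, sge_idx & (sge_cnt - 1));	/* wraps 62, 63, 0 */
	return 0;
}
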
376 if (unlikely(hr_qp->state == IB_QPS_RESET || in check_send_valid()
377 hr_qp->state == IB_QPS_INIT || in check_send_valid()
378 hr_qp->state == IB_QPS_RTR)) in check_send_valid()
379 return -EINVAL; in check_send_valid()
380 else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) in check_send_valid()
381 return -EIO; in check_send_valid()
393 for (i = 0; i < wr->num_sge; i++) { in calc_wr_sge_num()
394 if (likely(wr->sg_list[i].length)) { in calc_wr_sge_num()
395 len += wr->sg_list[i].length; in calc_wr_sge_num()
406 switch (wr->opcode) { in get_immtdata()
409 return cpu_to_le32(be32_to_cpu(wr->ex.imm_data)); in get_immtdata()
418 u32 ib_op = wr->opcode; in set_ud_opcode()
421 return -EINVAL; in set_ud_opcode()
423 ud_sq_wqe->immtdata = get_immtdata(wr); in set_ud_opcode()
433 struct ib_device *ib_dev = ah->ibah.device; in fill_ud_av()
436 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_UDPSPN, ah->av.udp_sport); in fill_ud_av()
437 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_HOPLIMIT, ah->av.hop_limit); in fill_ud_av()
438 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_TCLASS, ah->av.tclass); in fill_ud_av()
439 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_FLOW_LABEL, ah->av.flowlabel); in fill_ud_av()
440 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_SL, ah->av.sl); in fill_ud_av()
442 ud_sq_wqe->sgid_index = ah->av.gid_index; in fill_ud_av()
444 memcpy(ud_sq_wqe->dmac, ah->av.mac, ETH_ALEN); in fill_ud_av()
445 memcpy(ud_sq_wqe->dgid, ah->av.dgid, GID_LEN_V2); in fill_ud_av()
447 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) in fill_ud_av()
450 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN_EN, ah->av.vlan_en); in fill_ud_av()
451 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_VLAN, ah->av.vlan_id); in fill_ud_av()
461 struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah); in set_ud_wqe()
474 ud_sq_wqe->msg_len = cpu_to_le32(msg_len); in set_ud_wqe()
477 !!(wr->send_flags & IB_SEND_SIGNALED)); in set_ud_wqe()
479 !!(wr->send_flags & IB_SEND_SOLICITED)); in set_ud_wqe()
481 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_PD, to_hr_pd(qp->ibqp.pd)->pdn); in set_ud_wqe()
484 curr_idx & (qp->sge.sge_cnt - 1)); in set_ud_wqe()
486 ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ? in set_ud_wqe()
487 qp->qkey : ud_wr(wr)->remote_qkey); in set_ud_wqe()
488 hr_reg_write(ud_sq_wqe, UD_SEND_WQE_DQPN, ud_wr(wr)->remote_qpn); in set_ud_wqe()
494 qp->sl = to_hr_ah(ud_wr(wr)->ah)->av.sl; in set_ud_wqe()
496 set_extend_sge(qp, wr->sg_list, &curr_idx, valid_num_sge); in set_ud_wqe()
504 if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB) in set_ud_wqe()
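
/*
 * A subtlety in the UD path above, in the qkey selection a few lines back:
 * per the InfiniBand rule for controlled Q_Keys, a work request whose Q_Key
 * has the most significant bit set must not be used directly, and the QP's
 * own qkey is substituted instead. As a standalone helper:
 */
#include <stdint.h>

static inline uint32_t effective_qkey(uint32_t wr_qkey, uint32_t qp_qkey)
{
	/* MSB set marks a controlled Q_Key: fall back to the QP's qkey */
	return (wr_qkey & 0x80000000u) ? qp_qkey : wr_qkey;
}
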
517 u32 ib_op = wr->opcode; in set_rc_opcode()
520 rc_sq_wqe->immtdata = get_immtdata(wr); in set_rc_opcode()
526 rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey); in set_rc_opcode()
527 rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr); in set_rc_opcode()
534 rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey); in set_rc_opcode()
535 rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr); in set_rc_opcode()
538 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) in set_rc_opcode()
541 ret = -EOPNOTSUPP; in set_rc_opcode()
544 rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey); in set_rc_opcode()
547 ret = -EINVAL; in set_rc_opcode()
563 struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device); in set_rc_wqe()
572 rc_sq_wqe->msg_len = cpu_to_le32(msg_len); in set_rc_wqe()
579 (wr->send_flags & IB_SEND_FENCE) ? 1 : 0); in set_rc_wqe()
582 (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0); in set_rc_wqe()
585 (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0); in set_rc_wqe()
587 if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP || in set_rc_wqe()
588 wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { in set_rc_wqe()
590 return -EINVAL; in set_rc_wqe()
592 } else if (wr->opcode != IB_WR_REG_MR) { in set_rc_wqe()
593 ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe, in set_rc_wqe()
605 if (qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB) in set_rc_wqe()
617 if (unlikely(qp->state == IB_QPS_ERR)) { in update_sq_db()
622 hr_reg_write(&sq_db, DB_TAG, qp->qpn); in update_sq_db()
624 hr_reg_write(&sq_db, DB_PI, qp->sq.head); in update_sq_db()
625 hr_reg_write(&sq_db, DB_SL, qp->sl); in update_sq_db()
627 hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg); in update_sq_db()
634 if (unlikely(qp->state == IB_QPS_ERR)) { in update_rq_db()
637 if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) { in update_rq_db()
638 *qp->rdb.db_record = in update_rq_db()
639 qp->rq.head & V2_DB_PRODUCER_IDX_M; in update_rq_db()
643 hr_reg_write(&rq_db, DB_TAG, qp->qpn); in update_rq_db()
645 hr_reg_write(&rq_db, DB_PI, qp->rq.head); in update_rq_db()
648 qp->rq.db_reg); in update_rq_db()
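
/*
 * update_rq_db() above has two doorbell flavours: with RQ_RECORD_DB the
 * producer index is simply stored to a DMA-visible record word that hardware
 * polls, saving an MMIO write per receive; otherwise a doorbell register
 * write carrying DB_TAG/DB_PI is issued. A condensed standalone restatement
 * limited to the fields visible in the fragment (other doorbell fields are
 * elided); the mask value is a demo stand-in for V2_DB_PRODUCER_IDX_M.
 */
#include <stdint.h>

#define PRODUCER_IDX_MASK 0xffffu	/* demo stand-in */

struct demo_qp {
	int has_record_db;		/* HNS_ROCE_QP_CAP_RQ_RECORD_DB */
	volatile uint32_t *db_record;	/* DMA-visible word hardware polls */
	uint32_t rq_head;
};

static void ring_rq_db(struct demo_qp *qp, uint32_t qpn,
		       void (*mmio_write_db)(uint32_t tag, uint32_t pi))
{
	if (qp->has_record_db)
		*qp->db_record = qp->rq_head & PRODUCER_IDX_MASK;
	else
		mmio_write_db(qpn /* DB_TAG */, qp->rq_head /* DB_PI */);
}
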
657 struct hns_roce_v2_priv *priv = hr_dev->priv; in hns_roce_write512()
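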
658 struct hnae3_handle *handle = priv->handle; in hns_roce_write512()
659 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; in hns_roce_write512()
662 if (!hr_dev->dis_db && !ops->get_hw_reset_stat(handle)) in hns_roce_write512()
673 if (unlikely(qp->state == IB_QPS_ERR)) { in write_dwqe()
679 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl); in write_dwqe()
681 qp->sl >> HNS_ROCE_SL_SHIFT); in write_dwqe()
682 hr_reg_write(rc_sq_wqe, RC_SEND_WQE_WQE_INDEX, qp->sq.head); in write_dwqe()
684 hns_roce_write512(hr_dev, wqe, qp->sq.db_reg); in write_dwqe()
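
/*
 * write_dwqe()/hns_roce_write512() above implement the "direct WQE" fast
 * path: instead of ringing a doorbell and letting the device DMA-fetch the
 * WQE, the whole 64-byte WQE is pushed through the doorbell BAR, and the
 * write is skipped while the hardware reports a reset in progress. The
 * fragment does not show the copy primitive itself; a hedged kernel-style
 * sketch (not standalone), assuming __iowrite64_copy() underneath:
 */
static void write512_sketch(u64 *wqe, void __iomem *dest)
{
	/* 512 bits = eight 64-bit words, written back-to-back to the BAR */
	__iowrite64_copy(dest, wqe, 512 / 64);
}
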
691 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in hns_roce_v2_post_send()
692 struct ib_device *ibdev = &hr_dev->ib_dev; in hns_roce_v2_post_send()
702 spin_lock_irqsave(&qp->sq.lock, flags); in hns_roce_v2_post_send()
711 sge_idx = qp->next_sge; in hns_roce_v2_post_send()
713 for (nreq = 0; wr; ++nreq, wr = wr->next) { in hns_roce_v2_post_send()
714 if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in hns_roce_v2_post_send()
715 ret = -ENOMEM; in hns_roce_v2_post_send()
720 wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1); in hns_roce_v2_post_send()
722 if (unlikely(wr->num_sge > qp->sq.max_gs)) { in hns_roce_v2_post_send()
723 ibdev_err(ibdev, "num_sge = %d > qp->sq.max_gs = %u.\n", in hns_roce_v2_post_send()
724 wr->num_sge, qp->sq.max_gs); in hns_roce_v2_post_send()
725 ret = -EINVAL; in hns_roce_v2_post_send()
731 qp->sq.wrid[wqe_idx] = wr->wr_id; in hns_roce_v2_post_send()
733 ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1); in hns_roce_v2_post_send()
736 if (ibqp->qp_type == IB_QPT_RC) in hns_roce_v2_post_send()
749 qp->sq.head += nreq; in hns_roce_v2_post_send()
750 qp->next_sge = sge_idx; in hns_roce_v2_post_send()
753 (qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)) in hns_roce_v2_post_send()
759 spin_unlock_irqrestore(&qp->sq.lock, flags); in hns_roce_v2_post_send()
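
/*
 * Two ring idioms in the post-send loop above are worth spelling out: the
 * WQE slot is the ever-increasing head masked by the power-of-two ring size,
 * and the owner bit stored into each WQE is the complement of bit
 * ilog2(wqe_cnt) of that head, so it flips on every lap and lets hardware
 * tell fresh WQEs from stale ones left by the previous pass. A standalone
 * demonstration with an 8-entry ring:
 */
#include <stdio.h>

int main(void)
{
	unsigned int wqe_cnt = 8, log2_cnt = 3;

	for (unsigned int head = 6; head < 12; head++) {
		unsigned int slot  = head & (wqe_cnt - 1);
		unsigned int owner = (~((head >> log2_cnt) & 0x1)) & 0x1;

		printf("head=%2u slot=%u owner=%u\n", head, slot, owner);
	}
	return 0;	/* owner flips as head crosses 8, 16, ... */
}
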
767 if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) in check_recv_valid()
768 return -EIO; in check_recv_valid()
770 if (hr_qp->state == IB_QPS_RESET) in check_recv_valid()
771 return -EINVAL; in check_recv_valid()
782 for (i = 0, cnt = 0; i < wr->num_sge; i++) { in fill_recv_sge_to_wqe()
783 /* Skip zero-length sge */ in fill_recv_sge_to_wqe()
784 if (!wr->sg_list[i].length) in fill_recv_sge_to_wqe()
786 set_data_seg_v2(dseg + cnt, wr->sg_list + i); in fill_recv_sge_to_wqe()
799 (max_sge - cnt) * HNS_ROCE_SGE_SIZE); in fill_recv_sge_to_wqe()
809 fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge); in fill_rq_wqe()
816 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in hns_roce_v2_post_recv()
818 struct ib_device *ibdev = &hr_dev->ib_dev; in hns_roce_v2_post_recv()
823 spin_lock_irqsave(&hr_qp->rq.lock, flags); in hns_roce_v2_post_recv()
832 max_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge; in hns_roce_v2_post_recv()
833 for (nreq = 0; wr; ++nreq, wr = wr->next) { in hns_roce_v2_post_recv()
834 if (unlikely(hns_roce_wq_overflow(&hr_qp->rq, nreq, in hns_roce_v2_post_recv()
835 hr_qp->ibqp.recv_cq))) { in hns_roce_v2_post_recv()
836 ret = -ENOMEM; in hns_roce_v2_post_recv()
841 if (unlikely(wr->num_sge > max_sge)) { in hns_roce_v2_post_recv()
843 wr->num_sge, max_sge); in hns_roce_v2_post_recv()
844 ret = -EINVAL; in hns_roce_v2_post_recv()
849 wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1); in hns_roce_v2_post_recv()
851 hr_qp->rq.wrid[wqe_idx] = wr->wr_id; in hns_roce_v2_post_recv()
856 hr_qp->rq.head += nreq; in hns_roce_v2_post_recv()
860 spin_unlock_irqrestore(&hr_qp->rq.lock, flags); in hns_roce_v2_post_recv()
867 return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift); in get_srq_wqe_buf()
872 return hns_roce_buf_offset(idx_que->mtr.kmem, in get_idx_buf()
873 n << idx_que->entry_shift); in get_idx_buf()
879 spin_lock(&srq->lock); in hns_roce_free_srq_wqe()
881 bitmap_clear(srq->idx_que.bitmap, wqe_index, 1); in hns_roce_free_srq_wqe()
882 srq->idx_que.tail++; in hns_roce_free_srq_wqe()
884 spin_unlock(&srq->lock); in hns_roce_free_srq_wqe()
889 struct hns_roce_idx_que *idx_que = &srq->idx_que; in hns_roce_srqwq_overflow()
891 return idx_que->head - idx_que->tail >= srq->wqe_cnt; in hns_roce_srqwq_overflow()
897 struct ib_device *ib_dev = srq->ibsrq.device; in check_post_srq_valid()
899 if (unlikely(wr->num_sge > max_sge)) { in check_post_srq_valid()
901 "failed to check sge, wr->num_sge = %d, max_sge = %u.\n", in check_post_srq_valid()
902 wr->num_sge, max_sge); in check_post_srq_valid()
903 return -EINVAL; in check_post_srq_valid()
909 return -ENOMEM; in check_post_srq_valid()
917 struct hns_roce_idx_que *idx_que = &srq->idx_que; in get_srq_wqe_idx()
920 pos = find_first_zero_bit(idx_que->bitmap, srq->wqe_cnt); in get_srq_wqe_idx()
921 if (unlikely(pos == srq->wqe_cnt)) in get_srq_wqe_idx()
922 return -ENOSPC; in get_srq_wqe_idx()
924 bitmap_set(idx_que->bitmap, pos, 1); in get_srq_wqe_idx()
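
/*
 * get_srq_wqe_idx() above allocates SRQ WQE indexes from a bitmap: find the
 * first clear bit, set it, and report -ENOSPC when every entry is in use
 * (hns_roce_free_srq_wqe(), earlier, clears the bit and advances the tail).
 * A userspace equivalent of the find_first_zero_bit()/bitmap_set() pair:
 */
#include <stdio.h>
#include <errno.h>
#include <limits.h>

#define WQE_CNT 16u
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static unsigned long bitmap[(WQE_CNT + BITS_PER_LONG - 1) / BITS_PER_LONG];

static int get_wqe_idx(void)
{
	for (unsigned int pos = 0; pos < WQE_CNT; pos++) {
		unsigned long mask = 1UL << (pos % BITS_PER_LONG);
		unsigned long *word = &bitmap[pos / BITS_PER_LONG];

		if (!(*word & mask)) {
			*word |= mask;		/* bitmap_set(bitmap, pos, 1) */
			return (int)pos;
		}
	}
	return -ENOSPC;				/* index queue exhausted */
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("allocated wqe idx %d\n", get_wqe_idx());
	return 0;
}
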
931 struct hns_roce_idx_que *idx_que = &srq->idx_que; in fill_wqe_idx()
935 head = idx_que->head & (srq->wqe_cnt - 1); in fill_wqe_idx()
940 idx_que->head++; in fill_wqe_idx()
945 struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device); in update_srq_db()
948 hr_reg_write(&db, DB_TAG, srq->srqn); in update_srq_db()
950 hr_reg_write(&db, DB_PI, srq->idx_que.head); in update_srq_db()
952 hns_roce_write64(hr_dev, (__le32 *)&db, srq->db_reg); in update_srq_db()
967 spin_lock_irqsave(&srq->lock, flags); in hns_roce_v2_post_srq_recv()
969 max_sge = srq->max_gs - srq->rsv_sge; in hns_roce_v2_post_srq_recv()
970 for (nreq = 0; wr; ++nreq, wr = wr->next) { in hns_roce_v2_post_srq_recv()
984 fill_recv_sge_to_wqe(wr, wqe, max_sge, srq->rsv_sge); in hns_roce_v2_post_srq_recv()
986 srq->wrid[wqe_idx] = wr->wr_id; in hns_roce_v2_post_srq_recv()
990 if (srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB) in hns_roce_v2_post_srq_recv()
991 *srq->rdb.db_record = srq->idx_que.head & in hns_roce_v2_post_srq_recv()
997 spin_unlock_irqrestore(&srq->lock, flags); in hns_roce_v2_post_srq_recv()
1015 hr_dev->is_reset = true; in hns_roce_v2_cmd_hw_reseted()
1016 hr_dev->dis_db = true; in hns_roce_v2_cmd_hw_reseted()
1032 struct hns_roce_v2_priv *priv = hr_dev->priv; in hns_roce_v2_cmd_hw_resetting()
1033 struct hnae3_handle *handle = priv->handle; in hns_roce_v2_cmd_hw_resetting()
1034 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; in hns_roce_v2_cmd_hw_resetting()
1047 hr_dev->dis_db = true; in hns_roce_v2_cmd_hw_resetting()
1049 ret = read_poll_timeout(ops->ae_dev_reset_cnt, val, in hns_roce_v2_cmd_hw_resetting()
1050 val > hr_dev->reset_cnt, HW_RESET_SLEEP_US, in hns_roce_v2_cmd_hw_resetting()
1053 hr_dev->is_reset = true; in hns_roce_v2_cmd_hw_resetting()
1055 if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT || in hns_roce_v2_cmd_hw_resetting()
1064 struct hns_roce_v2_priv *priv = hr_dev->priv; in hns_roce_v2_cmd_sw_resetting()
1065 struct hnae3_handle *handle = priv->handle; in hns_roce_v2_cmd_sw_resetting()
1066 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; in hns_roce_v2_cmd_sw_resetting()
1072 hr_dev->dis_db = true; in hns_roce_v2_cmd_sw_resetting()
1073 if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) in hns_roce_v2_cmd_sw_resetting()
1074 hr_dev->is_reset = true; in hns_roce_v2_cmd_sw_resetting()
1082 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; in check_aedev_reset_status()
1092 * reset_cnt -- The count of completed hardware resets. in check_aedev_reset_status()
1093 * hw_resetting -- Whether the hardware device is resetting now. in check_aedev_reset_status()
1094 * sw_resetting -- Whether the NIC's software reset is running now. in check_aedev_reset_status()
1096 instance_stage = handle->rinfo.instance_state; in check_aedev_reset_status()
1097 reset_stage = handle->rinfo.reset_state; in check_aedev_reset_status()
1098 reset_cnt = ops->ae_dev_reset_cnt(handle); in check_aedev_reset_status()
1099 if (reset_cnt != hr_dev->reset_cnt) in check_aedev_reset_status()
1103 hw_resetting = ops->get_cmdq_stat(handle); in check_aedev_reset_status()
1108 sw_resetting = ops->ae_dev_resetting(handle); in check_aedev_reset_status()
1117 struct hns_roce_v2_priv *priv = hr_dev->priv; in check_device_is_in_reset()
1118 struct hnae3_handle *handle = priv->handle; in check_device_is_in_reset()
1119 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; in check_device_is_in_reset()
1121 if (hr_dev->reset_cnt != ops->ae_dev_reset_cnt(handle)) in check_device_is_in_reset()
1124 if (ops->get_hw_reset_stat(handle)) in check_device_is_in_reset()
1127 if (ops->ae_dev_resetting(handle)) in check_device_is_in_reset()
1135 struct hns_roce_v2_priv *priv = hr_dev->priv; in v2_chk_mbox_is_avail()
1138 if (hr_dev->is_reset) in v2_chk_mbox_is_avail()
1141 status = check_aedev_reset_status(hr_dev, priv->handle); in v2_chk_mbox_is_avail()
1151 int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc); in hns_roce_alloc_cmq_desc()
1153 ring->desc = dma_alloc_coherent(hr_dev->dev, size, in hns_roce_alloc_cmq_desc()
1154 &ring->desc_dma_addr, GFP_KERNEL); in hns_roce_alloc_cmq_desc()
1155 if (!ring->desc) in hns_roce_alloc_cmq_desc()
1156 return -ENOMEM; in hns_roce_alloc_cmq_desc()
1164 dma_free_coherent(hr_dev->dev, in hns_roce_free_cmq_desc()
1165 ring->desc_num * sizeof(struct hns_roce_cmq_desc), in hns_roce_free_cmq_desc()
1166 ring->desc, ring->desc_dma_addr); in hns_roce_free_cmq_desc()
1168 ring->desc_dma_addr = 0; in hns_roce_free_cmq_desc()
1177 csq->desc_num = CMD_CSQ_DESC_NUM; in init_csq()
1178 spin_lock_init(&csq->lock); in init_csq()
1179 csq->flag = TYPE_CSQ; in init_csq()
1180 csq->head = 0; in init_csq()
1186 dma = csq->desc_dma_addr; in init_csq()
1190 (u32)csq->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S); in init_csq()
1201 struct hns_roce_v2_priv *priv = hr_dev->priv; in hns_roce_v2_cmq_init()
1204 priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT; in hns_roce_v2_cmq_init()
1206 ret = init_csq(hr_dev, &priv->cmq.csq); in hns_roce_v2_cmq_init()
1208 dev_err(hr_dev->dev, "failed to init CSQ, ret = %d.\n", ret); in hns_roce_v2_cmq_init()
1215 struct hns_roce_v2_priv *priv = hr_dev->priv; in hns_roce_v2_cmq_exit()
1217 hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq); in hns_roce_v2_cmq_exit()
1225 desc->opcode = cpu_to_le16(opcode); in hns_roce_cmq_setup_basic_desc()
1226 desc->flag = cpu_to_le16(HNS_ROCE_CMD_FLAG_IN); in hns_roce_cmq_setup_basic_desc()
1228 desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR); in hns_roce_cmq_setup_basic_desc()
1230 desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR); in hns_roce_cmq_setup_basic_desc()
1236 struct hns_roce_v2_priv *priv = hr_dev->priv; in hns_roce_cmq_csq_done()
1238 return tail == priv->cmq.csq.head; in hns_roce_cmq_csq_done()
1243 struct hns_roce_v2_priv *priv = hr_dev->priv; in update_cmdq_status()
1244 struct hnae3_handle *handle = priv->handle; in update_cmdq_status()
1246 if (handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT || in update_cmdq_status()
1247 handle->rinfo.instance_state == HNS_ROCE_STATE_INIT) in update_cmdq_status()
1248 hr_dev->cmd.state = HNS_ROCE_CMDQ_STATE_FATAL_ERR; in update_cmdq_status()
1255 {CMD_NO_AUTH, -EPERM}, in hns_roce_cmd_err_convert_errno()
1256 {CMD_NOT_EXIST, -EOPNOTSUPP}, in hns_roce_cmd_err_convert_errno()
1257 {CMD_CRQ_FULL, -EXFULL}, in hns_roce_cmd_err_convert_errno()
1258 {CMD_NEXT_ERR, -ENOSR}, in hns_roce_cmd_err_convert_errno()
1259 {CMD_NOT_EXEC, -ENOTBLK}, in hns_roce_cmd_err_convert_errno()
1260 {CMD_PARA_ERR, -EINVAL}, in hns_roce_cmd_err_convert_errno()
1261 {CMD_RESULT_ERR, -ERANGE}, in hns_roce_cmd_err_convert_errno()
1262 {CMD_TIMEOUT, -ETIME}, in hns_roce_cmd_err_convert_errno()
1263 {CMD_HILINK_ERR, -ENOLINK}, in hns_roce_cmd_err_convert_errno()
1264 {CMD_INFO_ILLEGAL, -ENXIO}, in hns_roce_cmd_err_convert_errno()
1265 {CMD_INVALID, -EBADR}, in hns_roce_cmd_err_convert_errno()
1272 return -EIO; in hns_roce_cmd_err_convert_errno()
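
/*
 * The function above is a table-driven status-to-errno translation: firmware
 * return codes map through a small array, with -EIO as the default for
 * anything unrecognised. A standalone restatement; the numeric status codes
 * below are invented for the demo.
 */
#include <stdio.h>
#include <errno.h>

struct err_map { unsigned int status; int err; };

static const struct err_map map[] = {
	{ 1 /* e.g. CMD_NO_AUTH */,   -EPERM },
	{ 2 /* e.g. CMD_NOT_EXIST */, -EOPNOTSUPP },
	{ 3 /* e.g. CMD_PARA_ERR */,  -EINVAL },
};

static int convert_errno(unsigned int status)
{
	for (unsigned int i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].status == status)
			return map[i].err;

	return -EIO;	/* unknown firmware status */
}

int main(void)
{
	printf("status 3 -> %d, status 99 -> %d\n",
	       convert_errno(3), convert_errno(99));
	return 0;
}
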
1304 struct hns_roce_v2_priv *priv = hr_dev->priv; in __hns_roce_cmq_send_one()
1305 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq; in __hns_roce_cmq_send_one()
1311 tail = csq->head; in __hns_roce_cmq_send_one()
1314 csq->desc[csq->head++] = desc[i]; in __hns_roce_cmq_send_one()
1315 if (csq->head == csq->desc_num) in __hns_roce_cmq_send_one()
1316 csq->head = 0; in __hns_roce_cmq_send_one()
1320 roce_write(hr_dev, ROCEE_TX_CMQ_PI_REG, csq->head); in __hns_roce_cmq_send_one()
1322 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_CNT]); in __hns_roce_cmq_send_one()
1329 desc_ret = le16_to_cpu(csq->desc[tail++].retval); in __hns_roce_cmq_send_one()
1330 if (tail == csq->desc_num) in __hns_roce_cmq_send_one()
1340 dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n", in __hns_roce_cmq_send_one()
1341 csq->head, tail); in __hns_roce_cmq_send_one()
1342 csq->head = tail; in __hns_roce_cmq_send_one()
1346 ret = -EAGAIN; in __hns_roce_cmq_send_one()
1350 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_ERR_CNT]); in __hns_roce_cmq_send_one()
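
/*
 * The submission path above is a classic producer/consumer ring: software
 * copies descriptors in at head and wraps at desc_num, hardware advances a
 * tail register as it consumes them, and hns_roce_cmq_csq_done() earlier
 * declares the queue drained once tail catches up with head. A standalone
 * model of that progress check:
 */
#include <stdio.h>

#define DESC_NUM 8u

static unsigned int head;		/* software producer index */

static void post_descs(unsigned int n)
{
	while (n--)
		if (++head == DESC_NUM)
			head = 0;	/* wrap, as in the fragment above */
}

int main(void)
{
	unsigned int tail = 0;		/* hardware consumer (simulated) */

	post_descs(5);
	tail = 5;			/* device consumed all five */
	printf("csq done: %s\n", tail == head ? "yes" : "no");
	return 0;
}
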
1358 struct hns_roce_v2_priv *priv = hr_dev->priv; in __hns_roce_cmq_send()
1359 struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq; in __hns_roce_cmq_send()
1360 u16 opcode = le16_to_cpu(desc->opcode); in __hns_roce_cmq_send()
1361 u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout); in __hns_roce_cmq_send()
1368 try_cnt--; in __hns_roce_cmq_send()
1370 spin_lock_bh(&csq->lock); in __hns_roce_cmq_send()
1371 rsv_tail = csq->head; in __hns_roce_cmq_send()
1373 if (opcode == HNS_ROCE_OPC_POST_MB && ret == -ETIME && in __hns_roce_cmq_send()
1375 spin_unlock_bh(&csq->lock); in __hns_roce_cmq_send()
1381 desc[i] = csq->desc[rsv_tail++]; in __hns_roce_cmq_send()
1382 if (rsv_tail == csq->desc_num) in __hns_roce_cmq_send()
1385 spin_unlock_bh(&csq->lock); in __hns_roce_cmq_send()
1390 dev_err_ratelimited(hr_dev->dev, in __hns_roce_cmq_send()
1403 if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR) in hns_roce_cmq_send()
1404 return -EIO; in hns_roce_cmq_send()
1407 return busy ? -EBUSY : 0; in hns_roce_cmq_send()
1412 return busy ? -EBUSY : 0; in hns_roce_cmq_send()
1428 ret = hns_roce_cmd_mbox(hr_dev, base_addr, mbox->dma, cmd, tag); in config_hem_ba_to_hw()
1445 hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version); in hns_roce_cmq_query_hw_info()
1446 hr_dev->vendor_id = hr_dev->pci_dev->vendor; in hns_roce_cmq_query_hw_info()
1454 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; in func_clr_hw_resetting_state()
1457 hr_dev->dis_db = true; in func_clr_hw_resetting_state()
1459 dev_warn(hr_dev->dev, in func_clr_hw_resetting_state()
1463 if (!ops->get_hw_reset_stat(handle)) { in func_clr_hw_resetting_state()
1464 hr_dev->is_reset = true; in func_clr_hw_resetting_state()
1465 dev_info(hr_dev->dev, in func_clr_hw_resetting_state()
1470 end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT; in func_clr_hw_resetting_state()
1473 dev_warn(hr_dev->dev, "func clear failed.\n"); in func_clr_hw_resetting_state()
1479 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; in func_clr_sw_resetting_state()
1482 hr_dev->dis_db = true; in func_clr_sw_resetting_state()
1484 dev_warn(hr_dev->dev, in func_clr_sw_resetting_state()
1488 if (ops->ae_dev_reset_cnt(handle) != in func_clr_sw_resetting_state()
1489 hr_dev->reset_cnt) { in func_clr_sw_resetting_state()
1490 hr_dev->is_reset = true; in func_clr_sw_resetting_state()
1491 dev_info(hr_dev->dev, in func_clr_sw_resetting_state()
1496 end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT; in func_clr_sw_resetting_state()
1499 dev_warn(hr_dev->dev, "func clear failed because of unfinished sw reset\n"); in func_clr_sw_resetting_state()
1505 struct hns_roce_v2_priv *priv = hr_dev->priv; in hns_roce_func_clr_rst_proc()
1506 struct hnae3_handle *handle = priv->handle; in hns_roce_func_clr_rst_proc()
1507 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; in hns_roce_func_clr_rst_proc()
1509 if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt) { in hns_roce_func_clr_rst_proc()
1510 hr_dev->dis_db = true; in hns_roce_func_clr_rst_proc()
1511 hr_dev->is_reset = true; in hns_roce_func_clr_rst_proc()
1512 dev_info(hr_dev->dev, "func clear success after reset.\n"); in hns_roce_func_clr_rst_proc()
1516 if (ops->get_hw_reset_stat(handle)) { in hns_roce_func_clr_rst_proc()
1521 if (ops->ae_dev_resetting(handle) && in hns_roce_func_clr_rst_proc()
1522 handle->rinfo.instance_state == HNS_ROCE_STATE_INIT) { in hns_roce_func_clr_rst_proc()
1528 dev_warn(hr_dev->dev, in hns_roce_func_clr_rst_proc()
1531 dev_warn(hr_dev->dev, "func clear failed.\n"); in hns_roce_func_clr_rst_proc()
1547 resp->rst_funcid_en = cpu_to_le32(vf_id); in __hns_roce_function_clear()
1552 dev_err(hr_dev->dev, "func clear write failed, ret = %d.\n", in __hns_roce_function_clear()
1563 end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT; in __hns_roce_function_clear()
1568 resp->rst_funcid_en = cpu_to_le32(vf_id); in __hns_roce_function_clear()
1575 hr_dev->is_reset = true; in __hns_roce_function_clear()
1604 if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR) in hns_roce_function_clear()
1607 for (i = hr_dev->func_num - 1; i >= 0; i--) { in hns_roce_function_clear()
1615 ibdev_err(&hr_dev->ib_dev, in hns_roce_function_clear()
1630 ibdev_err(&hr_dev->ib_dev, in hns_roce_clear_extdb_list_info()
1649 hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver)); in hns_roce_query_fw_ver()
1659 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { in hns_roce_query_func_info()
1660 hr_dev->func_num = 1; in hns_roce_query_func_info()
1668 hr_dev->func_num = 1; in hns_roce_query_func_info()
1672 hr_dev->func_num = le32_to_cpu(desc.func_info.own_func_num); in hns_roce_query_func_info()
1673 hr_dev->cong_algo_tmpl_id = le32_to_cpu(desc.func_info.own_mac_id); in hns_roce_query_func_info()
1689 if (port > hr_dev->caps.num_ports) in hns_roce_hw_v2_query_counter()
1690 return -EINVAL; in hns_roce_hw_v2_query_counter()
1695 return -ENOMEM; in hns_roce_hw_v2_query_counter()
1700 if (i != desc_num - 1) in hns_roce_hw_v2_query_counter()
1706 ibdev_err(&hr_dev->ib_dev, in hns_roce_hw_v2_query_counter()
1737 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) in hns_roce_config_global_param()
1753 struct hns_roce_caps *caps = &hr_dev->caps; in load_func_res_caps()
1763 func_num = hr_dev->func_num; in load_func_res_caps()
1774 caps->qpc_bt_num = hr_reg_read(r_a, FUNC_RES_A_QPC_BT_NUM) / func_num; in load_func_res_caps()
1775 caps->srqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_SRQC_BT_NUM) / func_num; in load_func_res_caps()
1776 caps->cqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_CQC_BT_NUM) / func_num; in load_func_res_caps()
1777 caps->mpt_bt_num = hr_reg_read(r_a, FUNC_RES_A_MPT_BT_NUM) / func_num; in load_func_res_caps()
1778 caps->eqc_bt_num = hr_reg_read(r_a, FUNC_RES_A_EQC_BT_NUM) / func_num; in load_func_res_caps()
1779 caps->smac_bt_num = hr_reg_read(r_b, FUNC_RES_B_SMAC_NUM) / func_num; in load_func_res_caps()
1780 caps->sgid_bt_num = hr_reg_read(r_b, FUNC_RES_B_SGID_NUM) / func_num; in load_func_res_caps()
1781 caps->sccc_bt_num = hr_reg_read(r_b, FUNC_RES_B_SCCC_BT_NUM) / func_num; in load_func_res_caps()
1784 caps->sl_num = hr_reg_read(r_b, FUNC_RES_V_QID_NUM) / func_num; in load_func_res_caps()
1785 caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_V_GMV_BT_NUM) / in load_func_res_caps()
1788 caps->sl_num = hr_reg_read(r_b, FUNC_RES_B_QID_NUM) / func_num; in load_func_res_caps()
1789 caps->gmv_bt_num = hr_reg_read(r_b, FUNC_RES_B_GMV_BT_NUM) / in load_func_res_caps()
1800 struct hns_roce_caps *caps = &hr_dev->caps; in load_pf_timer_res_caps()
1810 caps->qpc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_QPC_ITEM_NUM); in load_pf_timer_res_caps()
1811 caps->cqc_timer_bt_num = hr_reg_read(req, PF_TIMER_RES_CQC_ITEM_NUM); in load_pf_timer_res_caps()
1818 struct device *dev = hr_dev->dev; in hns_roce_query_pf_resource()
1837 struct device *dev = hr_dev->dev; in hns_roce_query_vf_resource()
1856 swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL); in __hns_roce_set_vf_switch_param()
1876 for (vf_id = 0; vf_id < hr_dev->func_num; vf_id++) { in hns_roce_set_vf_switch_param()
1890 struct hns_roce_caps *caps = &hr_dev->caps; in config_vf_hem_resource()
1898 hr_reg_write(r_a, FUNC_RES_A_QPC_BT_NUM, caps->qpc_bt_num); in config_vf_hem_resource()
1899 hr_reg_write(r_a, FUNC_RES_A_QPC_BT_IDX, vf_id * caps->qpc_bt_num); in config_vf_hem_resource()
1900 hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_NUM, caps->srqc_bt_num); in config_vf_hem_resource()
1901 hr_reg_write(r_a, FUNC_RES_A_SRQC_BT_IDX, vf_id * caps->srqc_bt_num); in config_vf_hem_resource()
1902 hr_reg_write(r_a, FUNC_RES_A_CQC_BT_NUM, caps->cqc_bt_num); in config_vf_hem_resource()
1903 hr_reg_write(r_a, FUNC_RES_A_CQC_BT_IDX, vf_id * caps->cqc_bt_num); in config_vf_hem_resource()
1904 hr_reg_write(r_a, FUNC_RES_A_MPT_BT_NUM, caps->mpt_bt_num); in config_vf_hem_resource()
1905 hr_reg_write(r_a, FUNC_RES_A_MPT_BT_IDX, vf_id * caps->mpt_bt_num); in config_vf_hem_resource()
1906 hr_reg_write(r_a, FUNC_RES_A_EQC_BT_NUM, caps->eqc_bt_num); in config_vf_hem_resource()
1907 hr_reg_write(r_a, FUNC_RES_A_EQC_BT_IDX, vf_id * caps->eqc_bt_num); in config_vf_hem_resource()
1908 hr_reg_write(r_b, FUNC_RES_V_QID_NUM, caps->sl_num); in config_vf_hem_resource()
1909 hr_reg_write(r_b, FUNC_RES_B_QID_IDX, vf_id * caps->sl_num); in config_vf_hem_resource()
1910 hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_NUM, caps->sccc_bt_num); in config_vf_hem_resource()
1911 hr_reg_write(r_b, FUNC_RES_B_SCCC_BT_IDX, vf_id * caps->sccc_bt_num); in config_vf_hem_resource()
1913 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) { in config_vf_hem_resource()
1914 hr_reg_write(r_b, FUNC_RES_V_GMV_BT_NUM, caps->gmv_bt_num); in config_vf_hem_resource()
1916 vf_id * caps->gmv_bt_num); in config_vf_hem_resource()
1918 hr_reg_write(r_b, FUNC_RES_B_SGID_NUM, caps->sgid_bt_num); in config_vf_hem_resource()
1920 vf_id * caps->sgid_bt_num); in config_vf_hem_resource()
1921 hr_reg_write(r_b, FUNC_RES_B_SMAC_NUM, caps->smac_bt_num); in config_vf_hem_resource()
1923 vf_id * caps->smac_bt_num); in config_vf_hem_resource()
1931 u32 func_num = max_t(u32, 1, hr_dev->func_num); in hns_roce_alloc_vf_resource()
1938 dev_err(hr_dev->dev, in hns_roce_alloc_vf_resource()
1939 "failed to config vf-%u hem res, ret = %d.\n", in hns_roce_alloc_vf_resource()
1952 struct hns_roce_caps *caps = &hr_dev->caps; in hns_roce_v2_set_bt()
1957 caps->qpc_ba_pg_sz + PG_SHIFT_OFFSET); in hns_roce_v2_set_bt()
1959 caps->qpc_buf_pg_sz + PG_SHIFT_OFFSET); in hns_roce_v2_set_bt()
1961 to_hr_hem_hopnum(caps->qpc_hop_num, caps->num_qps)); in hns_roce_v2_set_bt()
1964 caps->srqc_ba_pg_sz + PG_SHIFT_OFFSET); in hns_roce_v2_set_bt()
1966 caps->srqc_buf_pg_sz + PG_SHIFT_OFFSET); in hns_roce_v2_set_bt()
1968 to_hr_hem_hopnum(caps->srqc_hop_num, caps->num_srqs)); in hns_roce_v2_set_bt()
1971 caps->cqc_ba_pg_sz + PG_SHIFT_OFFSET); in hns_roce_v2_set_bt()
1973 caps->cqc_buf_pg_sz + PG_SHIFT_OFFSET); in hns_roce_v2_set_bt()
1975 to_hr_hem_hopnum(caps->cqc_hop_num, caps->num_cqs)); in hns_roce_v2_set_bt()
1978 caps->mpt_ba_pg_sz + PG_SHIFT_OFFSET); in hns_roce_v2_set_bt()
1980 caps->mpt_buf_pg_sz + PG_SHIFT_OFFSET); in hns_roce_v2_set_bt()
1982 to_hr_hem_hopnum(caps->mpt_hop_num, caps->num_mtpts)); in hns_roce_v2_set_bt()
1985 caps->sccc_ba_pg_sz + PG_SHIFT_OFFSET); in hns_roce_v2_set_bt()
1987 caps->sccc_buf_pg_sz + PG_SHIFT_OFFSET); in hns_roce_v2_set_bt()
1989 to_hr_hem_hopnum(caps->sccc_hop_num, caps->num_qps)); in hns_roce_v2_set_bt()
2038 struct hns_roce_caps *caps = &hr_dev->caps; in set_hem_page_size()
2041 caps->eqe_ba_pg_sz = 0; in set_hem_page_size()
2042 caps->eqe_buf_pg_sz = 0; in set_hem_page_size()
2045 caps->llm_buf_pg_sz = 0; in set_hem_page_size()
2048 caps->mpt_ba_pg_sz = 0; in set_hem_page_size()
2049 caps->mpt_buf_pg_sz = 0; in set_hem_page_size()
2050 caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K; in set_hem_page_size()
2051 caps->pbl_buf_pg_sz = 0; in set_hem_page_size()
2052 calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num, in set_hem_page_size()
2053 caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz, in set_hem_page_size()
2057 caps->qpc_ba_pg_sz = 0; in set_hem_page_size()
2058 caps->qpc_buf_pg_sz = 0; in set_hem_page_size()
2059 caps->qpc_timer_ba_pg_sz = 0; in set_hem_page_size()
2060 caps->qpc_timer_buf_pg_sz = 0; in set_hem_page_size()
2061 caps->sccc_ba_pg_sz = 0; in set_hem_page_size()
2062 caps->sccc_buf_pg_sz = 0; in set_hem_page_size()
2063 caps->mtt_ba_pg_sz = 0; in set_hem_page_size()
2064 caps->mtt_buf_pg_sz = 0; in set_hem_page_size()
2065 calc_pg_sz(caps->num_qps, caps->qpc_sz, caps->qpc_hop_num, in set_hem_page_size()
2066 caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz, in set_hem_page_size()
2069 if (caps->flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) in set_hem_page_size()
2070 calc_pg_sz(caps->num_qps, caps->sccc_sz, caps->sccc_hop_num, in set_hem_page_size()
2071 caps->sccc_bt_num, &caps->sccc_buf_pg_sz, in set_hem_page_size()
2072 &caps->sccc_ba_pg_sz, HEM_TYPE_SCCC); in set_hem_page_size()
2075 caps->cqc_ba_pg_sz = 0; in set_hem_page_size()
2076 caps->cqc_buf_pg_sz = 0; in set_hem_page_size()
2077 caps->cqc_timer_ba_pg_sz = 0; in set_hem_page_size()
2078 caps->cqc_timer_buf_pg_sz = 0; in set_hem_page_size()
2079 caps->cqe_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K; in set_hem_page_size()
2080 caps->cqe_buf_pg_sz = 0; in set_hem_page_size()
2081 calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num, in set_hem_page_size()
2082 caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz, in set_hem_page_size()
2084 calc_pg_sz(caps->max_cqes, caps->cqe_sz, caps->cqe_hop_num, in set_hem_page_size()
2085 1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE); in set_hem_page_size()
2088 if (caps->flags & HNS_ROCE_CAP_FLAG_SRQ) { in set_hem_page_size()
2089 caps->srqc_ba_pg_sz = 0; in set_hem_page_size()
2090 caps->srqc_buf_pg_sz = 0; in set_hem_page_size()
2091 caps->srqwqe_ba_pg_sz = 0; in set_hem_page_size()
2092 caps->srqwqe_buf_pg_sz = 0; in set_hem_page_size()
2093 caps->idx_ba_pg_sz = 0; in set_hem_page_size()
2094 caps->idx_buf_pg_sz = 0; in set_hem_page_size()
2095 calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz, in set_hem_page_size()
2096 caps->srqc_hop_num, caps->srqc_bt_num, in set_hem_page_size()
2097 &caps->srqc_buf_pg_sz, &caps->srqc_ba_pg_sz, in set_hem_page_size()
2099 calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz, in set_hem_page_size()
2100 caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz, in set_hem_page_size()
2101 &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE); in set_hem_page_size()
2102 calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz, in set_hem_page_size()
2103 caps->idx_hop_num, 1, &caps->idx_buf_pg_sz, in set_hem_page_size()
2104 &caps->idx_ba_pg_sz, HEM_TYPE_IDX); in set_hem_page_size()
2108 caps->gmv_ba_pg_sz = 0; in set_hem_page_size()
2109 caps->gmv_buf_pg_sz = 0; in set_hem_page_size()
2116 struct hns_roce_caps *caps = &hr_dev->caps; in apply_func_caps()
2117 struct hns_roce_v2_priv *priv = hr_dev->priv; in apply_func_caps()
2120 caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ; in apply_func_caps()
2121 caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ; in apply_func_caps()
2122 caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ; in apply_func_caps()
2124 caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM; in apply_func_caps()
2125 caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0; in apply_func_caps()
2126 caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0; in apply_func_caps()
2128 caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS; in apply_func_caps()
2129 caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS; in apply_func_caps()
2131 if (!caps->num_comp_vectors) in apply_func_caps()
2132 caps->num_comp_vectors = in apply_func_caps()
2133 min_t(u32, caps->eqc_bt_num - HNS_ROCE_V2_AEQE_VEC_NUM, in apply_func_caps()
2134 (u32)priv->handle->rinfo.num_vectors - in apply_func_caps()
2137 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) { in apply_func_caps()
2138 caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM; in apply_func_caps()
2139 caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE; in apply_func_caps()
2140 caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE; in apply_func_caps()
2143 caps->qpc_sz = HNS_ROCE_V3_QPC_SZ; in apply_func_caps()
2144 caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE; in apply_func_caps()
2145 caps->sccc_sz = HNS_ROCE_V3_SCCC_SZ; in apply_func_caps()
2148 caps->gmv_entry_sz = HNS_ROCE_V3_GMV_ENTRY_SZ; in apply_func_caps()
2150 caps->gmv_hop_num = HNS_ROCE_HOP_NUM_0; in apply_func_caps()
2154 * and userspace struct ibv_global_route are u8/uint8_t (0-255). in apply_func_caps()
2156 caps->gid_table_len[0] = min_t(u32, MAX_GID_TBL_LEN, in apply_func_caps()
2157 caps->gmv_bt_num * in apply_func_caps()
2158 (HNS_HW_PAGE_SIZE / caps->gmv_entry_sz)); in apply_func_caps()
2160 caps->gmv_entry_num = caps->gmv_bt_num * (HNS_HW_PAGE_SIZE / in apply_func_caps()
2161 caps->gmv_entry_sz); in apply_func_caps()
2163 u32 func_num = max_t(u32, 1, hr_dev->func_num); in apply_func_caps()
2165 caps->eqe_hop_num = HNS_ROCE_V2_EQE_HOP_NUM; in apply_func_caps()
2166 caps->ceqe_size = HNS_ROCE_CEQE_SIZE; in apply_func_caps()
2167 caps->aeqe_size = HNS_ROCE_AEQE_SIZE; in apply_func_caps()
2168 caps->gid_table_len[0] /= func_num; in apply_func_caps()
2171 if (hr_dev->is_vf) { in apply_func_caps()
2172 caps->default_aeq_arm_st = 0x3; in apply_func_caps()
2173 caps->default_ceq_arm_st = 0x3; in apply_func_caps()
2174 caps->default_ceq_max_cnt = 0x1; in apply_func_caps()
2175 caps->default_ceq_period = 0x10; in apply_func_caps()
2176 caps->default_aeq_max_cnt = 0x1; in apply_func_caps()
2177 caps->default_aeq_period = 0x10; in apply_func_caps()
2186 struct hns_roce_caps *caps = &hr_dev->caps; in hns_roce_query_caps()
2198 cmd = hr_dev->is_vf ? HNS_ROCE_OPC_QUERY_VF_CAPS_NUM : in hns_roce_query_caps()
2203 if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1)) in hns_roce_query_caps()
2219 caps->local_ca_ack_delay = resp_a->local_ca_ack_delay; in hns_roce_query_caps()
2220 caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg); in hns_roce_query_caps()
2221 caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline); in hns_roce_query_caps()
2222 caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg); in hns_roce_query_caps()
2223 caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg); in hns_roce_query_caps()
2224 caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges); in hns_roce_query_caps()
2225 caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges); in hns_roce_query_caps()
2226 caps->num_aeq_vectors = resp_a->num_aeq_vectors; in hns_roce_query_caps()
2227 caps->num_other_vectors = resp_a->num_other_vectors; in hns_roce_query_caps()
2228 caps->max_sq_desc_sz = resp_a->max_sq_desc_sz; in hns_roce_query_caps()
2229 caps->max_rq_desc_sz = resp_a->max_rq_desc_sz; in hns_roce_query_caps()
2231 caps->mtpt_entry_sz = resp_b->mtpt_entry_sz; in hns_roce_query_caps()
2232 caps->irrl_entry_sz = resp_b->irrl_entry_sz; in hns_roce_query_caps()
2233 caps->trrl_entry_sz = resp_b->trrl_entry_sz; in hns_roce_query_caps()
2234 caps->cqc_entry_sz = resp_b->cqc_entry_sz; in hns_roce_query_caps()
2235 caps->srqc_entry_sz = resp_b->srqc_entry_sz; in hns_roce_query_caps()
2236 caps->idx_entry_sz = resp_b->idx_entry_sz; in hns_roce_query_caps()
2237 caps->sccc_sz = resp_b->sccc_sz; in hns_roce_query_caps()
2238 caps->max_mtu = resp_b->max_mtu; in hns_roce_query_caps()
2239 caps->min_cqes = resp_b->min_cqes; in hns_roce_query_caps()
2240 caps->min_wqes = resp_b->min_wqes; in hns_roce_query_caps()
2241 caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap); in hns_roce_query_caps()
2242 caps->pkey_table_len[0] = resp_b->pkey_table_len; in hns_roce_query_caps()
2243 caps->phy_num_uars = resp_b->phy_num_uars; in hns_roce_query_caps()
2244 ctx_hop_num = resp_b->ctx_hop_num; in hns_roce_query_caps()
2245 pbl_hop_num = resp_b->pbl_hop_num; in hns_roce_query_caps()
2247 caps->num_pds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_PDS); in hns_roce_query_caps()
2249 caps->flags = hr_reg_read(resp_c, PF_CAPS_C_CAP_FLAGS); in hns_roce_query_caps()
2250 caps->flags |= le16_to_cpu(resp_d->cap_flags_ex) << in hns_roce_query_caps()
2253 caps->num_cqs = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_CQS); in hns_roce_query_caps()
2254 caps->gid_table_len[0] = hr_reg_read(resp_c, PF_CAPS_C_MAX_GID); in hns_roce_query_caps()
2255 caps->max_cqes = 1 << hr_reg_read(resp_c, PF_CAPS_C_CQ_DEPTH); in hns_roce_query_caps()
2256 caps->num_xrcds = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_XRCDS); in hns_roce_query_caps()
2257 caps->num_mtpts = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_MRWS); in hns_roce_query_caps()
2258 caps->num_qps = 1 << hr_reg_read(resp_c, PF_CAPS_C_NUM_QPS); in hns_roce_query_caps()
2259 caps->max_qp_init_rdma = hr_reg_read(resp_c, PF_CAPS_C_MAX_ORD); in hns_roce_query_caps()
2260 caps->max_qp_dest_rdma = caps->max_qp_init_rdma; in hns_roce_query_caps()
2261 caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth); in hns_roce_query_caps()
2263 caps->num_srqs = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_SRQS); in hns_roce_query_caps()
2264 caps->cong_cap = hr_reg_read(resp_d, PF_CAPS_D_CONG_CAP); in hns_roce_query_caps()
2265 caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth); in hns_roce_query_caps()
2266 caps->ceqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_CEQ_DEPTH); in hns_roce_query_caps()
2267 caps->num_comp_vectors = hr_reg_read(resp_d, PF_CAPS_D_NUM_CEQS); in hns_roce_query_caps()
2268 caps->aeqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_AEQ_DEPTH); in hns_roce_query_caps()
2269 caps->default_cong_type = hr_reg_read(resp_d, PF_CAPS_D_DEFAULT_ALG); in hns_roce_query_caps()
2270 caps->reserved_pds = hr_reg_read(resp_d, PF_CAPS_D_RSV_PDS); in hns_roce_query_caps()
2271 caps->num_uars = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_UARS); in hns_roce_query_caps()
2272 caps->reserved_qps = hr_reg_read(resp_d, PF_CAPS_D_RSV_QPS); in hns_roce_query_caps()
2273 caps->reserved_uars = hr_reg_read(resp_d, PF_CAPS_D_RSV_UARS); in hns_roce_query_caps()
2275 caps->reserved_mrws = hr_reg_read(resp_e, PF_CAPS_E_RSV_MRWS); in hns_roce_query_caps()
2276 caps->chunk_sz = 1 << hr_reg_read(resp_e, PF_CAPS_E_CHUNK_SIZE_SHIFT); in hns_roce_query_caps()
2277 caps->reserved_cqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_CQS); in hns_roce_query_caps()
2278 caps->reserved_xrcds = hr_reg_read(resp_e, PF_CAPS_E_RSV_XRCDS); in hns_roce_query_caps()
2279 caps->reserved_srqs = hr_reg_read(resp_e, PF_CAPS_E_RSV_SRQS); in hns_roce_query_caps()
2280 caps->reserved_lkey = hr_reg_read(resp_e, PF_CAPS_E_RSV_LKEYS); in hns_roce_query_caps()
2282 caps->qpc_hop_num = ctx_hop_num; in hns_roce_query_caps()
2283 caps->sccc_hop_num = ctx_hop_num; in hns_roce_query_caps()
2284 caps->srqc_hop_num = ctx_hop_num; in hns_roce_query_caps()
2285 caps->cqc_hop_num = ctx_hop_num; in hns_roce_query_caps()
2286 caps->mpt_hop_num = ctx_hop_num; in hns_roce_query_caps()
2287 caps->mtt_hop_num = pbl_hop_num; in hns_roce_query_caps()
2288 caps->cqe_hop_num = pbl_hop_num; in hns_roce_query_caps()
2289 caps->srqwqe_hop_num = pbl_hop_num; in hns_roce_query_caps()
2290 caps->idx_hop_num = pbl_hop_num; in hns_roce_query_caps()
2291 caps->wqe_sq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_SQWQE_HOP_NUM); in hns_roce_query_caps()
2292 caps->wqe_sge_hop_num = hr_reg_read(resp_d, PF_CAPS_D_EX_SGE_HOP_NUM); in hns_roce_query_caps()
2293 caps->wqe_rq_hop_num = hr_reg_read(resp_d, PF_CAPS_D_RQWQE_HOP_NUM); in hns_roce_query_caps()
2295 if (!(caps->page_size_cap & PAGE_SIZE)) in hns_roce_query_caps()
2296 caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED; in hns_roce_query_caps()
2298 if (!hr_dev->is_vf) { in hns_roce_query_caps()
2299 caps->cqe_sz = resp_a->cqe_sz; in hns_roce_query_caps()
2300 caps->qpc_sz = le16_to_cpu(resp_b->qpc_sz); in hns_roce_query_caps()
2301 caps->default_aeq_arm_st = in hns_roce_query_caps()
2303 caps->default_ceq_arm_st = in hns_roce_query_caps()
2305 caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt); in hns_roce_query_caps()
2306 caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period); in hns_roce_query_caps()
2307 caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt); in hns_roce_query_caps()
2308 caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period); in hns_roce_query_caps()
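
/*
 * A decoding note for hns_roce_query_caps() above: many capability fields
 * arrive log2-encoded and are expanded with 1 << hr_reg_read(...) (num_cqs,
 * max_cqes, num_qps, max_wqes, ...), while SGE counts read as plain values
 * are additionally rounded up to a power of two. A tiny illustration; the
 * sample numbers are invented.
 */
#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int cq_depth_log2 = 16;	/* as read from a caps field */
	unsigned int max_rq_sg = 6;		/* plain value from firmware */

	printf("max_cqes = %u\n", 1u << cq_depth_log2);		   /* 65536 */
	printf("max_rq_sg = %u\n", roundup_pow_of_two(max_rq_sg)); /* 8 */
	return 0;
}
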
2330 struct hns_roce_caps *caps = &hr_dev->caps; in hns_roce_config_entry_size()
2333 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) in hns_roce_config_entry_size()
2337 caps->qpc_sz); in hns_roce_config_entry_size()
2339 dev_err(hr_dev->dev, "failed to cfg qpc sz, ret = %d.\n", ret); in hns_roce_config_entry_size()
2344 caps->sccc_sz); in hns_roce_config_entry_size()
2346 dev_err(hr_dev->dev, "failed to cfg sccc sz, ret = %d.\n", ret); in hns_roce_config_entry_size()
2353 struct device *dev = hr_dev->dev; in hns_roce_v2_vf_profile()
2356 hr_dev->func_num = 1; in hns_roce_v2_vf_profile()
2381 struct device *dev = hr_dev->dev; in hns_roce_v2_pf_profile()
2434 struct device *dev = hr_dev->dev; in hns_roce_v2_profile()
2449 hr_dev->vendor_part_id = hr_dev->pci_dev->device; in hns_roce_v2_profile()
2450 hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid); in hns_roce_v2_profile()
2452 if (hr_dev->is_vf) in hns_roce_v2_profile()
2465 page_num = data_buf->npages; in config_llm_table()
2468 if (i == (page_num - 1)) in config_llm_table()
2484 struct hns_roce_buf *buf = table->buf; in set_llm_cfg_to_hw()
2493 hr_reg_write(r_a, CFG_LLM_A_BA_L, lower_32_bits(table->table.map)); in set_llm_cfg_to_hw()
2494 hr_reg_write(r_a, CFG_LLM_A_BA_H, upper_32_bits(table->table.map)); in set_llm_cfg_to_hw()
2495 hr_reg_write(r_a, CFG_LLM_A_DEPTH, buf->npages); in set_llm_cfg_to_hw()
2496 hr_reg_write(r_a, CFG_LLM_A_PGSZ, to_hr_hw_page_shift(buf->page_shift)); in set_llm_cfg_to_hw()
2505 addr = to_hr_hw_page_addr(hns_roce_buf_page(buf, buf->npages - 1)); in set_llm_cfg_to_hw()
2508 hr_reg_write(r_b, CFG_LLM_B_TAIL_PTR, buf->npages - 1); in set_llm_cfg_to_hw()
2516 u16 total_sl = hr_dev->caps.sl_num * hr_dev->func_num; in alloc_link_table_buf()
2517 struct hns_roce_v2_priv *priv = hr_dev->priv; in alloc_link_table_buf()
2521 link_tbl = &priv->ext_llm; in alloc_link_table_buf()
2522 pg_shift = hr_dev->caps.llm_buf_pg_sz + PAGE_SHIFT; in alloc_link_table_buf()
2523 size = hr_dev->caps.num_qps * hr_dev->func_num * in alloc_link_table_buf()
2529 link_tbl->buf = hns_roce_buf_alloc(hr_dev, size, pg_shift, 0); in alloc_link_table_buf()
2530 if (IS_ERR(link_tbl->buf)) in alloc_link_table_buf()
2531 return ERR_PTR(-ENOMEM); in alloc_link_table_buf()
2534 size = link_tbl->buf->npages * sizeof(u64); in alloc_link_table_buf()
2535 link_tbl->table.buf = dma_alloc_coherent(hr_dev->dev, size, in alloc_link_table_buf()
2536 &link_tbl->table.map, in alloc_link_table_buf()
2538 if (!link_tbl->table.buf) { in alloc_link_table_buf()
2539 hns_roce_buf_free(hr_dev, link_tbl->buf); in alloc_link_table_buf()
2540 return ERR_PTR(-ENOMEM); in alloc_link_table_buf()
2549 if (tbl->buf) { in free_link_table_buf()
2550 u32 size = tbl->buf->npages * sizeof(u64); in free_link_table_buf()
2552 dma_free_coherent(hr_dev->dev, size, tbl->table.buf, in free_link_table_buf()
2553 tbl->table.map); in free_link_table_buf()
2556 hns_roce_buf_free(hr_dev, tbl->buf); in free_link_table_buf()
2566 return -ENOMEM; in hns_roce_init_link_table()
2568 if (WARN_ON(link_tbl->buf->npages > HNS_ROCE_V2_EXT_LLM_MAX_DEPTH)) { in hns_roce_init_link_table()
2569 ret = -EINVAL; in hns_roce_init_link_table()
2573 config_llm_table(link_tbl->buf, link_tbl->table.buf); in hns_roce_init_link_table()
2587 struct hns_roce_v2_priv *priv = hr_dev->priv; in hns_roce_free_link_table()
2589 free_link_table_buf(hr_dev, &priv->ext_llm); in hns_roce_free_link_table()
2597 xa_lock(&hr_dev->qp_table.dip_xa); in free_dip_entry()
2599 xa_for_each(&hr_dev->qp_table.dip_xa, idx, hr_dip) { in free_dip_entry()
2600 __xa_erase(&hr_dev->qp_table.dip_xa, hr_dip->dip_idx); in free_dip_entry()
2604 xa_unlock(&hr_dev->qp_table.dip_xa); in free_dip_entry()
2609 struct hns_roce_v2_priv *priv = hr_dev->priv; in free_mr_init_pd()
2610 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; in free_mr_init_pd()
2611 struct ib_device *ibdev = &hr_dev->ib_dev; in free_mr_init_pd()
2613 struct ib_pd *pd; in free_mr_init_pd() local
2618 pd = &hr_pd->ibpd; in free_mr_init_pd()
2619 pd->device = ibdev; in free_mr_init_pd()
2621 if (hns_roce_alloc_pd(pd, NULL)) { in free_mr_init_pd()
2622 ibdev_err(ibdev, "failed to create pd for free mr.\n"); in free_mr_init_pd()
2626 free_mr->rsv_pd = to_hr_pd(pd); in free_mr_init_pd()
2627 free_mr->rsv_pd->ibpd.device = &hr_dev->ib_dev; in free_mr_init_pd()
2628 free_mr->rsv_pd->ibpd.uobject = NULL; in free_mr_init_pd()
2629 free_mr->rsv_pd->ibpd.__internal_mr = NULL; in free_mr_init_pd()
2630 atomic_set(&free_mr->rsv_pd->ibpd.usecnt, 0); in free_mr_init_pd()
2632 return pd; in free_mr_init_pd()
2637 struct hns_roce_v2_priv *priv = hr_dev->priv; in free_mr_init_cq()
2638 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; in free_mr_init_cq()
2639 struct ib_device *ibdev = &hr_dev->ib_dev; in free_mr_init_cq()
2650 cq = &hr_cq->ib_cq; in free_mr_init_cq()
2651 cq->device = ibdev; in free_mr_init_cq()
2658 free_mr->rsv_cq = to_hr_cq(cq); in free_mr_init_cq()
2659 free_mr->rsv_cq->ib_cq.device = &hr_dev->ib_dev; in free_mr_init_cq()
2660 free_mr->rsv_cq->ib_cq.uobject = NULL; in free_mr_init_cq()
2661 free_mr->rsv_cq->ib_cq.comp_handler = NULL; in free_mr_init_cq()
2662 free_mr->rsv_cq->ib_cq.event_handler = NULL; in free_mr_init_cq()
2663 free_mr->rsv_cq->ib_cq.cq_context = NULL; in free_mr_init_cq()
2664 atomic_set(&free_mr->rsv_cq->ib_cq.usecnt, 0); in free_mr_init_cq()
2672 struct hns_roce_v2_priv *priv = hr_dev->priv; in free_mr_init_qp()
2673 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; in free_mr_init_qp()
2674 struct ib_device *ibdev = &hr_dev->ib_dev; in free_mr_init_qp()
2681 return -ENOMEM; in free_mr_init_qp()
2683 qp = &hr_qp->ibqp; in free_mr_init_qp()
2684 qp->device = ibdev; in free_mr_init_qp()
2693 free_mr->rsv_qp[i] = hr_qp; in free_mr_init_qp()
2694 free_mr->rsv_qp[i]->ibqp.recv_cq = cq; in free_mr_init_qp()
2695 free_mr->rsv_qp[i]->ibqp.send_cq = cq; in free_mr_init_qp()
2702 struct hns_roce_v2_priv *priv = hr_dev->priv; in free_mr_exit()
2703 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; in free_mr_exit()
2707 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) { in free_mr_exit()
2708 if (free_mr->rsv_qp[i]) { in free_mr_exit()
2709 qp = &free_mr->rsv_qp[i]->ibqp; in free_mr_exit()
2711 kfree(free_mr->rsv_qp[i]); in free_mr_exit()
2712 free_mr->rsv_qp[i] = NULL; in free_mr_exit()
2716 if (free_mr->rsv_cq) { in free_mr_exit()
2717 hns_roce_destroy_cq(&free_mr->rsv_cq->ib_cq, NULL); in free_mr_exit()
2718 kfree(free_mr->rsv_cq); in free_mr_exit()
2719 free_mr->rsv_cq = NULL; in free_mr_exit()
2722 if (free_mr->rsv_pd) { in free_mr_exit()
2723 hns_roce_dealloc_pd(&free_mr->rsv_pd->ibpd, NULL); in free_mr_exit()
2724 kfree(free_mr->rsv_pd); in free_mr_exit()
2725 free_mr->rsv_pd = NULL; in free_mr_exit()
2728 mutex_destroy(&free_mr->mutex); in free_mr_exit()
2733 struct hns_roce_v2_priv *priv = hr_dev->priv; in free_mr_alloc_res()
2734 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; in free_mr_alloc_res()
2736 struct ib_pd *pd; in free_mr_alloc_res() local
2741 pd = free_mr_init_pd(hr_dev); in free_mr_alloc_res()
2742 if (!pd) in free_mr_alloc_res()
2743 return -ENOMEM; in free_mr_alloc_res()
2747 ret = -ENOMEM; in free_mr_alloc_res()
2755 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) { in free_mr_alloc_res()
2769 for (i--; i >= 0; i--) { in free_mr_alloc_res()
2770 hns_roce_v2_destroy_qp(&free_mr->rsv_qp[i]->ibqp, NULL); in free_mr_alloc_res()
2771 kfree(free_mr->rsv_qp[i]); in free_mr_alloc_res()
2777 hns_roce_dealloc_pd(pd, NULL); in free_mr_alloc_res()
2778 kfree(pd); in free_mr_alloc_res()
2786 struct hns_roce_v2_priv *priv = hr_dev->priv; in free_mr_modify_rsv_qp()
2787 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; in free_mr_modify_rsv_qp()
2788 struct ib_device *ibdev = &hr_dev->ib_dev; in free_mr_modify_rsv_qp()
2794 hr_qp = to_hr_qp(&free_mr->rsv_qp[sl_num]->ibqp); in free_mr_modify_rsv_qp()
2795 hr_qp->free_mr_en = 1; in free_mr_modify_rsv_qp()
2796 hr_qp->ibqp.device = ibdev; in free_mr_modify_rsv_qp()
2797 hr_qp->ibqp.qp_type = IB_QPT_RC; in free_mr_modify_rsv_qp()
2800 attr->qp_state = IB_QPS_INIT; in free_mr_modify_rsv_qp()
2801 attr->port_num = 1; in free_mr_modify_rsv_qp()
2802 attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE; in free_mr_modify_rsv_qp()
2803 ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT, in free_mr_modify_rsv_qp()
2811 loopback = hr_dev->loop_idc; in free_mr_modify_rsv_qp()
2813 hr_dev->loop_idc = 1; in free_mr_modify_rsv_qp()
2817 attr->qp_state = IB_QPS_RTR; in free_mr_modify_rsv_qp()
2818 attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; in free_mr_modify_rsv_qp()
2819 attr->path_mtu = IB_MTU_256; in free_mr_modify_rsv_qp()
2820 attr->dest_qp_num = hr_qp->qpn; in free_mr_modify_rsv_qp()
2821 attr->rq_psn = HNS_ROCE_FREE_MR_USED_PSN; in free_mr_modify_rsv_qp()
2823 rdma_ah_set_sl(&attr->ah_attr, (u8)sl_num); in free_mr_modify_rsv_qp()
2825 ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT, in free_mr_modify_rsv_qp()
2827 hr_dev->loop_idc = loopback; in free_mr_modify_rsv_qp()
2836 attr->qp_state = IB_QPS_RTS; in free_mr_modify_rsv_qp()
2837 attr->sq_psn = HNS_ROCE_FREE_MR_USED_PSN; in free_mr_modify_rsv_qp()
2838 attr->retry_cnt = HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT; in free_mr_modify_rsv_qp()
2839 attr->timeout = HNS_ROCE_FREE_MR_USED_QP_TIMEOUT; in free_mr_modify_rsv_qp()
2840 ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_RTR, in free_mr_modify_rsv_qp()
2851 struct hns_roce_v2_priv *priv = hr_dev->priv; in free_mr_modify_qp()
2852 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; in free_mr_modify_qp()
2861 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) { in free_mr_modify_qp()
2872 struct hns_roce_v2_priv *priv = hr_dev->priv; in free_mr_init()
2873 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; in free_mr_init()
2876 mutex_init(&free_mr->mutex); in free_mr_init()
2880 mutex_destroy(&free_mr->mutex); in free_mr_init()
2905 for (gmv_count = 0; gmv_count < hr_dev->caps.gmv_entry_num; in get_hem_table()
2907 ret = hns_roce_table_get(hr_dev, &hr_dev->gmv_table, gmv_count); in get_hem_table()
2912 if (hr_dev->is_vf) in get_hem_table()
2916 for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num; in get_hem_table()
2918 ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table, in get_hem_table()
2921 dev_err(hr_dev->dev, "QPC Timer get failed\n"); in get_hem_table()
2927 for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num; in get_hem_table()
2929 ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table, in get_hem_table()
2932 dev_err(hr_dev->dev, "CQC Timer get failed\n"); in get_hem_table()
2941 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i); in get_hem_table()
2945 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i); in get_hem_table()
2949 hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i); in get_hem_table()
2958 for (i = 0; i < hr_dev->caps.gmv_entry_num; i++) in put_hem_table()
2959 hns_roce_table_put(hr_dev, &hr_dev->gmv_table, i); in put_hem_table()
2961 if (hr_dev->is_vf) in put_hem_table()
2964 for (i = 0; i < hr_dev->caps.qpc_timer_bt_num; i++) in put_hem_table()
2965 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i); in put_hem_table()
2967 for (i = 0; i < hr_dev->caps.cqc_timer_bt_num; i++) in put_hem_table()
2968 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i); in put_hem_table()
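get_hem_table()/put_hem_table() above follow the usual acquire-N-or-roll-back pattern: each table entry is grabbed in a loop, and a failure at entry k releases the k entries already held, in reverse, before propagating the error. A standalone sketch of that pattern with a deliberately fallible allocator (all names hypothetical):

/* Acquire-with-rollback sketch: grab N entries, undo on failure. */
#include <stdio.h>

#define N 5

static int table_get(int i)
{
	return i == 3 ? -1 : 0; /* simulate failure at entry 3 */
}

static void table_put(int i)
{
	printf("put %d\n", i);
}

static int get_all(void)
{
	int i, ret = 0;

	for (i = 0; i < N; i++) {
		ret = table_get(i);
		if (ret)
			goto err_out;
		printf("got %d\n", i);
	}
	return 0;

err_out:
	while (--i >= 0) /* release what we already hold, in reverse */
		table_put(i);
	return ret;
}

int main(void)
{
	return get_all() ? 1 : 0;
}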
2984 if (hr_dev->is_vf) in hns_roce_v2_init()
2989 dev_err(hr_dev->dev, "failed to init llm, ret = %d.\n", ret); in hns_roce_v2_init()
3003 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) in hns_roce_v2_exit()
3008 if (!hr_dev->is_vf) in hns_roce_v2_exit()
3011 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09) in hns_roce_v2_exit()
3023 mb->in_param_l = cpu_to_le32(mbox_msg->in_param); in hns_roce_mbox_post()
3024 mb->in_param_h = cpu_to_le32(mbox_msg->in_param >> 32); in hns_roce_mbox_post()
3025 mb->out_param_l = cpu_to_le32(mbox_msg->out_param); in hns_roce_mbox_post()
3026 mb->out_param_h = cpu_to_le32(mbox_msg->out_param >> 32); in hns_roce_mbox_post()
3027 mb->cmd_tag = cpu_to_le32(mbox_msg->tag << 8 | mbox_msg->cmd); in hns_roce_mbox_post()
3028 mb->token_event_en = cpu_to_le32(mbox_msg->event_en << 16 | in hns_roce_mbox_post()
3029 mbox_msg->token); in hns_roce_mbox_post()
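hns_roce_mbox_post() packs two logical fields per 32-bit descriptor word: the command in the low byte of cmd_tag with the tag above it, and the token in the low 16 bits of token_event_en with the event-enable flag at bit 16, each word then stored little-endian via cpu_to_le32(). A standalone, host-endian sketch of the packing and its round-trip:

/* Sketch of the mailbox word packing: tag<<8|cmd and
 * event_en<<16|token. Host-endian for brevity; the driver wraps
 * each word in cpu_to_le32(). */
#include <stdint.h>
#include <stdio.h>

struct mbox_words {
	uint32_t cmd_tag;        /* [7:0] cmd, [31:8] tag */
	uint32_t token_event_en; /* [15:0] token, [16] event_en */
};

static void pack(struct mbox_words *w, uint32_t tag, uint8_t cmd,
		 uint16_t token, int event_en)
{
	w->cmd_tag = tag << 8 | cmd;
	w->token_event_en = (uint32_t)(!!event_en) << 16 | token;
}

int main(void)
{
	struct mbox_words w;

	pack(&w, 0x1234, 0x5a, 0xbeef, 1);
	printf("cmd_tag=0x%08x token_event_en=0x%08x\n",
	       w.cmd_tag, w.token_event_en);
	/* Unpack to check the round-trip. */
	printf("cmd=0x%02x tag=0x%x token=0x%04x ev=%u\n",
	       w.cmd_tag & 0xff, w.cmd_tag >> 8,
	       w.token_event_en & 0xffff, (w.token_event_en >> 16) & 1);
	return 0;
}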
3040 int ret = -EBUSY; in v2_wait_mbox_complete()
3047 if (hr_dev->cmd.state == HNS_ROCE_CMDQ_STATE_FATAL_ERR) in v2_wait_mbox_complete()
3048 return -EIO; in v2_wait_mbox_complete()
3055 status = le32_to_cpu(mb_st->mb_status_hw_run); in v2_wait_mbox_complete()
3064 dev_err_ratelimited(hr_dev->dev, in v2_wait_mbox_complete()
3067 return -ETIMEDOUT; in v2_wait_mbox_complete()
3071 ret = -EBUSY; in v2_wait_mbox_complete()
3095 dev_err_ratelimited(hr_dev->dev, in v2_post_mbox()
3104 dev_err_ratelimited(hr_dev->dev, in v2_post_mbox()
3119 return -EBUSY; in v2_poll_mbox_done()
3121 dev_err_ratelimited(hr_dev->dev, in v2_poll_mbox_done()
3140 (*p)[i] = cpu_to_le32(*(u32 *)&src->raw[i * sizeof(u32)]); in copy_gid()
3156 copy_gid(&sgid_tb->vf_sgid_l, gid); in config_sgid_table()
3187 copy_gid(&tb_a->vf_sgid_l, gid); in config_gmv_table()
3193 tb_b->vf_smac_l = cpu_to_le32(*(u32 *)mac); in config_gmv_table()
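copy_gid() above moves the 16-byte GID into the table as four little-endian 32-bit words, and the MAC's low four bytes land in vf_smac_l the same way. A portable standalone sketch of the word-wise copy, where to_le32() emulates cpu_to_le32():

/* Sketch of copy_gid(): a 16-byte GID copied into four
 * little-endian 32-bit table words. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GID_LEN 16

static uint32_t to_le32(uint32_t v)
{
	uint8_t b[4] = { v, v >> 8, v >> 16, v >> 24 };
	uint32_t out;

	memcpy(&out, b, 4); /* memory layout is now LE on any host */
	return out;
}

static void copy_gid(uint32_t dst[GID_LEN / 4], const uint8_t raw[GID_LEN])
{
	uint32_t w;
	int i;

	for (i = 0; i < GID_LEN / 4; i++) {
		memcpy(&w, &raw[i * 4], 4);
		dst[i] = to_le32(w);
	}
}

int main(void)
{
	uint8_t gid[GID_LEN];
	uint32_t words[4];
	int i;

	for (i = 0; i < GID_LEN; i++)
		gid[i] = i;
	copy_gid(words, gid);
	for (i = 0; i < 4; i++)
		printf("word[%d]=0x%08x\n", i, words[i]);
	return 0;
}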
3209 if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) { in hns_roce_v2_set_gid()
3214 } else if (attr->gid_type == IB_GID_TYPE_ROCE) { in hns_roce_v2_set_gid()
3219 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) in hns_roce_v2_set_gid()
3225 ibdev_err(&hr_dev->ib_dev, "failed to set gid, ret = %d!\n", in hns_roce_v2_set_gid()
3247 smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l); in hns_roce_v2_set_mac()
3257 struct ib_device *ibdev = &hr_dev->ib_dev; in set_mtpt_pbl()
3262 ret = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages, in set_mtpt_pbl()
3263 min_t(int, ARRAY_SIZE(pages), mr->npages)); in set_mtpt_pbl()
3273 pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr); in set_mtpt_pbl()
3275 mpt_entry->pbl_size = cpu_to_le32(mr->npages); in set_mtpt_pbl()
3276 mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> MPT_PBL_BA_ADDR_S); in set_mtpt_pbl()
3280 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0])); in set_mtpt_pbl()
3283 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1])); in set_mtpt_pbl()
3286 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); in set_mtpt_pbl()
3300 hr_reg_write(mpt_entry, MPT_PD, mr->pd); in hns_roce_v2_write_mtpt()
3303 mr->access & IB_ACCESS_MW_BIND); in hns_roce_v2_write_mtpt()
3305 mr->access & IB_ACCESS_REMOTE_ATOMIC); in hns_roce_v2_write_mtpt()
3307 mr->access & IB_ACCESS_REMOTE_READ); in hns_roce_v2_write_mtpt()
3309 mr->access & IB_ACCESS_REMOTE_WRITE); in hns_roce_v2_write_mtpt()
3311 mr->access & IB_ACCESS_LOCAL_WRITE); in hns_roce_v2_write_mtpt()
3313 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size)); in hns_roce_v2_write_mtpt()
3314 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size)); in hns_roce_v2_write_mtpt()
3315 mpt_entry->lkey = cpu_to_le32(mr->key); in hns_roce_v2_write_mtpt()
3316 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova)); in hns_roce_v2_write_mtpt()
3317 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova)); in hns_roce_v2_write_mtpt()
3319 if (mr->type != MR_TYPE_MR) in hns_roce_v2_write_mtpt()
3322 if (mr->type == MR_TYPE_DMA) in hns_roce_v2_write_mtpt()
3325 if (mr->pbl_hop_num != HNS_ROCE_HOP_NUM_0) in hns_roce_v2_write_mtpt()
3326 hr_reg_write(mpt_entry, MPT_PBL_HOP_NUM, mr->pbl_hop_num); in hns_roce_v2_write_mtpt()
3329 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift)); in hns_roce_v2_write_mtpt()
3340 u32 mr_access_flags = mr->access; in hns_roce_v2_rereg_write_mtpt()
3344 hr_reg_write(mpt_entry, MPT_PD, mr->pd); in hns_roce_v2_rereg_write_mtpt()
3360 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova)); in hns_roce_v2_rereg_write_mtpt()
3361 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova)); in hns_roce_v2_rereg_write_mtpt()
3362 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size)); in hns_roce_v2_rereg_write_mtpt()
3363 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size)); in hns_roce_v2_rereg_write_mtpt()
3373 dma_addr_t pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr); in hns_roce_v2_frmr_write_mtpt()
3380 hr_reg_write(mpt_entry, MPT_PD, mr->pd); in hns_roce_v2_frmr_write_mtpt()
3392 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.ba_pg_shift)); in hns_roce_v2_frmr_write_mtpt()
3394 to_hr_hw_page_shift(mr->pbl_mtr.hem_cfg.buf_pg_shift)); in hns_roce_v2_frmr_write_mtpt()
3396 mpt_entry->pbl_size = cpu_to_le32(mr->npages); in hns_roce_v2_frmr_write_mtpt()
3398 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(pbl_ba >> in hns_roce_v2_frmr_write_mtpt()
3414 hr_reg_write(mpt_entry, MPT_PD, mw->pdn); in hns_roce_v2_mw_write_mtpt()
3423 mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1); in hns_roce_v2_mw_write_mtpt()
3425 mpt_entry->lkey = cpu_to_le32(mw->rkey); in hns_roce_v2_mw_write_mtpt()
3428 mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : in hns_roce_v2_mw_write_mtpt()
3429 mw->pbl_hop_num); in hns_roce_v2_mw_write_mtpt()
3431 mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET); in hns_roce_v2_mw_write_mtpt()
3433 mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET); in hns_roce_v2_mw_write_mtpt()
3440 struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device); in free_mr_post_send_lp_wqe()
3441 struct ib_device *ibdev = &hr_dev->ib_dev; in free_mr_post_send_lp_wqe()
3448 send_wr->opcode = IB_WR_RDMA_WRITE; in free_mr_post_send_lp_wqe()
3450 ret = hns_roce_v2_post_send(&hr_qp->ibqp, send_wr, &bad_wr); in free_mr_post_send_lp_wqe()
3465 struct hns_roce_v2_priv *priv = hr_dev->priv; in free_mr_send_cmd_to_hw()
3466 struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; in free_mr_send_cmd_to_hw()
3467 struct ib_wc wc[ARRAY_SIZE(free_mr->rsv_qp)]; in free_mr_send_cmd_to_hw()
3468 struct ib_device *ibdev = &hr_dev->ib_dev; in free_mr_send_cmd_to_hw()
3480 if (priv->handle->rinfo.reset_state == HNS_ROCE_STATE_RST_INIT || in free_mr_send_cmd_to_hw()
3481 priv->handle->rinfo.instance_state == HNS_ROCE_STATE_INIT || in free_mr_send_cmd_to_hw()
3482 hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) in free_mr_send_cmd_to_hw()
3485 mutex_lock(&free_mr->mutex); in free_mr_send_cmd_to_hw()
3487 for (i = 0; i < ARRAY_SIZE(free_mr->rsv_qp); i++) { in free_mr_send_cmd_to_hw()
3488 hr_qp = free_mr->rsv_qp[i]; in free_mr_send_cmd_to_hw()
3494 hr_qp->qpn, ret); in free_mr_send_cmd_to_hw()
3503 npolled = hns_roce_v2_poll_cq(&free_mr->rsv_cq->ib_cq, cqe_cnt, wc); in free_mr_send_cmd_to_hw()
3517 cqe_cnt -= npolled; in free_mr_send_cmd_to_hw()
3521 mutex_unlock(&free_mr->mutex); in free_mr_send_cmd_to_hw()
3526 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) in hns_roce_v2_dereg_mr()
3532 return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size); in get_cqe_v2()
3537 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe); in get_sw_cqe_v2()
3540 return (hr_reg_read(cqe, CQE_OWNER) ^ !!(n & hr_cq->cq_depth)) ? cqe : in get_sw_cqe_v2()
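get_sw_cqe_v2() decides whether a CQE belongs to the current sweep with an owner-bit parity test: for a power-of-two ring, bit cq_depth of the free-running consumer index flips on every wrap, and the hardware toggles the CQE owner bit on each pass, so a single XOR separates fresh entries from stale ones. A standalone model of the scheme (polarity chosen to match the expression above, but illustrative):

/* Owner-bit parity sketch for a power-of-two completion ring. */
#include <stdint.h>
#include <stdio.h>

#define DEPTH 4 /* must be a power of two */

struct cqe { unsigned owner : 1; };

static struct cqe ring[DEPTH];

/* Producer: owner bit encodes the sweep parity of the write. */
static void produce(uint32_t prod_idx)
{
	ring[prod_idx & (DEPTH - 1)].owner = !(prod_idx & DEPTH);
}

/* Consumer: entry is valid when owner differs from our sweep parity. */
static int is_valid(uint32_t cons_idx)
{
	return ring[cons_idx & (DEPTH - 1)].owner ^ !!(cons_idx & DEPTH);
}

int main(void)
{
	uint32_t ci = 0, pi;

	for (pi = 0; pi < 2 * DEPTH; pi++) {
		produce(pi);
		if (is_valid(ci)) {
			printf("cqe %u valid (slot %u)\n",
			       ci, ci & (DEPTH - 1));
			ci++;
		}
	}
	return 0;
}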
3547 if (likely(hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB)) { in update_cq_db()
3548 *hr_cq->set_ci_db = hr_cq->cons_index & V2_CQ_DB_CONS_IDX_M; in update_cq_db()
3552 hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn); in update_cq_db()
3554 hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index); in update_cq_db()
3557 hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg); in update_cq_db()
3564 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device); in __hns_roce_v2_cq_clean()
3571 for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index); in __hns_roce_v2_cq_clean()
3573 if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe) in __hns_roce_v2_cq_clean()
3581 while ((int) --prod_index - (int) hr_cq->cons_index >= 0) { in __hns_roce_v2_cq_clean()
3582 cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe); in __hns_roce_v2_cq_clean()
3591 hr_cq->ib_cq.cqe); in __hns_roce_v2_cq_clean()
3593 memcpy(dest, cqe, hr_cq->cqe_size); in __hns_roce_v2_cq_clean()
3599 hr_cq->cons_index += nfreed; in __hns_roce_v2_cq_clean()
3607 spin_lock_irq(&hr_cq->lock); in hns_roce_v2_cq_clean()
3609 spin_unlock_irq(&hr_cq->lock); in hns_roce_v2_cq_clean()
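__hns_roce_v2_cq_clean() compacts the unpolled window when a QP dies: it walks backwards from the newest entry, counts CQEs owned by the destroyed QP, slides each survivor toward the producer by the running count, and finally advances cons_index past the freed gap. The backward walk is what keeps survivors from clobbering entries not yet visited. A plain-array sketch:

/* CQ compaction sketch: drop entries for a dead QP, keep order. */
#include <stdint.h>
#include <stdio.h>

#define DEPTH 8

static uint32_t cq[DEPTH]; /* entry value = owning QPN */

static uint32_t cq_clean(uint32_t cons, uint32_t prod, uint32_t dead_qpn)
{
	uint32_t nfreed = 0;

	/* Walk back from the newest entry so survivors can be copied
	 * toward the producer without clobbering unvisited entries. */
	while ((int)--prod - (int)cons >= 0) {
		uint32_t slot = prod & (DEPTH - 1);

		if (cq[slot] == dead_qpn)
			nfreed++;
		else if (nfreed)
			cq[(prod + nfreed) & (DEPTH - 1)] = cq[slot];
	}
	return cons + nfreed; /* consumer index skips the freed gap */
}

int main(void)
{
	uint32_t qpns[] = { 7, 3, 7, 5, 3, 7 };
	uint32_t cons = 0, prod = 6, i;

	for (i = 0; i < prod; i++)
		cq[i] = qpns[i];
	cons = cq_clean(cons, prod, 3); /* QP 3 is being destroyed */
	for (i = cons; i != prod; i++)
		printf("cqe[%u] qpn=%u\n",
		       i & (DEPTH - 1), cq[i & (DEPTH - 1)]);
	return 0;
}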
3623 hr_reg_write(cq_context, CQC_SHIFT, ilog2(hr_cq->cq_depth)); in hns_roce_v2_write_cqc()
3624 hr_reg_write(cq_context, CQC_CEQN, hr_cq->vector); in hns_roce_v2_write_cqc()
3625 hr_reg_write(cq_context, CQC_CQN, hr_cq->cqn); in hns_roce_v2_write_cqc()
3627 if (hr_cq->cqe_size == HNS_ROCE_V3_CQE_SIZE) in hns_roce_v2_write_cqc()
3630 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH) in hns_roce_v2_write_cqc()
3637 hr_reg_write(cq_context, CQC_CQE_HOP_NUM, hr_dev->caps.cqe_hop_num == in hns_roce_v2_write_cqc()
3638 HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num); in hns_roce_v2_write_cqc()
3644 to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift)); in hns_roce_v2_write_cqc()
3646 to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift)); in hns_roce_v2_write_cqc()
3650 hr_cq->flags & HNS_ROCE_CQ_FLAG_RECORD_DB); in hns_roce_v2_write_cqc()
3652 ((u32)hr_cq->db.dma) >> 1); in hns_roce_v2_write_cqc()
3654 hr_cq->db.dma >> CQC_CQE_DB_RECORD_ADDR_H_S); in hns_roce_v2_write_cqc()
3664 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device); in hns_roce_v2_req_notify_cq()
3676 hr_reg_write(&cq_db, DB_TAG, hr_cq->cqn); in hns_roce_v2_req_notify_cq()
3678 hr_reg_write(&cq_db, DB_CQ_CI, hr_cq->cons_index); in hns_roce_v2_req_notify_cq()
3679 hr_reg_write(&cq_db, DB_CQ_CMD_SN, hr_cq->arm_sn); in hns_roce_v2_req_notify_cq()
3682 hns_roce_write64(hr_dev, (__le32 *)&cq_db, hr_cq->db_reg); in hns_roce_v2_req_notify_cq()
3693 left = wq->head - wq->tail; in sw_comp()
3699 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; in sw_comp()
3700 wc->status = IB_WC_WR_FLUSH_ERR; in sw_comp()
3701 wc->vendor_err = 0; in sw_comp()
3702 wc->qp = &hr_qp->ibqp; in sw_comp()
3704 wq->tail++; in sw_comp()
3718 list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) { in hns_roce_v2_sw_poll_cq()
3719 npolled += sw_comp(hr_qp, &hr_qp->sq, in hns_roce_v2_sw_poll_cq()
3720 num_entries - npolled, wc + npolled); in hns_roce_v2_sw_poll_cq()
3725 list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) { in hns_roce_v2_sw_poll_cq()
3726 npolled += sw_comp(hr_qp, &hr_qp->rq, in hns_roce_v2_sw_poll_cq()
3727 num_entries - npolled, wc + npolled); in hns_roce_v2_sw_poll_cq()
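sw_comp() above is the software fallback used once the device has stopped: every work request still queued between tail and head is completed with IB_WC_WR_FLUSH_ERR so consumers can reclaim their WR IDs. A standalone model with plain structs:

/* Software flush-completion sketch; all types are stand-ins. */
#include <stdint.h>
#include <stdio.h>

#define WQE_CNT 8 /* power of two */

struct wq {
	uint64_t wrid[WQE_CNT];
	uint32_t head, tail;
};

struct wc { uint64_t wr_id; int flushed; };

static int sw_comp(struct wq *wq, int max, struct wc *wc)
{
	uint32_t left = wq->head - wq->tail;
	int n = 0;

	if (left > (uint32_t)max)
		left = max;
	while (left--) {
		wc[n].wr_id = wq->wrid[wq->tail & (WQE_CNT - 1)];
		wc[n].flushed = 1; /* IB_WC_WR_FLUSH_ERR in the driver */
		wq->tail++;
		n++;
	}
	return n;
}

int main(void)
{
	struct wq wq = { .head = 5, .tail = 2 };
	struct wc wc[4];
	int i, n;

	for (i = 0; i < WQE_CNT; i++)
		wq.wrid[i] = 100 + i;
	n = sw_comp(&wq, 4, wc);
	for (i = 0; i < n; i++)
		printf("wr_id=%llu flushed=%d\n",
		       (unsigned long long)wc[i].wr_id, wc[i].flushed);
	return 0;
}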
3765 wc->status = IB_WC_GENERAL_ERR; in get_cqe_status()
3768 wc->status = map[i].wc_status; in get_cqe_status()
3772 if (likely(wc->status == IB_WC_SUCCESS || in get_cqe_status()
3773 wc->status == IB_WC_WR_FLUSH_ERR)) in get_cqe_status()
3776 ibdev_err_ratelimited(&hr_dev->ib_dev, "error cqe status 0x%x:\n", in get_cqe_status()
3779 cq->cqe_size, false); in get_cqe_status()
3780 wc->vendor_err = hr_reg_read(cqe, CQE_SUB_STATUS); in get_cqe_status()
3796 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device); in get_cur_qp()
3802 if (!hr_qp || qpn != hr_qp->qpn) { in get_cur_qp()
3805 ibdev_err(&hr_dev->ib_dev, in get_cur_qp()
3807 hr_cq->cqn, qpn); in get_cur_qp()
3808 return -EINVAL; in get_cur_qp()
3817 * mapped-value = 1 + real-value
3844 return -EINVAL; in to_ib_wc_send_op()
3846 return wc_send_op_map[hr_opcode] ? wc_send_op_map[hr_opcode] - 1 : in to_ib_wc_send_op()
3847 -EINVAL; in to_ib_wc_send_op()
3860 return -EINVAL; in to_ib_wc_recv_op()
3862 return wc_recv_op_map[hr_opcode] ? wc_recv_op_map[hr_opcode] - 1 : in to_ib_wc_recv_op()
3863 -EINVAL; in to_ib_wc_recv_op()
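Both translation tables use the mapped-value = 1 + real-value trick noted in the comment: entries store the IB opcode plus one, so the implicit zero of array slots that were never initialized reads as "no mapping", and a single expression yields either the opcode or -EINVAL. A standalone sketch with made-up opcode values:

/* "+1 sentinel" opcode map sketch: zero means unsupported. */
#include <stdio.h>

enum hw_op { HW_SEND, HW_WRITE, HW_BOGUS, HW_READ, HW_OP_MAX };
enum ib_op { IB_SEND, IB_WRITE, IB_READ };

static const int wc_op_map[HW_OP_MAX] = {
	[HW_SEND]  = IB_SEND + 1,
	[HW_WRITE] = IB_WRITE + 1,
	[HW_READ]  = IB_READ + 1,
	/* HW_BOGUS stays 0 == no mapping */
};

static int to_ib_op(unsigned hw)
{
	if (hw >= HW_OP_MAX)
		return -1;
	return wc_op_map[hw] ? wc_op_map[hw] - 1 : -1;
}

int main(void)
{
	unsigned hw;

	for (hw = 0; hw <= HW_OP_MAX; hw++)
		printf("hw %u -> ib %d\n", hw, to_ib_op(hw));
	return 0;
}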
3871 wc->wc_flags = 0; in fill_send_wc()
3876 wc->byte_len = le32_to_cpu(cqe->byte_cnt); in fill_send_wc()
3880 wc->wc_flags |= IB_WC_WITH_IMM; in fill_send_wc()
3886 wc->byte_len = 8; in fill_send_wc()
3894 wc->status = IB_WC_GENERAL_ERR; in fill_send_wc()
3896 wc->opcode = ib_opcode; in fill_send_wc()
3904 wc->byte_len = le32_to_cpu(cqe->byte_cnt); in fill_recv_wc()
3910 wc->wc_flags = IB_WC_WITH_IMM; in fill_recv_wc()
3911 wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->immtdata)); in fill_recv_wc()
3914 wc->wc_flags = IB_WC_WITH_INVALIDATE; in fill_recv_wc()
3915 wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey); in fill_recv_wc()
3918 wc->wc_flags = 0; in fill_recv_wc()
3923 wc->status = IB_WC_GENERAL_ERR; in fill_recv_wc()
3925 wc->opcode = ib_opcode; in fill_recv_wc()
3927 wc->sl = hr_reg_read(cqe, CQE_SL); in fill_recv_wc()
3928 wc->src_qp = hr_reg_read(cqe, CQE_RMT_QPN); in fill_recv_wc()
3929 wc->slid = 0; in fill_recv_wc()
3930 wc->wc_flags |= hr_reg_read(cqe, CQE_GRH) ? IB_WC_GRH : 0; in fill_recv_wc()
3931 wc->port_num = hr_reg_read(cqe, CQE_PORTN); in fill_recv_wc()
3932 wc->pkey_index = 0; in fill_recv_wc()
3935 wc->vlan_id = hr_reg_read(cqe, CQE_VID); in fill_recv_wc()
3936 wc->wc_flags |= IB_WC_WITH_VLAN; in fill_recv_wc()
3938 wc->vlan_id = 0xffff; in fill_recv_wc()
3941 wc->network_hdr_type = hr_reg_read(cqe, CQE_PORT_TYPE); in fill_recv_wc()
3949 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device); in hns_roce_v2_poll_one()
3958 cqe = get_sw_cqe_v2(hr_cq, hr_cq->cons_index); in hns_roce_v2_poll_one()
3960 return -EAGAIN; in hns_roce_v2_poll_one()
3962 ++hr_cq->cons_index; in hns_roce_v2_poll_one()
3970 wc->qp = &qp->ibqp; in hns_roce_v2_poll_one()
3971 wc->vendor_err = 0; in hns_roce_v2_poll_one()
3977 wq = &qp->sq; in hns_roce_v2_poll_one()
3982 if (qp->sq_signal_bits) in hns_roce_v2_poll_one()
3983 wq->tail += (wqe_idx - (u16)wq->tail) & in hns_roce_v2_poll_one()
3984 (wq->wqe_cnt - 1); in hns_roce_v2_poll_one()
3986 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; in hns_roce_v2_poll_one()
3987 ++wq->tail; in hns_roce_v2_poll_one()
3991 if (qp->ibqp.srq) { in hns_roce_v2_poll_one()
3992 srq = to_hr_srq(qp->ibqp.srq); in hns_roce_v2_poll_one()
3993 wc->wr_id = srq->wrid[wqe_idx]; in hns_roce_v2_poll_one()
3996 wq = &qp->rq; in hns_roce_v2_poll_one()
3997 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; in hns_roce_v2_poll_one()
3998 ++wq->tail; in hns_roce_v2_poll_one()
4005 if (unlikely(wc->status != IB_WC_SUCCESS)) in hns_roce_v2_poll_one()
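The send-side tail handling above covers per-WR signaling: when not every WQE raises a CQE, one completion retires all earlier unsignaled WQEs, so the tail first jumps to the reported slot using masked wraparound arithmetic and then advances past it. A standalone check of the index math:

/* Tail catch-up sketch for a power-of-two send queue. */
#include <stdint.h>
#include <stdio.h>

#define WQE_CNT 8 /* power of two */

int main(void)
{
	uint32_t tail = 14;   /* free-running consumer index (slot 6) */
	uint16_t wqe_idx = 2; /* slot reported by the CQE */

	/* Jump over the unsignaled WQEs between tail and wqe_idx. */
	tail += (wqe_idx - (uint16_t)tail) & (WQE_CNT - 1);
	printf("caught up: tail=%u (slot %u)\n", tail, tail & (WQE_CNT - 1));
	++tail; /* retire the completed WQE itself */
	printf("after retire: tail=%u\n", tail);
	return 0;
}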
4014 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device); in hns_roce_v2_poll_cq()
4020 spin_lock_irqsave(&hr_cq->lock, flags); in hns_roce_v2_poll_cq()
4029 if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) { in hns_roce_v2_poll_cq()
4043 spin_unlock_irqrestore(&hr_cq->lock, flags); in hns_roce_v2_poll_cq()
4076 dev_warn(hr_dev->dev, "unsupported hem type %u.\n", type); in get_op_for_set_hem()
4077 return -EINVAL; in get_op_for_set_hem()
4090 u32 idx = obj / (HNS_HW_PAGE_SIZE / hr_dev->caps.gmv_entry_sz); in config_gmv_ba_to_hw()
4136 if (!hns_roce_check_whether_mhop(hr_dev, table->type)) in hns_roce_v2_set_hem()
4156 if (table->type == HEM_TYPE_SCCC) in hns_roce_v2_set_hem()
4160 hem = table->hem[hem_idx]; in hns_roce_v2_set_hem()
4162 ret = set_hem_to_hw(hr_dev, obj, hem->dma, table->type, step_idx); in hns_roce_v2_set_hem()
4165 bt_ba = table->bt_l0_dma_addr[i]; in hns_roce_v2_set_hem()
4167 bt_ba = table->bt_l1_dma_addr[l1_idx]; in hns_roce_v2_set_hem()
4169 ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type, step_idx); in hns_roce_v2_set_hem()
4180 struct device *dev = hr_dev->dev; in hns_roce_v2_clear_hem()
4184 if (!hns_roce_check_whether_mhop(hr_dev, table->type)) in hns_roce_v2_clear_hem()
4187 switch (table->type) { in hns_roce_v2_clear_hem()
4207 table->type); in hns_roce_v2_clear_hem()
4217 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cmd, tag); in hns_roce_v2_clear_hem()
4237 qpc_size = hr_dev->caps.qpc_sz; in hns_roce_v2_qp_modify()
4238 memcpy(mailbox->buf, context, qpc_size); in hns_roce_v2_qp_modify()
4239 memcpy(mailbox->buf + qpc_size, qpc_mask, qpc_size); in hns_roce_v2_qp_modify()
4241 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, in hns_roce_v2_qp_modify()
4242 HNS_ROCE_CMD_MODIFY_QPC, hr_qp->qpn); in hns_roce_v2_qp_modify()
4258 attr->max_dest_rd_atomic : hr_qp->resp_depth; in set_access_flags()
4261 attr->qp_access_flags : hr_qp->atomic_rd_en; in set_access_flags()
4286 to_hr_hem_entries_shift(hr_qp->sge.sge_cnt, in set_qpc_wqe_cnt()
4287 hr_qp->sge.sge_shift)); in set_qpc_wqe_cnt()
4289 hr_reg_write(context, QPC_SQ_SHIFT, ilog2(hr_qp->sq.wqe_cnt)); in set_qpc_wqe_cnt()
4291 hr_reg_write(context, QPC_RQ_SHIFT, ilog2(hr_qp->rq.wqe_cnt)); in set_qpc_wqe_cnt()
4296 return ib_cq ? to_hr_cq(ib_cq)->cqn : 0; in get_cqn()
4301 return ib_pd ? to_hr_pd(ib_pd)->pdn : 0; in get_pdn()
4308 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in modify_qp_reset_to_init()
4317 hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type)); in modify_qp_reset_to_init()
4319 hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd)); in modify_qp_reset_to_init()
4321 hr_reg_write(context, QPC_RQWS, ilog2(hr_qp->rq.max_gs)); in modify_qp_reset_to_init()
4328 if (ibqp->qp_type == IB_QPT_XRC_TGT) { in modify_qp_reset_to_init()
4329 context->qkey_xrcd = cpu_to_le32(hr_qp->xrcdn); in modify_qp_reset_to_init()
4334 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) in modify_qp_reset_to_init()
4337 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_OWNER_DB) in modify_qp_reset_to_init()
4341 lower_32_bits(hr_qp->rdb.dma) >> 1); in modify_qp_reset_to_init()
4343 upper_32_bits(hr_qp->rdb.dma)); in modify_qp_reset_to_init()
4345 hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq)); in modify_qp_reset_to_init()
4347 if (ibqp->srq) { in modify_qp_reset_to_init()
4349 hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn); in modify_qp_reset_to_init()
4354 hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq)); in modify_qp_reset_to_init()
4356 if (hr_dev->caps.qpc_sz < HNS_ROCE_V3_QPC_SZ) in modify_qp_reset_to_init()
4359 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_STASH) in modify_qp_reset_to_init()
4360 hr_reg_enable(&context->ext, QPCEX_STASH); in modify_qp_reset_to_init()
4373 hr_reg_write(context, QPC_TST, to_hr_qp_type(ibqp->qp_type)); in modify_qp_init_to_init()
4376 hr_reg_write(context, QPC_PD, get_pdn(ibqp->pd)); in modify_qp_init_to_init()
4379 hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq)); in modify_qp_init_to_init()
4382 hr_reg_write(context, QPC_TX_CQN, get_cqn(ibqp->send_cq)); in modify_qp_init_to_init()
4385 if (ibqp->srq) { in modify_qp_init_to_init()
4388 hr_reg_write(context, QPC_SRQN, to_hr_srq(ibqp->srq)->srqn); in modify_qp_init_to_init()
4403 ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts, in config_qp_rq_buf()
4405 if (hr_qp->rq.wqe_cnt && ret) { in config_qp_rq_buf()
4406 ibdev_err(&hr_dev->ib_dev, in config_qp_rq_buf()
4408 hr_qp->qpn, ret); in config_qp_rq_buf()
4412 wqe_sge_ba = hns_roce_get_mtr_ba(&hr_qp->mtr); in config_qp_rq_buf()
4414 context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3); in config_qp_rq_buf()
4415 qpc_mask->wqe_sge_ba = 0; in config_qp_rq_buf()
4427 to_hr_hem_hopnum(hr_dev->caps.wqe_sq_hop_num, in config_qp_rq_buf()
4428 hr_qp->sq.wqe_cnt)); in config_qp_rq_buf()
4432 to_hr_hem_hopnum(hr_dev->caps.wqe_sge_hop_num, in config_qp_rq_buf()
4433 hr_qp->sge.sge_cnt)); in config_qp_rq_buf()
4437 to_hr_hem_hopnum(hr_dev->caps.wqe_rq_hop_num, in config_qp_rq_buf()
4438 hr_qp->rq.wqe_cnt)); in config_qp_rq_buf()
4443 to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.ba_pg_shift)); in config_qp_rq_buf()
4447 to_hr_hw_page_shift(hr_qp->mtr.hem_cfg.buf_pg_shift)); in config_qp_rq_buf()
4450 context->rq_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0])); in config_qp_rq_buf()
4451 qpc_mask->rq_cur_blk_addr = 0; in config_qp_rq_buf()
4457 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { in config_qp_rq_buf()
4458 context->rq_nxt_blk_addr = in config_qp_rq_buf()
4460 qpc_mask->rq_nxt_blk_addr = 0; in config_qp_rq_buf()
4474 struct ib_device *ibdev = &hr_dev->ib_dev; in config_qp_sq_buf()
4480 ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->sq.offset, in config_qp_sq_buf()
4484 hr_qp->qpn, ret); in config_qp_sq_buf()
4487 if (hr_qp->sge.sge_cnt > 0) { in config_qp_sq_buf()
4488 ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, in config_qp_sq_buf()
4489 hr_qp->sge.offset, &sge_cur_blk, 1); in config_qp_sq_buf()
4492 hr_qp->qpn, ret); in config_qp_sq_buf()
4530 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD) in get_mtu()
4533 return attr->path_mtu; in get_mtu()
4544 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in modify_qp_init_to_rtr()
4546 struct ib_device *ibdev = &hr_dev->ib_dev; in modify_qp_init_to_rtr()
4565 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table, in modify_qp_init_to_rtr()
4566 hr_qp->qpn, &irrl_ba); in modify_qp_init_to_rtr()
4569 return -EINVAL; in modify_qp_init_to_rtr()
4573 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table, in modify_qp_init_to_rtr()
4574 hr_qp->qpn, &trrl_ba); in modify_qp_init_to_rtr()
4577 return -EINVAL; in modify_qp_init_to_rtr()
4583 return -EINVAL; in modify_qp_init_to_rtr()
4588 context->trrl_ba = cpu_to_le32(trrl_ba >> QPC_TRRL_BA_M_S); in modify_qp_init_to_rtr()
4589 qpc_mask->trrl_ba = 0; in modify_qp_init_to_rtr()
4593 context->irrl_ba = cpu_to_le32(irrl_ba >> QPC_IRRL_BA_L_S); in modify_qp_init_to_rtr()
4594 qpc_mask->irrl_ba = 0; in modify_qp_init_to_rtr()
4601 hr_reg_write(context, QPC_SIG_TYPE, hr_qp->sq_signal_bits); in modify_qp_init_to_rtr()
4604 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port; in modify_qp_init_to_rtr()
4606 smac = (const u8 *)hr_dev->dev_addr[port]; in modify_qp_init_to_rtr()
4607 dmac = (u8 *)attr->ah_attr.roce.dmac; in modify_qp_init_to_rtr()
4610 hr_dev->loop_idc == 0x1) { in modify_qp_init_to_rtr()
4611 hr_reg_write(context, QPC_LBI, hr_dev->loop_idc); in modify_qp_init_to_rtr()
4616 hr_reg_write(context, QPC_DQPN, attr->dest_qp_num); in modify_qp_init_to_rtr()
4620 memcpy(&context->dmac, dmac, sizeof(u32)); in modify_qp_init_to_rtr()
4622 qpc_mask->dmac = 0; in modify_qp_init_to_rtr()
4626 hr_qp->path_mtu = ib_mtu; in modify_qp_init_to_rtr()
4630 return -EINVAL; in modify_qp_init_to_rtr()
4651 context->rq_rnr_timer = 0; in modify_qp_init_to_rtr()
4652 qpc_mask->rq_rnr_timer = 0; in modify_qp_init_to_rtr()
4662 if (udata && ibqp->qp_type == IB_QPT_RC && in modify_qp_init_to_rtr()
4663 (uctx->config & HNS_ROCE_RQ_INLINE_FLAGS)) { in modify_qp_init_to_rtr()
4665 hr_dev->caps.flags & in modify_qp_init_to_rtr()
4671 (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_XRC_TGT) && in modify_qp_init_to_rtr()
4672 (uctx->config & HNS_ROCE_CQE_INLINE_FLAGS)) { in modify_qp_init_to_rtr()
4674 hr_dev->caps.flags & in modify_qp_init_to_rtr()
4689 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in modify_qp_rtr_to_rts()
4691 struct ib_device *ibdev = &hr_dev->ib_dev; in modify_qp_rtr_to_rts()
4697 return -EINVAL; in modify_qp_rtr_to_rts()
4743 return -ENOMEM; in alloc_dip_entry()
4755 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); in get_dip_ctx_idx()
4756 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in get_dip_ctx_idx()
4757 struct xarray *dip_xa = &hr_dev->qp_table.dip_xa; in get_dip_ctx_idx()
4763 ret = alloc_dip_entry(dip_xa, ibqp->qp_num); in get_dip_ctx_idx()
4770 if (hr_dip->qp_cnt && in get_dip_ctx_idx()
4771 !memcmp(grh->dgid.raw, hr_dip->dgid, GID_LEN_V2)) { in get_dip_ctx_idx()
4772 *dip_idx = hr_dip->dip_idx; in get_dip_ctx_idx()
4773 hr_dip->qp_cnt++; in get_dip_ctx_idx()
4774 hr_qp->dip = hr_dip; in get_dip_ctx_idx()
4783 if (hr_dip->qp_cnt) in get_dip_ctx_idx()
4787 memcpy(hr_dip->dgid, grh->dgid.raw, sizeof(grh->dgid.raw)); in get_dip_ctx_idx()
4788 hr_dip->dip_idx = idx; in get_dip_ctx_idx()
4789 hr_dip->qp_cnt++; in get_dip_ctx_idx()
4790 hr_qp->dip = hr_dip; in get_dip_ctx_idx()
4795 if (WARN_ON_ONCE(!hr_qp->dip)) in get_dip_ctx_idx()
4796 ret = -ENOSPC; in get_dip_ctx_idx()
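get_dip_ctx_idx() lets every QP aimed at the same destination GID share one DIP congestion context: a hit on the DGID bumps the entry's QP count and reuses its index, otherwise a free slot is claimed, and only when none remains does the call fail. An array-based sketch with hypothetical names:

/* DGID-keyed context reuse sketch with per-entry refcounts. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GID_LEN 16
#define DIP_SLOTS 4

struct dip { uint8_t dgid[GID_LEN]; uint32_t qp_cnt, idx; };

static struct dip table[DIP_SLOTS];

static int get_dip_idx(const uint8_t *dgid, uint32_t *idx)
{
	struct dip *free_slot = NULL;
	int i;

	for (i = 0; i < DIP_SLOTS; i++) {
		if (table[i].qp_cnt &&
		    !memcmp(table[i].dgid, dgid, GID_LEN)) {
			table[i].qp_cnt++; /* reuse existing context */
			*idx = table[i].idx;
			return 0;
		}
		if (!table[i].qp_cnt && !free_slot)
			free_slot = &table[i];
	}
	if (!free_slot)
		return -1; /* -ENOSPC in the driver */
	memcpy(free_slot->dgid, dgid, GID_LEN);
	free_slot->idx = free_slot - table;
	free_slot->qp_cnt = 1;
	*idx = free_slot->idx;
	return 0;
}

int main(void)
{
	uint8_t a[GID_LEN] = { 1 }, b[GID_LEN] = { 2 };
	uint32_t idx;

	get_dip_idx(a, &idx);
	printf("a -> %u\n", idx);
	get_dip_idx(b, &idx);
	printf("b -> %u\n", idx);
	get_dip_idx(a, &idx);
	printf("a again -> %u (shared)\n", idx);
	return 0;
}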
4834 switch (hr_qp->cong_type) { in check_cong_type()
4836 cong_alg->alg_sel = CONG_DCQCN; in check_cong_type()
4837 cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL; in check_cong_type()
4838 cong_alg->dip_vld = DIP_INVALID; in check_cong_type()
4839 cong_alg->wnd_mode_sel = WND_LIMIT; in check_cong_type()
4842 cong_alg->alg_sel = CONG_WINDOW; in check_cong_type()
4843 cong_alg->alg_sub_sel = CONG_LDCP; in check_cong_type()
4844 cong_alg->dip_vld = DIP_INVALID; in check_cong_type()
4845 cong_alg->wnd_mode_sel = WND_UNLIMIT; in check_cong_type()
4848 cong_alg->alg_sel = CONG_WINDOW; in check_cong_type()
4849 cong_alg->alg_sub_sel = CONG_HC3; in check_cong_type()
4850 cong_alg->dip_vld = DIP_INVALID; in check_cong_type()
4851 cong_alg->wnd_mode_sel = WND_LIMIT; in check_cong_type()
4854 cong_alg->alg_sel = CONG_DCQCN; in check_cong_type()
4855 cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL; in check_cong_type()
4856 cong_alg->dip_vld = DIP_VALID; in check_cong_type()
4857 cong_alg->wnd_mode_sel = WND_LIMIT; in check_cong_type()
4860 hr_qp->cong_type = CONG_TYPE_DCQCN; in check_cong_type()
4861 cong_alg->alg_sel = CONG_DCQCN; in check_cong_type()
4862 cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL; in check_cong_type()
4863 cong_alg->dip_vld = DIP_INVALID; in check_cong_type()
4864 cong_alg->wnd_mode_sel = WND_LIMIT; in check_cong_type()
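check_cong_type() maps each congestion type to a fixed four-tuple (algorithm select, sub-select, DIP validity, window mode), with unknown types falling back to DCQCN. The same mapping written as a lookup table, which makes the per-type parameter sets easy to scan; the enum values and strings are illustrative stand-ins for the driver's constants:

/* Congestion parameter table sketch mirroring the switch above. */
#include <stdio.h>

enum cong_type { TYPE_DCQCN, TYPE_LDCP, TYPE_HC3, TYPE_DIP, TYPE_MAX };

struct cong_alg {
	const char *alg_sel, *alg_sub_sel, *dip_vld, *wnd_mode;
};

static const struct cong_alg cong_tbl[TYPE_MAX] = {
	[TYPE_DCQCN] = { "DCQCN",  "UNSUPPORTED", "INVALID", "LIMIT" },
	[TYPE_LDCP]  = { "WINDOW", "LDCP",        "INVALID", "UNLIMIT" },
	[TYPE_HC3]   = { "WINDOW", "HC3",         "INVALID", "LIMIT" },
	[TYPE_DIP]   = { "DCQCN",  "UNSUPPORTED", "VALID",   "LIMIT" },
};

int main(void)
{
	int t;

	for (t = 0; t < TYPE_MAX; t++)
		printf("%d: %s/%s dip=%s wnd=%s\n", t,
		       cong_tbl[t].alg_sel, cong_tbl[t].alg_sub_sel,
		       cong_tbl[t].dip_vld, cong_tbl[t].wnd_mode);
	return 0;
}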
4875 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); in fill_cong_field()
4877 struct ib_device *ibdev = ibqp->device; in fill_cong_field()
4883 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 || in fill_cong_field()
4884 grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE) in fill_cong_field()
4891 hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id + in fill_cong_field()
4892 hr_qp->cong_type * HNS_ROCE_CONG_SIZE); in fill_cong_field()
4894 hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel); in fill_cong_field()
4895 hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SEL); in fill_cong_field()
4896 hr_reg_write(&context->ext, QPCEX_CONG_ALG_SUB_SEL, in fill_cong_field()
4898 hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SUB_SEL); in fill_cong_field()
4899 hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX_VLD, cong_field.dip_vld); in fill_cong_field()
4900 hr_reg_clear(&qpc_mask->ext, QPCEX_DIP_CTX_IDX_VLD); in fill_cong_field()
4901 hr_reg_write(&context->ext, QPCEX_SQ_RQ_NOT_FORBID_EN, in fill_cong_field()
4903 hr_reg_clear(&qpc_mask->ext, QPCEX_SQ_RQ_NOT_FORBID_EN); in fill_cong_field()
4915 hr_reg_write(&context->ext, QPCEX_DIP_CTX_IDX, dip_idx); in fill_cong_field()
4916 hr_reg_write(&qpc_mask->ext, QPCEX_DIP_CTX_IDX, 0); in fill_cong_field()
4924 struct hns_roce_v2_priv *priv = hr_dev->priv; in hns_roce_hw_v2_get_dscp()
4925 struct hnae3_handle *handle = priv->handle; in hns_roce_hw_v2_get_dscp()
4926 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; in hns_roce_hw_v2_get_dscp()
4928 if (!ops->get_dscp_prio) in hns_roce_hw_v2_get_dscp()
4929 return -EOPNOTSUPP; in hns_roce_hw_v2_get_dscp()
4931 return ops->get_dscp_prio(handle, dscp, tc_mode, priority); in hns_roce_hw_v2_get_dscp()
4938 max_sl = min_t(u32, MAX_SERVICE_LEVEL, hr_dev->caps.sl_num - 1); in check_sl_valid()
4940 ibdev_err_ratelimited(&hr_dev->ib_dev, in check_sl_valid()
4954 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); in hns_roce_set_sl()
4955 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in hns_roce_set_sl()
4957 struct ib_device *ibdev = &hr_dev->ib_dev; in hns_roce_set_sl()
4960 ret = hns_roce_hw_v2_get_dscp(hr_dev, get_tclass(&attr->ah_attr.grh), in hns_roce_set_sl()
4961 &hr_qp->tc_mode, &hr_qp->priority); in hns_roce_set_sl()
4962 if (ret && ret != -EOPNOTSUPP && in hns_roce_set_sl()
4963 grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) { in hns_roce_set_sl()
4969 if (hr_qp->tc_mode == HNAE3_TC_MAP_MODE_DSCP && in hns_roce_set_sl()
4970 grh->sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) in hns_roce_set_sl()
4971 hr_qp->sl = hr_qp->priority; in hns_roce_set_sl()
4973 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); in hns_roce_set_sl()
4975 if (!check_sl_valid(hr_dev, hr_qp->sl)) in hns_roce_set_sl()
4976 return -EINVAL; in hns_roce_set_sl()
4978 hr_reg_write(context, QPC_SL, hr_qp->sl); in hns_roce_set_sl()
4990 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); in hns_roce_v2_set_path()
4991 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in hns_roce_v2_set_path()
4993 struct ib_device *ibdev = &hr_dev->ib_dev; in hns_roce_v2_set_path()
4995 u8 sl = rdma_ah_get_sl(&attr->ah_attr); in hns_roce_v2_set_path()
5008 if (hr_qp->free_mr_en) { in hns_roce_v2_set_path()
5010 return -EINVAL; in hns_roce_v2_set_path()
5013 hr_qp->sl = sl; in hns_roce_v2_set_path()
5017 ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1; in hns_roce_v2_set_path()
5018 hr_port = ib_port - 1; in hns_roce_v2_set_path()
5019 is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) && in hns_roce_v2_set_path()
5020 rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH; in hns_roce_v2_set_path()
5023 gid_attr = attr->ah_attr.grh.sgid_attr; in hns_roce_v2_set_path()
5028 is_udp = (gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP); in hns_roce_v2_set_path()
5033 hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { in hns_roce_v2_set_path()
5043 if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) { in hns_roce_v2_set_path()
5045 grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]); in hns_roce_v2_set_path()
5046 return -EINVAL; in hns_roce_v2_set_path()
5049 if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) { in hns_roce_v2_set_path()
5051 return -EINVAL; in hns_roce_v2_set_path()
5055 is_udp ? rdma_get_udp_sport(grh->flow_label, ibqp->qp_num, in hns_roce_v2_set_path()
5056 attr->dest_qp_num) : in hns_roce_v2_set_path()
5061 hr_reg_write(context, QPC_GMV_IDX, grh->sgid_index); in hns_roce_v2_set_path()
5065 hr_reg_write(context, QPC_HOPLIMIT, grh->hop_limit); in hns_roce_v2_set_path()
5072 hr_reg_write(context, QPC_TC, get_tclass(&attr->ah_attr.grh)); in hns_roce_v2_set_path()
5075 hr_reg_write(context, QPC_FL, grh->flow_label); in hns_roce_v2_set_path()
5077 memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw)); in hns_roce_v2_set_path()
5078 memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw)); in hns_roce_v2_set_path()
5117 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in hns_roce_v2_set_abs_fields()
5121 return -EINVAL; in hns_roce_v2_set_abs_fields()
5124 memset(qpc_mask, 0, hr_dev->caps.qpc_sz); in hns_roce_v2_set_abs_fields()
5143 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { in check_qp_timeout_cfg_range()
5145 ibdev_warn(&hr_dev->ib_dev, in check_qp_timeout_cfg_range()
5150 } else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) { in check_qp_timeout_cfg_range()
5152 ibdev_warn(&hr_dev->ib_dev, in check_qp_timeout_cfg_range()
5167 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in hns_roce_v2_set_opt_fields()
5180 timeout = attr->timeout; in hns_roce_v2_set_opt_fields()
5188 hr_reg_write(context, QPC_RETRY_NUM_INIT, attr->retry_cnt); in hns_roce_v2_set_opt_fields()
5191 hr_reg_write(context, QPC_RETRY_CNT, attr->retry_cnt); in hns_roce_v2_set_opt_fields()
5196 hr_reg_write(context, QPC_RNR_NUM_INIT, attr->rnr_retry); in hns_roce_v2_set_opt_fields()
5199 hr_reg_write(context, QPC_RNR_CNT, attr->rnr_retry); in hns_roce_v2_set_opt_fields()
5204 hr_reg_write(context, QPC_SQ_CUR_PSN, attr->sq_psn); in hns_roce_v2_set_opt_fields()
5207 hr_reg_write(context, QPC_SQ_MAX_PSN, attr->sq_psn); in hns_roce_v2_set_opt_fields()
5210 hr_reg_write(context, QPC_RETRY_MSG_PSN_L, attr->sq_psn); in hns_roce_v2_set_opt_fields()
5214 attr->sq_psn >> RETRY_MSG_PSN_SHIFT); in hns_roce_v2_set_opt_fields()
5217 hr_reg_write(context, QPC_RETRY_MSG_FPKT_PSN, attr->sq_psn); in hns_roce_v2_set_opt_fields()
5220 hr_reg_write(context, QPC_RX_ACK_EPSN, attr->sq_psn); in hns_roce_v2_set_opt_fields()
5225 attr->max_dest_rd_atomic) { in hns_roce_v2_set_opt_fields()
5227 fls(attr->max_dest_rd_atomic - 1)); in hns_roce_v2_set_opt_fields()
5231 if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) { in hns_roce_v2_set_opt_fields()
5232 hr_reg_write(context, QPC_SR_MAX, fls(attr->max_rd_atomic - 1)); in hns_roce_v2_set_opt_fields()
5241 hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ? in hns_roce_v2_set_opt_fields()
5242 HNS_ROCE_RNR_TIMER_10NS : attr->min_rnr_timer); in hns_roce_v2_set_opt_fields()
5247 hr_reg_write(context, QPC_RX_REQ_EPSN, attr->rq_psn); in hns_roce_v2_set_opt_fields()
5250 hr_reg_write(context, QPC_RAQ_PSN, attr->rq_psn - 1); in hns_roce_v2_set_opt_fields()
5255 context->qkey_xrcd = cpu_to_le32(attr->qkey); in hns_roce_v2_set_opt_fields()
5256 qpc_mask->qkey_xrcd = 0; in hns_roce_v2_set_opt_fields()
5257 hr_qp->qkey = attr->qkey; in hns_roce_v2_set_opt_fields()
5267 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in hns_roce_v2_record_opt_fields()
5271 hr_qp->atomic_rd_en = attr->qp_access_flags; in hns_roce_v2_record_opt_fields()
5274 hr_qp->resp_depth = attr->max_dest_rd_atomic; in hns_roce_v2_record_opt_fields()
5276 hr_qp->port = attr->port_num - 1; in hns_roce_v2_record_opt_fields()
5277 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; in hns_roce_v2_record_opt_fields()
5283 struct ib_qp *ibqp = &hr_qp->ibqp; in clear_qp()
5285 if (ibqp->send_cq) in clear_qp()
5286 hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq), in clear_qp()
5287 hr_qp->qpn, NULL); in clear_qp()
5289 if (ibqp->recv_cq && ibqp->recv_cq != ibqp->send_cq) in clear_qp()
5290 hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), in clear_qp()
5291 hr_qp->qpn, ibqp->srq ? in clear_qp()
5292 to_hr_srq(ibqp->srq) : NULL); in clear_qp()
5294 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB) in clear_qp()
5295 *hr_qp->rdb.db_record = 0; in clear_qp()
5297 hr_qp->rq.head = 0; in clear_qp()
5298 hr_qp->rq.tail = 0; in clear_qp()
5299 hr_qp->sq.head = 0; in clear_qp()
5300 hr_qp->sq.tail = 0; in clear_qp()
5301 hr_qp->next_sge = 0; in clear_qp()
5312 if (ibqp->qp_type == IB_QPT_XRC_TGT) in v2_set_flushed_fields()
5315 spin_lock_irqsave(&hr_qp->sq.lock, sq_flag); in v2_set_flushed_fields()
5316 hr_reg_write(context, QPC_SQ_PRODUCER_IDX, hr_qp->sq.head); in v2_set_flushed_fields()
5318 hr_qp->state = IB_QPS_ERR; in v2_set_flushed_fields()
5319 spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag); in v2_set_flushed_fields()
5321 if (ibqp->srq || ibqp->qp_type == IB_QPT_XRC_INI) /* no RQ */ in v2_set_flushed_fields()
5324 spin_lock_irqsave(&hr_qp->rq.lock, rq_flag); in v2_set_flushed_fields()
5325 hr_reg_write(context, QPC_RQ_PRODUCER_IDX, hr_qp->rq.head); in v2_set_flushed_fields()
5327 spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag); in v2_set_flushed_fields()
5335 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in hns_roce_v2_modify_qp()
5340 struct ib_device *ibdev = &hr_dev->ib_dev; in hns_roce_v2_modify_qp()
5344 return -EOPNOTSUPP; in hns_roce_v2_modify_qp()
5352 memset(context, 0, hr_dev->caps.qpc_sz); in hns_roce_v2_modify_qp()
5353 memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz); in hns_roce_v2_modify_qp()
5371 to_hr_qp_type(hr_qp->ibqp.qp_type) == SERV_TYPE_XRC || in hns_roce_v2_modify_qp()
5372 ibqp->srq); in hns_roce_v2_modify_qp()
5386 hr_qp->state = new_state; in hns_roce_v2_modify_qp()
5390 if (new_state == IB_QPS_RESET && !ibqp->uobject) in hns_roce_v2_modify_qp()
5410 return (state < ARRAY_SIZE(map)) ? map[state] : -1; in to_ib_qp_st()
5423 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_QPC, in hns_roce_v2_query_qpc()
5428 memcpy(buffer, mailbox->buf, hr_dev->caps.qpc_sz); in hns_roce_v2_query_qpc()
5446 context = mailbox->buf; in hns_roce_v2_query_srqc()
5447 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_SRQC, in hns_roce_v2_query_srqc()
5470 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_SCCC, in hns_roce_v2_query_sccc()
5475 context = mailbox->buf; in hns_roce_v2_query_sccc()
5489 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) in get_qp_timeout_attr()
5490 timeout -= HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08; in get_qp_timeout_attr()
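get_qp_timeout_attr() above undoes a fixed HIP08 bias before reporting the ack timeout; the matching set path is not part of this listing, but presumably adds the same offset when the biased value still fits the QPC field (the range warnings in check_qp_timeout_cfg_range() point the same way). A sketch of that assumed pairing, with illustrative constants:

/* Assumed set/get pairing for a biased timeout field. */
#include <stdint.h>
#include <stdio.h>

#define TIMEOUT_OFS 10 /* illustrative stand-in for the HIP08 offset */
#define QPC_AT_MAX  31 /* assume a 5-bit QPC field */

/* Set path: bias the user value, refusing it if it would overflow. */
static int set_timeout(uint8_t user, uint8_t *qpc_at)
{
	if (user + TIMEOUT_OFS > QPC_AT_MAX) {
		printf("timeout %u out of range, not modified\n", user);
		return -1;
	}
	*qpc_at = user + TIMEOUT_OFS;
	return 0;
}

/* Query path: subtract the bias back out. */
static uint8_t get_timeout(uint8_t qpc_at)
{
	return qpc_at - TIMEOUT_OFS;
}

int main(void)
{
	uint8_t at;

	if (!set_timeout(14, &at))
		printf("qpc=%u query=%u\n", at, get_timeout(at));
	set_timeout(30, &at);
	return 0;
}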
5499 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in hns_roce_v2_query_qp()
5502 struct ib_device *ibdev = &hr_dev->ib_dev; in hns_roce_v2_query_qp()
5510 mutex_lock(&hr_qp->mutex); in hns_roce_v2_query_qp()
5512 if (hr_qp->state == IB_QPS_RESET) { in hns_roce_v2_query_qp()
5513 qp_attr->qp_state = IB_QPS_RESET; in hns_roce_v2_query_qp()
5518 ret = hns_roce_v2_query_qpc(hr_dev, hr_qp->qpn, &context); in hns_roce_v2_query_qp()
5523 ret = -EINVAL; in hns_roce_v2_query_qp()
5529 if (tmp_qp_state == -1) { in hns_roce_v2_query_qp()
5531 ret = -EINVAL; in hns_roce_v2_query_qp()
5534 hr_qp->state = (u8)tmp_qp_state; in hns_roce_v2_query_qp()
5535 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state; in hns_roce_v2_query_qp()
5536 qp_attr->path_mtu = (enum ib_mtu)hr_reg_read(&context, QPC_MTU); in hns_roce_v2_query_qp()
5537 qp_attr->path_mig_state = IB_MIG_ARMED; in hns_roce_v2_query_qp()
5538 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; in hns_roce_v2_query_qp()
5539 if (hr_qp->ibqp.qp_type == IB_QPT_UD) in hns_roce_v2_query_qp()
5540 qp_attr->qkey = le32_to_cpu(context.qkey_xrcd); in hns_roce_v2_query_qp()
5542 qp_attr->rq_psn = hr_reg_read(&context, QPC_RX_REQ_EPSN); in hns_roce_v2_query_qp()
5543 qp_attr->sq_psn = (u32)hr_reg_read(&context, QPC_SQ_CUR_PSN); in hns_roce_v2_query_qp()
5544 qp_attr->dest_qp_num = hr_reg_read(&context, QPC_DQPN); in hns_roce_v2_query_qp()
5545 qp_attr->qp_access_flags = in hns_roce_v2_query_qp()
5550 if (hr_qp->ibqp.qp_type == IB_QPT_RC || in hns_roce_v2_query_qp()
5551 hr_qp->ibqp.qp_type == IB_QPT_XRC_INI || in hns_roce_v2_query_qp()
5552 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) { in hns_roce_v2_query_qp()
5554 rdma_ah_retrieve_grh(&qp_attr->ah_attr); in hns_roce_v2_query_qp()
5556 rdma_ah_set_sl(&qp_attr->ah_attr, in hns_roce_v2_query_qp()
5558 rdma_ah_set_port_num(&qp_attr->ah_attr, hr_qp->port + 1); in hns_roce_v2_query_qp()
5559 rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH); in hns_roce_v2_query_qp()
5560 grh->flow_label = hr_reg_read(&context, QPC_FL); in hns_roce_v2_query_qp()
5561 grh->sgid_index = hr_reg_read(&context, QPC_GMV_IDX); in hns_roce_v2_query_qp()
5562 grh->hop_limit = hr_reg_read(&context, QPC_HOPLIMIT); in hns_roce_v2_query_qp()
5563 grh->traffic_class = hr_reg_read(&context, QPC_TC); in hns_roce_v2_query_qp()
5565 memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw)); in hns_roce_v2_query_qp()
5568 qp_attr->port_num = hr_qp->port + 1; in hns_roce_v2_query_qp()
5569 qp_attr->sq_draining = 0; in hns_roce_v2_query_qp()
5570 qp_attr->max_rd_atomic = 1 << hr_reg_read(&context, QPC_SR_MAX); in hns_roce_v2_query_qp()
5571 qp_attr->max_dest_rd_atomic = 1 << hr_reg_read(&context, QPC_RR_MAX); in hns_roce_v2_query_qp()
5573 qp_attr->min_rnr_timer = (u8)hr_reg_read(&context, QPC_MIN_RNR_TIME); in hns_roce_v2_query_qp()
5574 qp_attr->timeout = get_qp_timeout_attr(hr_dev, &context); in hns_roce_v2_query_qp()
5575 qp_attr->retry_cnt = hr_reg_read(&context, QPC_RETRY_NUM_INIT); in hns_roce_v2_query_qp()
5576 qp_attr->rnr_retry = hr_reg_read(&context, QPC_RNR_NUM_INIT); in hns_roce_v2_query_qp()
5579 qp_attr->cur_qp_state = qp_attr->qp_state; in hns_roce_v2_query_qp()
5580 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt; in hns_roce_v2_query_qp()
5581 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge; in hns_roce_v2_query_qp()
5582 qp_attr->cap.max_inline_data = hr_qp->max_inline_data; in hns_roce_v2_query_qp()
5584 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt; in hns_roce_v2_query_qp()
5585 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs; in hns_roce_v2_query_qp()
5587 qp_init_attr->qp_context = ibqp->qp_context; in hns_roce_v2_query_qp()
5588 qp_init_attr->qp_type = ibqp->qp_type; in hns_roce_v2_query_qp()
5589 qp_init_attr->recv_cq = ibqp->recv_cq; in hns_roce_v2_query_qp()
5590 qp_init_attr->send_cq = ibqp->send_cq; in hns_roce_v2_query_qp()
5591 qp_init_attr->srq = ibqp->srq; in hns_roce_v2_query_qp()
5592 qp_init_attr->cap = qp_attr->cap; in hns_roce_v2_query_qp()
5593 qp_init_attr->sq_sig_type = hr_qp->sq_signal_bits; in hns_roce_v2_query_qp()
5596 mutex_unlock(&hr_qp->mutex); in hns_roce_v2_query_qp()
5602 return ((hr_qp->ibqp.qp_type == IB_QPT_RC || in modify_qp_is_ok()
5603 hr_qp->ibqp.qp_type == IB_QPT_UD || in modify_qp_is_ok()
5604 hr_qp->ibqp.qp_type == IB_QPT_XRC_INI || in modify_qp_is_ok()
5605 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT) && in modify_qp_is_ok()
5606 hr_qp->state != IB_QPS_RESET); in modify_qp_is_ok()
5613 struct ib_device *ibdev = &hr_dev->ib_dev; in hns_roce_v2_destroy_qp_common()
5620 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0, in hns_roce_v2_destroy_qp_common()
5621 hr_qp->state, IB_QPS_RESET, udata); in hns_roce_v2_destroy_qp_common()
5628 send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL; in hns_roce_v2_destroy_qp_common()
5629 recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL; in hns_roce_v2_destroy_qp_common()
5631 spin_lock_irqsave(&hr_dev->qp_list_lock, flags); in hns_roce_v2_destroy_qp_common()
5636 __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, in hns_roce_v2_destroy_qp_common()
5637 (hr_qp->ibqp.srq ? in hns_roce_v2_destroy_qp_common()
5638 to_hr_srq(hr_qp->ibqp.srq) : in hns_roce_v2_destroy_qp_common()
5642 __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL); in hns_roce_v2_destroy_qp_common()
5648 spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags); in hns_roce_v2_destroy_qp_common()
5656 struct hns_roce_dip *hr_dip = hr_qp->dip; in put_dip_ctx_idx()
5661 xa_lock(&hr_dev->qp_table.dip_xa); in put_dip_ctx_idx()
5663 hr_dip->qp_cnt--; in put_dip_ctx_idx()
5664 if (!hr_dip->qp_cnt) in put_dip_ctx_idx()
5665 memset(hr_dip->dgid, 0, GID_LEN_V2); in put_dip_ctx_idx()
5667 xa_unlock(&hr_dev->qp_table.dip_xa); in put_dip_ctx_idx()
5672 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); in hns_roce_v2_destroy_qp()
5678 spin_lock_irqsave(&hr_qp->flush_lock, flags); in hns_roce_v2_destroy_qp()
5679 set_bit(HNS_ROCE_STOP_FLUSH_FLAG, &hr_qp->flush_flag); in hns_roce_v2_destroy_qp()
5680 spin_unlock_irqrestore(&hr_qp->flush_lock, flags); in hns_roce_v2_destroy_qp()
5681 flush_work(&hr_qp->flush_work.work); in hns_roce_v2_destroy_qp()
5683 if (hr_qp->cong_type == CONG_TYPE_DIP) in hns_roce_v2_destroy_qp()
5688 ibdev_err_ratelimited(&hr_dev->ib_dev, in hns_roce_v2_destroy_qp()
5690 hr_qp->qpn, ret); in hns_roce_v2_destroy_qp()
5700 struct ib_device *ibdev = &hr_dev->ib_dev; in hns_roce_v2_qp_flow_control_init()
5706 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) in hns_roce_v2_qp_flow_control_init()
5709 mutex_lock(&hr_dev->qp_table.scc_mutex); in hns_roce_v2_qp_flow_control_init()
5722 clr->qpn = cpu_to_le32(hr_qp->qpn); in hns_roce_v2_qp_flow_control_init()
5741 if (resp->clr_done) in hns_roce_v2_qp_flow_control_init()
5748 ret = -ETIMEDOUT; in hns_roce_v2_qp_flow_control_init()
5751 mutex_unlock(&hr_dev->qp_table.scc_mutex); in hns_roce_v2_qp_flow_control_init()
5761 struct hns_roce_idx_que *idx_que = &srq->idx_que; in hns_roce_v2_write_srqc_index_queue()
5762 struct ib_device *ibdev = srq->ibsrq.device; in hns_roce_v2_write_srqc_index_queue()
5769 ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx, in hns_roce_v2_write_srqc_index_queue()
5777 dma_handle_idx = hns_roce_get_mtr_ba(&idx_que->mtr); in hns_roce_v2_write_srqc_index_queue()
5780 to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt)); in hns_roce_v2_write_srqc_index_queue()
5787 to_hr_hw_page_shift(idx_que->mtr.hem_cfg.ba_pg_shift)); in hns_roce_v2_write_srqc_index_queue()
5789 to_hr_hw_page_shift(idx_que->mtr.hem_cfg.buf_pg_shift)); in hns_roce_v2_write_srqc_index_queue()
5806 struct ib_device *ibdev = srq->ibsrq.device; in hns_roce_v2_write_srqc()
5816 ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe, in hns_roce_v2_write_srqc()
5824 dma_handle_wqe = hns_roce_get_mtr_ba(&srq->buf_mtr); in hns_roce_v2_write_srqc()
5828 srq->ibsrq.srq_type == IB_SRQT_XRC); in hns_roce_v2_write_srqc()
5829 hr_reg_write(ctx, SRQC_PD, to_hr_pd(srq->ibsrq.pd)->pdn); in hns_roce_v2_write_srqc()
5830 hr_reg_write(ctx, SRQC_SRQN, srq->srqn); in hns_roce_v2_write_srqc()
5831 hr_reg_write(ctx, SRQC_XRCD, srq->xrcdn); in hns_roce_v2_write_srqc()
5832 hr_reg_write(ctx, SRQC_XRC_CQN, srq->cqn); in hns_roce_v2_write_srqc()
5833 hr_reg_write(ctx, SRQC_SHIFT, ilog2(srq->wqe_cnt)); in hns_roce_v2_write_srqc()
5835 srq->max_gs <= 0 ? 0 : fls(srq->max_gs - 1)); in hns_roce_v2_write_srqc()
5838 to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num, in hns_roce_v2_write_srqc()
5839 srq->wqe_cnt)); in hns_roce_v2_write_srqc()
5846 to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.ba_pg_shift)); in hns_roce_v2_write_srqc()
5848 to_hr_hw_page_shift(srq->buf_mtr.hem_cfg.buf_pg_shift)); in hns_roce_v2_write_srqc()
5850 if (srq->cap_flags & HNS_ROCE_SRQ_CAP_RECORD_DB) { in hns_roce_v2_write_srqc()
5853 lower_32_bits(srq->rdb.dma) >> 1); in hns_roce_v2_write_srqc()
5855 upper_32_bits(srq->rdb.dma)); in hns_roce_v2_write_srqc()
5866 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device); in hns_roce_v2_modify_srq()
5875 ret = -EOPNOTSUPP; in hns_roce_v2_modify_srq()
5880 if (srq_attr->srq_limit > srq->wqe_cnt) { in hns_roce_v2_modify_srq()
5881 ret = -EINVAL; in hns_roce_v2_modify_srq()
5891 srq_context = mailbox->buf; in hns_roce_v2_modify_srq()
5892 srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1; in hns_roce_v2_modify_srq()
5896 hr_reg_write(srq_context, SRQC_LIMIT_WL, srq_attr->srq_limit); in hns_roce_v2_modify_srq()
5899 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, in hns_roce_v2_modify_srq()
5900 HNS_ROCE_CMD_MODIFY_SRQC, srq->srqn); in hns_roce_v2_modify_srq()
5903 ibdev_err(&hr_dev->ib_dev, in hns_roce_v2_modify_srq()
5910 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_SRQ_MODIFY_ERR_CNT]); in hns_roce_v2_modify_srq()
5917 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device); in hns_roce_v2_query_srq()
5927 srq_context = mailbox->buf; in hns_roce_v2_query_srq()
5928 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, in hns_roce_v2_query_srq()
5929 HNS_ROCE_CMD_QUERY_SRQC, srq->srqn); in hns_roce_v2_query_srq()
5931 ibdev_err(&hr_dev->ib_dev, in hns_roce_v2_query_srq()
5937 attr->srq_limit = hr_reg_read(srq_context, SRQC_LIMIT_WL); in hns_roce_v2_query_srq()
5938 attr->max_wr = srq->wqe_cnt; in hns_roce_v2_query_srq()
5939 attr->max_sge = srq->max_gs - srq->rsv_sge; in hns_roce_v2_query_srq()
5948 struct hns_roce_dev *hr_dev = to_hr_dev(cq->device); in hns_roce_v2_modify_cq()
5960 cq_context = mailbox->buf; in hns_roce_v2_modify_cq()
5961 cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1; in hns_roce_v2_modify_cq()
5968 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { in hns_roce_v2_modify_cq()
5970 dev_info(hr_dev->dev, in hns_roce_v2_modify_cq()
5980 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, in hns_roce_v2_modify_cq()
5981 HNS_ROCE_CMD_MODIFY_CQC, hr_cq->cqn); in hns_roce_v2_modify_cq()
5984 ibdev_err_ratelimited(&hr_dev->ib_dev, in hns_roce_v2_modify_cq()
5990 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CQ_MODIFY_ERR_CNT]); in hns_roce_v2_modify_cq()
6006 context = mailbox->buf; in hns_roce_v2_query_cqc()
6007 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, in hns_roce_v2_query_cqc()
6010 ibdev_err_ratelimited(&hr_dev->ib_dev, in hns_roce_v2_query_cqc()
6035 context = mailbox->buf; in hns_roce_v2_query_mpt()
6036 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_MPT, in hns_roce_v2_query_mpt()
6039 ibdev_err(&hr_dev->ib_dev, in hns_roce_v2_query_mpt()
6055 struct hns_roce_dev *hr_dev = irq_work->hr_dev; in dump_aeqe_log()
6056 struct ib_device *ibdev = &hr_dev->ib_dev; in dump_aeqe_log()
6058 switch (irq_work->event_type) { in dump_aeqe_log()
6072 irq_work->queue_num, irq_work->sub_type); in dump_aeqe_log()
6076 irq_work->queue_num); in dump_aeqe_log()
6080 irq_work->queue_num, irq_work->sub_type); in dump_aeqe_log()
6092 ibdev_err(ibdev, "CQ 0x%x access err.\n", irq_work->queue_num); in dump_aeqe_log()
6095 ibdev_warn(ibdev, "CQ 0x%x overflow\n", irq_work->queue_num); in dump_aeqe_log()
6113 irq_work->event_type); in dump_aeqe_log()
6122 struct hns_roce_dev *hr_dev = irq_work->hr_dev; in hns_roce_irq_work_handle()
6123 int event_type = irq_work->event_type; in hns_roce_irq_work_handle()
6124 u32 queue_num = irq_work->queue_num; in hns_roce_irq_work_handle()
6165 INIT_WORK(&irq_work->work, hns_roce_irq_work_handle); in hns_roce_v2_init_irq_work()
6166 irq_work->hr_dev = hr_dev; in hns_roce_v2_init_irq_work()
6167 irq_work->event_type = eq->event_type; in hns_roce_v2_init_irq_work()
6168 irq_work->sub_type = eq->sub_type; in hns_roce_v2_init_irq_work()
6169 irq_work->queue_num = queue_num; in hns_roce_v2_init_irq_work()
6170 queue_work(hr_dev->irq_workq, &irq_work->work); in hns_roce_v2_init_irq_work()
6175 struct hns_roce_dev *hr_dev = eq->hr_dev; in update_eq_db()
6178 if (eq->type_flag == HNS_ROCE_AEQ) { in update_eq_db()
6180 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ? in update_eq_db()
6184 hr_reg_write(&eq_db, EQ_DB_TAG, eq->eqn); in update_eq_db()
6187 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ? in update_eq_db()
6192 hr_reg_write(&eq_db, EQ_DB_CI, eq->cons_index); in update_eq_db()
6194 hns_roce_write64(hr_dev, (__le32 *)&eq_db, eq->db_reg); in update_eq_db()
6201 aeqe = hns_roce_buf_offset(eq->mtr.kmem, in next_aeqe_sw_v2()
6202 (eq->cons_index & (eq->entries - 1)) * in next_aeqe_sw_v2()
6203 eq->eqe_size); in next_aeqe_sw_v2()
6206 !!(eq->cons_index & eq->entries)) ? aeqe : NULL; in next_aeqe_sw_v2()
6239 le16_to_cpu(aeqe->event.cmd.token), in hns_roce_v2_aeq_int()
6240 aeqe->event.cmd.status, in hns_roce_v2_aeq_int()
6241 le64_to_cpu(aeqe->event.cmd.out_param)); in hns_roce_v2_aeq_int()
6247 eq->event_type = event_type; in hns_roce_v2_aeq_int()
6248 eq->sub_type = sub_type; in hns_roce_v2_aeq_int()
6249 ++eq->cons_index; in hns_roce_v2_aeq_int()
6252 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_AEQE_CNT]); in hns_roce_v2_aeq_int()
6269 ceqe = hns_roce_buf_offset(eq->mtr.kmem, in next_ceqe_sw_v2()
6270 (eq->cons_index & (eq->entries - 1)) * in next_ceqe_sw_v2()
6271 eq->eqe_size); in next_ceqe_sw_v2()
6274 !!(eq->cons_index & eq->entries)) ? ceqe : NULL; in next_ceqe_sw_v2()
6279 queue_work(system_bh_wq, &eq->work); in hns_roce_v2_ceq_int()
6287 struct hns_roce_dev *hr_dev = eq->hr_dev; in hns_roce_v2_msix_interrupt_eq()
6290 if (eq->type_flag == HNS_ROCE_CEQ) in hns_roce_v2_msix_interrupt_eq()
6303 struct pci_dev *pdev = hr_dev->pci_dev; in abnormal_interrupt_basic()
6305 const struct hnae3_ae_ops *ops = ae_dev->ops; in abnormal_interrupt_basic()
6313 dev_err(hr_dev->dev, "AEQ overflow!\n"); in abnormal_interrupt_basic()
6318 reset_type = hr_dev->is_vf ? in abnormal_interrupt_basic()
6322 if (ops->set_default_reset_request) in abnormal_interrupt_basic()
6323 ops->set_default_reset_request(ae_dev, reset_type); in abnormal_interrupt_basic()
6324 if (ops->reset_event) in abnormal_interrupt_basic()
6325 ops->reset_event(pdev, NULL); in abnormal_interrupt_basic()
6332 dev_err(hr_dev->dev, "no basic abnormal irq found.\n"); in abnormal_interrupt_basic()
6350 ecc_info->is_ecc_err = hr_reg_read(req, QUERY_RAM_ECC_1BIT_ERR); in fmea_ram_ecc_query()
6351 ecc_info->res_type = hr_reg_read(req, QUERY_RAM_ECC_RES_TYPE); in fmea_ram_ecc_query()
6352 ecc_info->index = hr_reg_read(req, QUERY_RAM_ECC_TAG); in fmea_ram_ecc_query()
6370 dev_err(hr_dev->dev, in fmea_recover_gmv()
6409 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, read_bt0_op, index); in fmea_recover_others()
6411 dev_err(hr_dev->dev, in fmea_recover_others()
6417 addr = fmea_get_ram_res_addr(res_type, mailbox->buf); in fmea_recover_others()
6421 dev_err(hr_dev->dev, in fmea_recover_others()
6433 u32 res_type = ecc_info->res_type; in fmea_ram_ecc_recover()
6434 u32 index = ecc_info->index; in fmea_ram_ecc_recover()
6440 dev_err(hr_dev->dev, "unsupported fmea ram ecc type %u.\n", in fmea_ram_ecc_recover()
6450 dev_err(hr_dev->dev, in fmea_ram_ecc_recover()
6462 dev_err(hr_dev->dev, "failed to query fmea ram ecc.\n"); in fmea_ram_ecc_work()
6467 dev_err(hr_dev->dev, "no fmea ram ecc error found.\n"); in fmea_ram_ecc_work()
6484 } else if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) { in hns_roce_v2_msix_interrupt_abn()
6485 queue_work(hr_dev->irq_workq, &hr_dev->ecc_work); in hns_roce_v2_msix_interrupt_abn()
6488 dev_err(hr_dev->dev, "no abnormal irq found.\n"); in hns_roce_v2_msix_interrupt_abn()
6509 hns_roce_mtr_destroy(hr_dev, &eq->mtr); in free_eq_buf()
6515 struct device *dev = hr_dev->dev; in hns_roce_v2_destroy_eqc()
6516 int eqn = eq->eqn; in hns_roce_v2_destroy_eqc()
6520 if (eqn < hr_dev->caps.num_comp_vectors) in hns_roce_v2_destroy_eqc()
6534 eq->db_reg = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG; in init_eq_config()
6535 eq->cons_index = 0; in init_eq_config()
6536 eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0; in init_eq_config()
6537 eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0; in init_eq_config()
6538 eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED; in init_eq_config()
6539 eq->shift = ilog2((unsigned int)eq->entries); in init_eq_config()
6555 /* if not multi-hop, the eqe buffer uses only one trunk */ in config_eqc()
6556 ret = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, in config_eqc()
6559 dev_err(hr_dev->dev, "failed to find EQE mtr, ret = %d\n", ret); in config_eqc()
6563 bt_ba = hns_roce_get_mtr_ba(&eq->mtr); in config_eqc()
6566 hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num); in config_eqc()
6567 hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore); in config_eqc()
6568 hr_reg_write(eqc, EQC_COALESCE, eq->coalesce); in config_eqc()
6569 hr_reg_write(eqc, EQC_ARM_ST, eq->arm_st); in config_eqc()
6570 hr_reg_write(eqc, EQC_EQN, eq->eqn); in config_eqc()
6573 to_hr_hw_page_shift(eq->mtr.hem_cfg.ba_pg_shift)); in config_eqc()
6575 to_hr_hw_page_shift(eq->mtr.hem_cfg.buf_pg_shift)); in config_eqc()
6577 hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt); in config_eqc()
6579 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { in config_eqc()
6580 if (eq->eq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) { in config_eqc()
6581 dev_info(hr_dev->dev, "eq_period(%u) reached the upper limit, adjusted to 65.\n", in config_eqc()
6582 eq->eq_period); in config_eqc()
6583 eq->eq_period = HNS_ROCE_MAX_EQ_PERIOD; in config_eqc()
6585 eq->eq_period *= HNS_ROCE_CLOCK_ADJUST; in config_eqc()
6588 hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period); in config_eqc()
6592 hr_reg_write(eqc, EQC_SHIFT, eq->shift); in config_eqc()
6600 hr_reg_write(eqc, EQC_EQE_SIZE, eq->eqe_size == HNS_ROCE_V3_EQE_SIZE); in config_eqc()
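The HIP08 branch of config_eqc() scales the interrupt coalescing period by a clock-adjust factor, clamping first so the scaled value still fits the 16-bit EQC field. A standalone sketch with stand-in constants:

/* Clamp-then-scale sketch for the eq_period fixup. */
#include <stdint.h>
#include <stdio.h>
#include <limits.h>

#define CLOCK_ADJUST 1000
#define MAX_EQ_PERIOD (USHRT_MAX / CLOCK_ADJUST)

static uint32_t fixup_eq_period(uint32_t period)
{
	if (period * (uint64_t)CLOCK_ADJUST > USHRT_MAX) {
		printf("eq_period(%u) over limit, clamped to %u\n",
		       period, (uint32_t)MAX_EQ_PERIOD);
		period = MAX_EQ_PERIOD;
	}
	return period * CLOCK_ADJUST;
}

int main(void)
{
	printf("scaled: %u\n", fixup_eq_period(10));
	printf("scaled: %u\n", fixup_eq_period(500));
	return 0;
}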
6610 if (hr_dev->caps.eqe_hop_num == HNS_ROCE_HOP_NUM_0) in alloc_eq_buf()
6611 eq->hop_num = 0; in alloc_eq_buf()
6613 eq->hop_num = hr_dev->caps.eqe_hop_num; in alloc_eq_buf()
6615 buf_attr.page_shift = hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT; in alloc_eq_buf()
6616 buf_attr.region[0].size = eq->entries * eq->eqe_size; in alloc_eq_buf()
6617 buf_attr.region[0].hopnum = eq->hop_num; in alloc_eq_buf()
6620 err = hns_roce_mtr_create(hr_dev, &eq->mtr, &buf_attr, in alloc_eq_buf()
6621 hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT, NULL, in alloc_eq_buf()
6624 dev_err(hr_dev->dev, "failed to alloc EQE mtr, err %d\n", err); in alloc_eq_buf()
6644 ret = config_eqc(hr_dev, eq, mailbox->buf); in hns_roce_v2_create_eq()
6648 ret = hns_roce_create_hw_ctx(hr_dev, mailbox, eq_cmd, eq->eqn); in hns_roce_v2_create_eq()
6650 dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n"); in hns_roce_v2_create_eq()
6671 struct hns_roce_dev *hr_dev = eq->hr_dev; in hns_roce_ceq_work()
6675 while (ceqe && ceqe_num < hr_dev->caps.ceqe_depth) { in hns_roce_ceq_work()
6685 ++eq->cons_index; in hns_roce_ceq_work()
6687 atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CEQE_CNT]); in hns_roce_ceq_work()
6698 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; in __hns_roce_request_irq()
6703 hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN, in __hns_roce_request_irq()
6705 if (!hr_dev->irq_names[i]) { in __hns_roce_request_irq()
6706 ret = -ENOMEM; in __hns_roce_request_irq()
6713 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN, in __hns_roce_request_irq()
6714 "hns-%s-abn-%d", pci_name(hr_dev->pci_dev), j); in __hns_roce_request_irq()
6717 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN, in __hns_roce_request_irq()
6718 "hns-%s-aeq-%d", pci_name(hr_dev->pci_dev), j - other_num); in __hns_roce_request_irq()
6721 snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN, in __hns_roce_request_irq()
6722 "hns-%s-ceq-%d", pci_name(hr_dev->pci_dev), in __hns_roce_request_irq()
6723 j - other_num - aeq_num); in __hns_roce_request_irq()
6727 ret = request_irq(hr_dev->irq[j], in __hns_roce_request_irq()
6729 0, hr_dev->irq_names[j], hr_dev); in __hns_roce_request_irq()
6731 INIT_WORK(&eq_table->eq[j - other_num].work, in __hns_roce_request_irq()
6733 ret = request_irq(eq_table->eq[j - other_num].irq, in __hns_roce_request_irq()
6735 0, hr_dev->irq_names[j + aeq_num], in __hns_roce_request_irq()
6736 &eq_table->eq[j - other_num]); in __hns_roce_request_irq()
6738 ret = request_irq(eq_table->eq[j - other_num].irq, in __hns_roce_request_irq()
6740 0, hr_dev->irq_names[j - comp_num], in __hns_roce_request_irq()
6741 &eq_table->eq[j - other_num]); in __hns_roce_request_irq()
6745 dev_err(hr_dev->dev, "request irq error!\n"); in __hns_roce_request_irq()
6753 for (j -= 1; j >= 0; j--) { in __hns_roce_request_irq()
6755 free_irq(hr_dev->irq[j], hr_dev); in __hns_roce_request_irq()
6758 free_irq(eq_table->eq[j - other_num].irq, in __hns_roce_request_irq()
6759 &eq_table->eq[j - other_num]); in __hns_roce_request_irq()
6761 cancel_work_sync(&eq_table->eq[j - other_num].work); in __hns_roce_request_irq()
6765 for (i -= 1; i >= 0; i--) in __hns_roce_request_irq()
6766 kfree(hr_dev->irq_names[i]); in __hns_roce_request_irq()
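The interrupt-name table built above is ordered [abnormal | AEQ | CEQ], so the naming loops recover each vector's per-class ordinal by subtracting the earlier class counts (j - other_num, then j - other_num - aeq_num). Note the request loop walks CEQs before AEQs, while the name table puts AEQ names first; that mismatch is why a CEQ is paired with irq_names[j + aeq_num] and an AEQ with irq_names[j - comp_num]. A standalone model of the naming arithmetic (the PCI name and vector counts are placeholders):

```c
/* Model of the IRQ-name layout used in __hns_roce_request_irq():
 * names are ordered [abnormal | aeq | ceq] and the per-class
 * index is recovered by subtracting the earlier class counts.
 */
#include <stdio.h>

int main(void)
{
	const char *pci = "0000:7d:00.0";	/* placeholder PCI name */
	int other_num = 1, aeq_num = 1, comp_num = 4;
	int irq_num = other_num + aeq_num + comp_num;
	char name[32];

	for (int j = 0; j < irq_num; j++) {
		if (j < other_num)
			snprintf(name, sizeof(name), "hns-%s-abn-%d",
				 pci, j);
		else if (j < other_num + aeq_num)
			snprintf(name, sizeof(name), "hns-%s-aeq-%d",
				 pci, j - other_num);
		else
			snprintf(name, sizeof(name), "hns-%s-ceq-%d",
				 pci, j - other_num - aeq_num);
		printf("irq_names[%d] = %s\n", j, name);
	}
	return 0;
}
```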
6777 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; in __hns_roce_free_irq()
6778 irq_num = eq_num + hr_dev->caps.num_other_vectors; in __hns_roce_free_irq()
6780 for (i = 0; i < hr_dev->caps.num_other_vectors; i++) in __hns_roce_free_irq()
6781 free_irq(hr_dev->irq[i], hr_dev); in __hns_roce_free_irq()
6784 free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]); in __hns_roce_free_irq()
6785 if (i < hr_dev->caps.num_comp_vectors) in __hns_roce_free_irq()
6786 cancel_work_sync(&hr_dev->eq_table.eq[i].work); in __hns_roce_free_irq()
6790 kfree(hr_dev->irq_names[i]); in __hns_roce_free_irq()
6795 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; in hns_roce_v2_init_eq_table()
6796 struct device *dev = hr_dev->dev; in hns_roce_v2_init_eq_table()
6807 if (hr_dev->caps.aeqe_depth < HNS_AEQ_POLLING_BUDGET) in hns_roce_v2_init_eq_table()
6808 return -EINVAL; in hns_roce_v2_init_eq_table()
6810 other_num = hr_dev->caps.num_other_vectors; in hns_roce_v2_init_eq_table()
6811 comp_num = hr_dev->caps.num_comp_vectors; in hns_roce_v2_init_eq_table()
6812 aeq_num = hr_dev->caps.num_aeq_vectors; in hns_roce_v2_init_eq_table()
6817 eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL); in hns_roce_v2_init_eq_table()
6818 if (!eq_table->eq) in hns_roce_v2_init_eq_table()
6819 return -ENOMEM; in hns_roce_v2_init_eq_table()
6823 eq = &eq_table->eq[i]; in hns_roce_v2_init_eq_table()
6824 eq->hr_dev = hr_dev; in hns_roce_v2_init_eq_table()
6825 eq->eqn = i; in hns_roce_v2_init_eq_table()
6829 eq->type_flag = HNS_ROCE_CEQ; in hns_roce_v2_init_eq_table()
6830 eq->entries = hr_dev->caps.ceqe_depth; in hns_roce_v2_init_eq_table()
6831 eq->eqe_size = hr_dev->caps.ceqe_size; in hns_roce_v2_init_eq_table()
6832 eq->irq = hr_dev->irq[i + other_num + aeq_num]; in hns_roce_v2_init_eq_table()
6833 eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM; in hns_roce_v2_init_eq_table()
6834 eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL; in hns_roce_v2_init_eq_table()
6838 eq->type_flag = HNS_ROCE_AEQ; in hns_roce_v2_init_eq_table()
6839 eq->entries = hr_dev->caps.aeqe_depth; in hns_roce_v2_init_eq_table()
6840 eq->eqe_size = hr_dev->caps.aeqe_size; in hns_roce_v2_init_eq_table()
6841 eq->irq = hr_dev->irq[i - comp_num + other_num]; in hns_roce_v2_init_eq_table()
6842 eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM; in hns_roce_v2_init_eq_table()
6843 eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL; in hns_roce_v2_init_eq_table()
6853 INIT_WORK(&hr_dev->ecc_work, fmea_ram_ecc_work); in hns_roce_v2_init_eq_table()
6855 hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0); in hns_roce_v2_init_eq_table()
6856 if (!hr_dev->irq_workq) { in hns_roce_v2_init_eq_table()
6858 ret = -ENOMEM; in hns_roce_v2_init_eq_table()
6875 destroy_workqueue(hr_dev->irq_workq); in hns_roce_v2_init_eq_table()
6878 for (i -= 1; i >= 0; i--) in hns_roce_v2_init_eq_table()
6879 hns_roce_v2_destroy_eqc(hr_dev, &eq_table->eq[i]); in hns_roce_v2_init_eq_table()
6880 kfree(eq_table->eq); in hns_roce_v2_init_eq_table()
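Within hns_roce_v2_init_eq_table(), the eq array puts the comp_num CEQs first and the AEQs after them, while the global vector table is ordered [other | AEQ | CEQ]; the i + other_num + aeq_num and i - comp_num + other_num expressions translate between the two layouts. A small model that prints the mapping (the vector counts are illustrative):

```c
/* Model of the eq[] <-> irq[] mapping in hns_roce_v2_init_eq_table():
 * eq[0..comp_num) are CEQs and eq[comp_num..eq_num) are AEQs, while
 * the global vector table is ordered [other | aeq | ceq].
 */
#include <stdio.h>

int main(void)
{
	int other_num = 1, aeq_num = 1, comp_num = 4;
	int eq_num = comp_num + aeq_num;

	for (int i = 0; i < eq_num; i++) {
		if (i < comp_num)	/* CEQ: vectors after [other|aeq] */
			printf("eq[%d] CEQ -> irq[%d]\n", i,
			       i + other_num + aeq_num);
		else			/* AEQ: vectors right after [other] */
			printf("eq[%d] AEQ -> irq[%d]\n", i,
			       i - comp_num + other_num);
	}
	return 0;
}
```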
6887 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; in hns_roce_v2_cleanup_eq_table()
6891 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; in hns_roce_v2_cleanup_eq_table()
6897 destroy_workqueue(hr_dev->irq_workq); in hns_roce_v2_cleanup_eq_table()
6900 hns_roce_v2_destroy_eqc(hr_dev, &eq_table->eq[i]); in hns_roce_v2_cleanup_eq_table()
6902 kfree(eq_table->eq); in hns_roce_v2_cleanup_eq_table()
6974 struct hns_roce_v2_priv *priv = hr_dev->priv; in hns_roce_hw_v2_get_cfg()
6978 hr_dev->pci_dev = handle->pdev; in hns_roce_hw_v2_get_cfg()
6979 id = pci_match_id(hns_roce_hw_v2_pci_tbl, hr_dev->pci_dev); in hns_roce_hw_v2_get_cfg()
6980 hr_dev->is_vf = id->driver_data; in hns_roce_hw_v2_get_cfg()
6981 hr_dev->dev = &handle->pdev->dev; in hns_roce_hw_v2_get_cfg()
6982 hr_dev->hw = &hns_roce_hw_v2; in hns_roce_hw_v2_get_cfg()
6983 hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG; in hns_roce_hw_v2_get_cfg()
6984 hr_dev->odb_offset = hr_dev->sdb_offset; in hns_roce_hw_v2_get_cfg()
6987 hr_dev->reg_base = handle->rinfo.roce_io_base; in hns_roce_hw_v2_get_cfg()
6988 hr_dev->mem_base = handle->rinfo.roce_mem_base; in hns_roce_hw_v2_get_cfg()
6989 hr_dev->caps.num_ports = 1; in hns_roce_hw_v2_get_cfg()
6990 hr_dev->iboe.netdevs[0] = handle->rinfo.netdev; in hns_roce_hw_v2_get_cfg()
6991 hr_dev->iboe.phy_port[0] = 0; in hns_roce_hw_v2_get_cfg()
6993 addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid, in hns_roce_hw_v2_get_cfg()
6994 hr_dev->iboe.netdevs[0]->dev_addr); in hns_roce_hw_v2_get_cfg()
6996 for (i = 0; i < handle->rinfo.num_vectors; i++) in hns_roce_hw_v2_get_cfg()
6997 hr_dev->irq[i] = pci_irq_vector(handle->pdev, in hns_roce_hw_v2_get_cfg()
6998 i + handle->rinfo.base_vector); in hns_roce_hw_v2_get_cfg()
7001 hr_dev->cmd_mod = 1; in hns_roce_hw_v2_get_cfg()
7002 hr_dev->loop_idc = 0; in hns_roce_hw_v2_get_cfg()
7004 hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle); in hns_roce_hw_v2_get_cfg()
7005 priv->handle = handle; in hns_roce_hw_v2_get_cfg()
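hns_roce_hw_v2_get_cfg() derives the IB node GUID from the netdev's MAC with addrconf_addr_eui48(), i.e. the standard EUI-48 to modified EUI-64 expansion: insert ff:fe in the middle of the 6-byte MAC and flip the universal/local bit of the first byte. A standalone model of that conversion (the sample MAC is arbitrary):

```c
/* Model of the addrconf_addr_eui48() conversion: 6-byte MAC ->
 * 8-byte modified EUI-64, as used for the node GUID above.
 */
#include <stdint.h>
#include <stdio.h>

static void mac_to_eui64(uint8_t eui[8], const uint8_t mac[6])
{
	eui[0] = mac[0] ^ 0x02;	/* flip the universal/local bit */
	eui[1] = mac[1];
	eui[2] = mac[2];
	eui[3] = 0xff;		/* EUI-48 -> EUI-64 filler bytes */
	eui[4] = 0xfe;
	eui[5] = mac[3];
	eui[6] = mac[4];
	eui[7] = mac[5];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x18, 0x2d, 0x11, 0x22, 0x33 };
	uint8_t guid[8];

	mac_to_eui64(guid, mac);
	/* prints 02:18:2d:ff:fe:11:22:33 */
	for (int i = 0; i < 8; i++)
		printf("%02x%s", guid[i], i < 7 ? ":" : "\n");
	return 0;
}
```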
7015 return -ENOMEM; in __hns_roce_hw_v2_init_instance()
7017 hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL); in __hns_roce_hw_v2_init_instance()
7018 if (!hr_dev->priv) { in __hns_roce_hw_v2_init_instance()
7019 ret = -ENOMEM; in __hns_roce_hw_v2_init_instance()
7027 dev_err(hr_dev->dev, "RoCE Engine init failed!\n"); in __hns_roce_hw_v2_init_instance()
7031 if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { in __hns_roce_hw_v2_init_instance()
7034 dev_err(hr_dev->dev, "failed to init free mr!\n"); in __hns_roce_hw_v2_init_instance()
7039 handle->priv = hr_dev; in __hns_roce_hw_v2_init_instance()
7047 kfree(hr_dev->priv); in __hns_roce_hw_v2_init_instance()
7050 ib_dealloc_device(&hr_dev->ib_dev); in __hns_roce_hw_v2_init_instance()
7058 struct hns_roce_dev *hr_dev = handle->priv; in __hns_roce_hw_v2_uninit_instance()
7063 handle->priv = NULL; in __hns_roce_hw_v2_uninit_instance()
7065 hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT; in __hns_roce_hw_v2_uninit_instance()
7069 kfree(hr_dev->priv); in __hns_roce_hw_v2_uninit_instance()
7070 ib_dealloc_device(&hr_dev->ib_dev); in __hns_roce_hw_v2_uninit_instance()
7075 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; in hns_roce_hw_v2_init_instance()
7077 struct device *dev = &handle->pdev->dev; in hns_roce_hw_v2_init_instance()
7080 handle->rinfo.instance_state = HNS_ROCE_STATE_INIT; in hns_roce_hw_v2_init_instance()
7082 if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) { in hns_roce_hw_v2_init_instance()
7083 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; in hns_roce_hw_v2_init_instance()
7087 id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev); in hns_roce_hw_v2_init_instance()
7091 if (id->driver_data && handle->pdev->revision == PCI_REVISION_ID_HIP08) in hns_roce_hw_v2_init_instance()
7096 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; in hns_roce_hw_v2_init_instance()
7098 if (ops->ae_dev_resetting(handle) || in hns_roce_hw_v2_init_instance()
7099 ops->get_hw_reset_stat(handle)) in hns_roce_hw_v2_init_instance()
7105 handle->rinfo.instance_state = HNS_ROCE_STATE_INITED; in hns_roce_hw_v2_init_instance()
7113 return -EBUSY; in hns_roce_hw_v2_init_instance()
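hns_roce_hw_v2_init_instance() has to tolerate a hardware reset racing with its own bring-up: if the device is resetting it backs off, retries the whole init a bounded number of times, and finally gives up with -EBUSY. A simplified model of that retry policy (the retry count and the resetting callback are hypothetical):

```c
/* Model of the init-vs-reset race handling in
 * hns_roce_hw_v2_init_instance(): retry a bounded number of
 * times if a reset lands mid-init, else give up with -EBUSY.
 */
#include <stdio.h>
#include <errno.h>

#define MAX_INIT_RETRY 3	/* hypothetical retry budget */

static int dev_resetting(int attempt)
{
	/* Pretend the HW is resetting during the first two attempts */
	return attempt < 2;
}

static int init_instance(void)
{
	for (int attempt = 0; attempt < MAX_INIT_RETRY; attempt++) {
		if (dev_resetting(attempt))
			continue;	/* real code uninits, then retries */
		printf("initialized on attempt %d\n", attempt);
		return 0;
	}
	return -EBUSY;			/* reset never finished in time */
}

int main(void)
{
	printf("init_instance() = %d\n", init_instance());
	return 0;
}
```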
7119 if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) in hns_roce_hw_v2_uninit_instance()
7122 handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT; in hns_roce_hw_v2_uninit_instance()
7126 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT; in hns_roce_hw_v2_uninit_instance()
7133 if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) { in hns_roce_hw_v2_reset_notify_down()
7134 set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state); in hns_roce_hw_v2_reset_notify_down()
7138 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN; in hns_roce_hw_v2_reset_notify_down()
7139 clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state); in hns_roce_hw_v2_reset_notify_down()
7141 hr_dev = handle->priv; in hns_roce_hw_v2_reset_notify_down()
7145 hr_dev->active = false; in hns_roce_hw_v2_reset_notify_down()
7146 hr_dev->dis_db = true; in hns_roce_hw_v2_reset_notify_down()
7148 rdma_user_mmap_disassociate(&hr_dev->ib_dev); in hns_roce_hw_v2_reset_notify_down()
7150 hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN; in hns_roce_hw_v2_reset_notify_down()
7157 struct device *dev = &handle->pdev->dev; in hns_roce_hw_v2_reset_notify_init()
7161 &handle->rinfo.state)) { in hns_roce_hw_v2_reset_notify_init()
7162 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED; in hns_roce_hw_v2_reset_notify_init()
7166 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT; in hns_roce_hw_v2_reset_notify_init()
7168 dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n"); in hns_roce_hw_v2_reset_notify_init()
7175 handle->priv = NULL; in hns_roce_hw_v2_reset_notify_init()
7178 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED; in hns_roce_hw_v2_reset_notify_init()
7187 if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state)) in hns_roce_hw_v2_reset_notify_uninit()
7190 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT; in hns_roce_hw_v2_reset_notify_uninit()
7191 dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n"); in hns_roce_hw_v2_reset_notify_uninit()
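The three reset_notify callbacks above are driven by the hnae3 framework, which, as the listing suggests, invokes them as down -> uninit -> init around the hardware reset; the HNS_ROCE_RST_DIRECT_RETURN bit records that the instance was never fully up when the reset began, letting the later stages return immediately. A toy state machine of that sequence (state and field names abbreviated from the driver's):

```c
/* Toy model of the reset notification sequence: DOWN marks the
 * device inactive, UNINIT tears the old instance down, INIT
 * re-creates it. The direct-return flag short-circuits all three
 * stages when the instance was never initialized.
 */
#include <stdio.h>
#include <stdbool.h>

enum rst_state { RST_DOWN, RST_UNINIT, RST_INIT, RST_INITED };

struct model_rinfo {
	bool inited;		/* was the instance fully up? */
	bool direct_return;	/* skip the heavy reset path */
	enum rst_state reset_state;
};

static void notify_down(struct model_rinfo *r)
{
	if (!r->inited) {
		r->direct_return = true;	/* nothing to tear down */
		return;
	}
	r->reset_state = RST_DOWN;	/* stop doorbells, mark inactive */
}

static void notify_uninit(struct model_rinfo *r)
{
	if (r->direct_return)
		return;
	r->reset_state = RST_UNINIT;	/* free the old instance */
}

static void notify_init(struct model_rinfo *r)
{
	if (r->direct_return) {
		r->reset_state = RST_INITED;
		return;
	}
	r->reset_state = RST_INIT;	/* re-create the instance... */
	r->reset_state = RST_INITED;	/* ...then mark it done */
}

int main(void)
{
	struct model_rinfo r = { .inited = true };

	notify_down(&r);
	notify_uninit(&r);
	notify_init(&r);
	printf("final state: %d (INITED=%d)\n", r.reset_state, RST_INITED);
	return 0;
}
```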
7223 struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv; in hns_roce_hw_v2_link_status_change()
7224 struct net_device *netdev = handle->rinfo.netdev; in hns_roce_hw_v2_link_status_change()
7229 ib_dispatch_port_state_event(&hr_dev->ib_dev, netdev); in hns_roce_hw_v2_link_status_change()