// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
// Copyright (c) 2019 Hisilicon Limited.

#include <rdma/rdma_cm.h>
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hw_v2.h"

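/* Dump driver-specific CQ attributes (depth, consumer index, CQE size and
 * arm sequence number) into a nested RDMA_NLDEV_ATTR_DRIVER table of the
 * restrack netlink message.
 */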
int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "cons_index", hr_cq->cons_index))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "cqe_size", hr_cq->cqe_size))
		goto err;

	if (rdma_nl_put_driver_u32(msg, "arm_sn", hr_cq->arm_sn))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}

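/* Dump the raw CQ context (CQC) queried from hardware as a single
 * RDMA_NLDEV_ATTR_RES_RAW blob.
 */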
int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
	struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
	struct hns_roce_v2_cq_context context;
	int ret;

	if (!hr_dev->hw->query_cqc)
		return -EINVAL;

	ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context);
	if (ret)
		return -EINVAL;

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}

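/* Dump driver-specific QP attributes (SQ/RQ WQE counts, max scatter/gather
 * entries and the extended SGE count) into a nested RDMA_NLDEV_ATTR_DRIVER
 * table.
 */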
int hns_roce_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "sq_wqe_cnt", hr_qp->sq.wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "sq_max_gs", hr_qp->sq.max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "rq_max_gs", hr_qp->rq.max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "ext_sge_sge_cnt", hr_qp->sge.sge_cnt))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}

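/* Dump the raw QP context (QPC) queried from hardware, together with the
 * SCC context (SCCC) when QP flow control is supported, as one combined
 * RDMA_NLDEV_ATTR_RES_RAW blob.
 */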
int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
	struct hns_roce_full_qp_ctx {
		struct hns_roce_v2_qp_context qpc;
		struct hns_roce_v2_scc_context sccc;
	} context = {};
	int ret;

	if (!hr_dev->hw->query_qpc)
		return -EINVAL;

	ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context.qpc);
	if (ret)
		return ret;

	/* If SCC is disabled or the query fails, the queried SCCC will
	 * be all 0.
	 */
	if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) ||
	    !hr_dev->hw->query_sccc)
		goto out;

	ret = hr_dev->hw->query_sccc(hr_dev, hr_qp->qpn, &context.sccc);
	if (ret)
		ibdev_warn_ratelimited(&hr_dev->ib_dev,
				       "failed to query SCCC, ret = %d.\n",
				       ret);

out:
	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}

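/* Dump driver-specific MR attributes (PBL hop number and the base-address
 * and buffer page shifts) into a nested RDMA_NLDEV_ATTR_DRIVER table.
 */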
int hns_roce_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ib_mr)
{
	struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "pbl_hop_num", hr_mr->pbl_hop_num))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "ba_pg_shift",
				       hr_mr->pbl_mtr.hem_cfg.ba_pg_shift))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "buf_pg_shift",
				       hr_mr->pbl_mtr.hem_cfg.buf_pg_shift))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);

	return -EMSGSIZE;
}

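/* Dump the raw memory protection table entry (MPT) queried from hardware
 * by MR key as a single RDMA_NLDEV_ATTR_RES_RAW blob.
 */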
int hns_roce_fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ib_mr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_mr->device);
	struct hns_roce_mr *hr_mr = to_hr_mr(ib_mr);
	struct hns_roce_v2_mpt_entry context;
	int ret;

	if (!hr_dev->hw->query_mpt)
		return -EINVAL;

	ret = hr_dev->hw->query_mpt(hr_dev, hr_mr->key, &context);
	if (ret)
		return -EINVAL;

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}

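/* Dump driver-specific SRQ attributes (SRQ number, WQE count, max
 * scatter/gather entries and XRC domain number) into a nested
 * RDMA_NLDEV_ATTR_DRIVER table.
 */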
int hns_roce_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq)
{
	struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq);
	struct nlattr *table_attr;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		return -EMSGSIZE;

	if (rdma_nl_put_driver_u32_hex(msg, "srqn", hr_srq->srqn))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "wqe_cnt", hr_srq->wqe_cnt))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "max_gs", hr_srq->max_gs))
		goto err;

	if (rdma_nl_put_driver_u32_hex(msg, "xrcdn", hr_srq->xrcdn))
		goto err;

	nla_nest_end(msg, table_attr);

	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return -EMSGSIZE;
}

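/* Dump the raw SRQ context (SRQC) queried from hardware as a single
 * RDMA_NLDEV_ATTR_RES_RAW blob.
 */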
int hns_roce_fill_res_srq_entry_raw(struct sk_buff *msg, struct ib_srq *ib_srq)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
	struct hns_roce_srq *hr_srq = to_hr_srq(ib_srq);
	struct hns_roce_srq_context context;
	int ret;

	if (!hr_dev->hw->query_srqc)
		return -EINVAL;

	ret = hr_dev->hw->query_srqc(hr_dev, hr_srq->srqn, &context);
	if (ret)
		return ret;

	ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);

	return ret;
}