Lines matching full:msg in drivers/infiniband/core/nldev.c (Linux RDMA netlink device interface)
169 static int put_driver_name_print_type(struct sk_buff *msg, const char *name, in put_driver_name_print_type() argument
172 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, name)) in put_driver_name_print_type()
175 nla_put_u8(msg, RDMA_NLDEV_ATTR_DRIVER_PRINT_TYPE, print_type)) in put_driver_name_print_type()
181 static int _rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, in _rdma_nl_put_driver_u32() argument
185 if (put_driver_name_print_type(msg, name, print_type)) in _rdma_nl_put_driver_u32()
187 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DRIVER_U32, value)) in _rdma_nl_put_driver_u32()
193 static int _rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, in _rdma_nl_put_driver_u64() argument
197 if (put_driver_name_print_type(msg, name, print_type)) in _rdma_nl_put_driver_u64()
199 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_DRIVER_U64, value, in _rdma_nl_put_driver_u64()
206 int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name, in rdma_nl_put_driver_string() argument
209 if (put_driver_name_print_type(msg, name, in rdma_nl_put_driver_string()
212 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DRIVER_STRING, str)) in rdma_nl_put_driver_string()
219 int rdma_nl_put_driver_u32(struct sk_buff *msg, const char *name, u32 value) in rdma_nl_put_driver_u32() argument
221 return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, in rdma_nl_put_driver_u32()
226 int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name, in rdma_nl_put_driver_u32_hex() argument
229 return _rdma_nl_put_driver_u32(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, in rdma_nl_put_driver_u32_hex()
234 int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value) in rdma_nl_put_driver_u64() argument
236 return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_UNSPEC, in rdma_nl_put_driver_u64()
241 int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name, u64 value) in rdma_nl_put_driver_u64_hex() argument
243 return _rdma_nl_put_driver_u64(msg, name, RDMA_NLDEV_PRINT_TYPE_HEX, in rdma_nl_put_driver_u64_hex()
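The four exported wrappers above are the driver-facing half of this API: a vendor driver emits its own key/value pairs from a restrack callback and only has to propagate -EMSGSIZE. A minimal sketch, assuming a hypothetical mydrv driver (mydrv_qp and its fields are illustrative, not from the source); the callback is wired up via ib_device_ops.fill_res_qp_entry and invoked from fill_res_qp_entry_query() further down:

	struct mydrv_qp {
		struct ib_qp ibqp;	/* embedded core QP, for container_of() */
		u32 sq_depth;
		u32 flags;
	};

	static int mydrv_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp)
	{
		struct mydrv_qp *qp = container_of(ibqp, struct mydrv_qp, ibqp);

		/* Each helper returns -EMSGSIZE once the skb is full; stop at
		 * the first failure so the core can cancel the whole entry. */
		if (rdma_nl_put_driver_u32(msg, "sq_depth", qp->sq_depth))
			return -EMSGSIZE;
		if (rdma_nl_put_driver_u32_hex(msg, "flags", qp->flags))
			return -EMSGSIZE;
		return 0;
	}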
254 static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device) in fill_nldev_handle() argument
256 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index)) in fill_nldev_handle()
258 if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, in fill_nldev_handle()
265 static int fill_dev_info(struct sk_buff *msg, struct ib_device *device) in fill_dev_info() argument
271 if (fill_nldev_handle(msg, device)) in fill_dev_info()
274 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device))) in fill_dev_info()
278 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, in fill_dev_info()
285 if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw)) in fill_dev_info()
288 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID, in fill_dev_info()
292 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID, in fill_dev_info()
296 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_NODE_TYPE, device->node_type)) in fill_dev_info()
298 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, device->use_cq_dim)) in fill_dev_info()
308 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "opa"); in fill_dev_info()
310 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "ib"); in fill_dev_info()
312 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "iw"); in fill_dev_info()
314 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, "roce"); in fill_dev_info()
316 ret = nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_PROTOCOL, in fill_dev_info()
321 static int fill_port_info(struct sk_buff *msg, in fill_port_info() argument
330 if (fill_nldev_handle(msg, device)) in fill_port_info()
333 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) in fill_port_info()
345 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS, in fill_port_info()
348 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX, in fill_port_info()
351 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid)) in fill_port_info()
353 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid)) in fill_port_info()
355 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc)) in fill_port_info()
358 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_STATE, attr.state)) in fill_port_info()
360 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_PORT_PHYS_STATE, attr.phys_state)) in fill_port_info()
365 ret = nla_put_u32(msg, in fill_port_info()
369 ret = nla_put_string(msg, in fill_port_info()
378 static int fill_res_info_entry(struct sk_buff *msg, in fill_res_info_entry() argument
383 entry_attr = nla_nest_start_noflag(msg, in fill_res_info_entry()
388 if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name)) in fill_res_info_entry()
390 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr, in fill_res_info_entry()
394 nla_nest_end(msg, entry_attr); in fill_res_info_entry()
398 nla_nest_cancel(msg, entry_attr); in fill_res_info_entry()
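Reassembled from the fragments above, fill_res_info_entry() shows the standard nesting discipline used throughout this file: start a nested attribute, put its members, close the nest on success, and cancel it on failure so a half-written entry never reaches userspace. (A reconstruction; the few lines absent from the matches are filled in from the surrounding pattern.)

	static int fill_res_info_entry(struct sk_buff *msg,
				       const char *name, u64 curr)
	{
		struct nlattr *entry_attr;

		entry_attr = nla_nest_start_noflag(msg,
						   RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY);
		if (!entry_attr)
			return -EMSGSIZE;

		if (nla_put_string(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_NAME, name))
			goto err;
		if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_SUMMARY_ENTRY_CURR, curr,
				      RDMA_NLDEV_ATTR_PAD))
			goto err;

		nla_nest_end(msg, entry_attr);
		return 0;

	err:
		nla_nest_cancel(msg, entry_attr);
		return -EMSGSIZE;
	}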
402 static int fill_res_info(struct sk_buff *msg, struct ib_device *device) in fill_res_info() argument
417 if (fill_nldev_handle(msg, device)) in fill_res_info()
420 table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_RES_SUMMARY); in fill_res_info()
428 ret = fill_res_info_entry(msg, names[i], curr); in fill_res_info()
433 nla_nest_end(msg, table_attr); in fill_res_info()
437 nla_nest_cancel(msg, table_attr); in fill_res_info()
441 static int fill_res_name_pid(struct sk_buff *msg, in fill_res_name_pid() argument
451 err = nla_put_string(msg, RDMA_NLDEV_ATTR_RES_KERN_NAME, in fill_res_name_pid()
468 err = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PID, pid); in fill_res_name_pid()
474 static int fill_res_qp_entry_query(struct sk_buff *msg, in fill_res_qp_entry_query() argument
488 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQPN, in fill_res_qp_entry_query()
491 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RQ_PSN, in fill_res_qp_entry_query()
496 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SQ_PSN, qp_attr.sq_psn)) in fill_res_qp_entry_query()
501 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_PATH_MIG_STATE, in fill_res_qp_entry_query()
505 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, qp->qp_type)) in fill_res_qp_entry_query()
507 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, qp_attr.qp_state)) in fill_res_qp_entry_query()
511 return dev->ops.fill_res_qp_entry(msg, qp); in fill_res_qp_entry_query()
517 static int fill_res_qp_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_qp_entry() argument
528 if (qp->port && nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, qp->port)) in fill_res_qp_entry()
531 ret = nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qp->qp_num); in fill_res_qp_entry()
536 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, qp->pd->res.id)) in fill_res_qp_entry()
539 ret = fill_res_name_pid(msg, res); in fill_res_qp_entry()
543 return fill_res_qp_entry_query(msg, res, dev, qp); in fill_res_qp_entry()
546 static int fill_res_qp_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_qp_raw_entry() argument
556 return dev->ops.fill_res_qp_entry_raw(msg, qp); in fill_res_qp_raw_entry()
559 static int fill_res_cm_id_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_cm_id_entry() argument
571 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num)) in fill_res_cm_id_entry()
575 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, id_priv->qp_num)) in fill_res_cm_id_entry()
577 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type)) in fill_res_cm_id_entry()
581 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps)) in fill_res_cm_id_entry()
584 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_STATE, id_priv->state)) in fill_res_cm_id_entry()
588 nla_put(msg, RDMA_NLDEV_ATTR_RES_SRC_ADDR, in fill_res_cm_id_entry()
593 nla_put(msg, RDMA_NLDEV_ATTR_RES_DST_ADDR, in fill_res_cm_id_entry()
598 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CM_IDN, res->id)) in fill_res_cm_id_entry()
601 if (fill_res_name_pid(msg, res)) in fill_res_cm_id_entry()
605 return dev->ops.fill_res_cm_id_entry(msg, cm_id); in fill_res_cm_id_entry()
611 static int fill_res_cq_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_cq_entry() argument
617 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe)) in fill_res_cq_entry()
619 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, in fill_res_cq_entry()
625 nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_POLL_CTX, cq->poll_ctx)) in fill_res_cq_entry()
628 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_DEV_DIM, (cq->dim != NULL))) in fill_res_cq_entry()
631 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, res->id)) in fill_res_cq_entry()
634 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, in fill_res_cq_entry()
638 if (fill_res_name_pid(msg, res)) in fill_res_cq_entry()
642 dev->ops.fill_res_cq_entry(msg, cq) : 0; in fill_res_cq_entry()
645 static int fill_res_cq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_cq_raw_entry() argument
653 return dev->ops.fill_res_cq_entry_raw(msg, cq); in fill_res_cq_raw_entry()
656 static int fill_res_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_mr_entry() argument
663 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_RKEY, mr->rkey)) in fill_res_mr_entry()
665 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LKEY, mr->lkey)) in fill_res_mr_entry()
669 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_MRLEN, mr->length, in fill_res_mr_entry()
673 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) in fill_res_mr_entry()
677 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, mr->pd->res.id)) in fill_res_mr_entry()
680 if (fill_res_name_pid(msg, res)) in fill_res_mr_entry()
684 dev->ops.fill_res_mr_entry(msg, mr) : in fill_res_mr_entry()
688 static int fill_res_mr_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_mr_raw_entry() argument
696 return dev->ops.fill_res_mr_entry_raw(msg, mr); in fill_res_mr_raw_entry()
699 static int fill_res_pd_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_pd_entry() argument
705 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LOCAL_DMA_LKEY, in fill_res_pd_entry()
709 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_UNSAFE_GLOBAL_RKEY, in fill_res_pd_entry()
713 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_RES_USECNT, in fill_res_pd_entry()
717 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, res->id)) in fill_res_pd_entry()
721 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, in fill_res_pd_entry()
725 return fill_res_name_pid(msg, res); in fill_res_pd_entry()
730 static int fill_res_ctx_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_ctx_entry() argument
738 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CTXN, ctx->res.id)) in fill_res_ctx_entry()
741 return fill_res_name_pid(msg, res); in fill_res_ctx_entry()
744 static int fill_res_range_qp_entry(struct sk_buff *msg, uint32_t min_range, in fill_res_range_qp_entry() argument
752 entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY); in fill_res_range_qp_entry()
757 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, min_range)) in fill_res_range_qp_entry()
760 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MIN_RANGE, min_range)) in fill_res_range_qp_entry()
762 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_MAX_RANGE, max_range)) in fill_res_range_qp_entry()
765 nla_nest_end(msg, entry_attr); in fill_res_range_qp_entry()
769 nla_nest_cancel(msg, entry_attr); in fill_res_range_qp_entry()
773 static int fill_res_srq_qps(struct sk_buff *msg, struct ib_srq *srq) in fill_res_srq_qps() argument
782 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); in fill_res_srq_qps()
805 if (fill_res_range_qp_entry(msg, min_range, prev)) in fill_res_srq_qps()
816 if (fill_res_range_qp_entry(msg, min_range, prev)) in fill_res_srq_qps()
819 nla_nest_end(msg, table_attr); in fill_res_srq_qps()
826 nla_nest_cancel(msg, table_attr); in fill_res_srq_qps()
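The two fill_res_range_qp_entry() calls above implement run-length coalescing: consecutive QP numbers attached to the SRQ are folded into [min, max] ranges, with the second call flushing the final open range, so an SRQ with thousands of QPs still fits in one message. The same logic over a plain sorted array, as a simplified sketch (put_qpn_ranges and the array input are illustrative; the kernel iterates an xarray instead):

	static int put_qpn_ranges(struct sk_buff *msg, const u32 *qpn, int n)
	{
		u32 min_range = 0, prev = 0;
		int i;

		for (i = 0; i < n; i++) {	/* qpn[] ascending, nonzero */
			if (!min_range) {
				min_range = qpn[i];	/* open a new range */
			} else if (qpn[i] > prev + 1) {
				/* gap: emit the closed range, start another */
				if (fill_res_range_qp_entry(msg, min_range, prev))
					return -EMSGSIZE;
				min_range = qpn[i];
			}
			prev = qpn[i];
		}
		/* flush the trailing open range, mirroring the second call above */
		if (min_range && fill_res_range_qp_entry(msg, min_range, prev))
			return -EMSGSIZE;
		return 0;
	}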
830 static int fill_res_srq_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_srq_entry() argument
836 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_SRQN, srq->res.id)) in fill_res_srq_entry()
839 if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, srq->srq_type)) in fill_res_srq_entry()
842 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PDN, srq->pd->res.id)) in fill_res_srq_entry()
846 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQN, in fill_res_srq_entry()
851 if (fill_res_srq_qps(msg, srq)) in fill_res_srq_entry()
854 if (fill_res_name_pid(msg, res)) in fill_res_srq_entry()
858 return dev->ops.fill_res_srq_entry(msg, srq); in fill_res_srq_entry()
866 static int fill_res_srq_raw_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_srq_raw_entry() argument
874 return dev->ops.fill_res_srq_entry_raw(msg, srq); in fill_res_srq_raw_entry()
877 static int fill_stat_counter_mode(struct sk_buff *msg, in fill_stat_counter_mode() argument
882 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, m->mode)) in fill_stat_counter_mode()
887 nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, m->param.qp_type)) in fill_stat_counter_mode()
891 fill_res_name_pid(msg, &counter->res)) in fill_stat_counter_mode()
898 static int fill_stat_counter_qp_entry(struct sk_buff *msg, u32 qpn) in fill_stat_counter_qp_entry() argument
902 entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP_ENTRY); in fill_stat_counter_qp_entry()
906 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) in fill_stat_counter_qp_entry()
909 nla_nest_end(msg, entry_attr); in fill_stat_counter_qp_entry()
913 nla_nest_cancel(msg, entry_attr); in fill_stat_counter_qp_entry()
917 static int fill_stat_counter_qps(struct sk_buff *msg, in fill_stat_counter_qps() argument
927 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_RES_QP); in fill_stat_counter_qps()
938 ret = fill_stat_counter_qp_entry(msg, qp->qp_num); in fill_stat_counter_qps()
944 nla_nest_end(msg, table_attr); in fill_stat_counter_qps()
949 nla_nest_cancel(msg, table_attr); in fill_stat_counter_qps()
953 int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name, in rdma_nl_stat_hwcounter_entry() argument
958 entry_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY); in rdma_nl_stat_hwcounter_entry()
962 if (nla_put_string(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME, in rdma_nl_stat_hwcounter_entry()
965 if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_VALUE, in rdma_nl_stat_hwcounter_entry()
969 nla_nest_end(msg, entry_attr); in rdma_nl_stat_hwcounter_entry()
973 nla_nest_cancel(msg, entry_attr); in rdma_nl_stat_hwcounter_entry()
978 static int fill_stat_mr_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_stat_mr_entry() argument
984 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_MRN, res->id)) in fill_stat_mr_entry()
988 return dev->ops.fill_stat_mr_entry(msg, mr); in fill_stat_mr_entry()
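rdma_nl_stat_hwcounter_entry() is exported precisely so driver callbacks like the fill_stat_mr_entry op above can append per-resource hardware counters after the core has put the MRN attribute. A hedged sketch of such a callback (mydrv_mr and the counter name are illustrative):

	struct mydrv_mr {
		struct ib_mr ibmr;
		atomic64_t page_faults;
	};

	/* Wired up via ib_device_ops.fill_stat_mr_entry. */
	static int mydrv_fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
	{
		struct mydrv_mr *mr = container_of(ibmr, struct mydrv_mr, ibmr);

		/* One nested name/value pair per counter; the helper cancels
		 * its own nest and returns -EMSGSIZE when the skb is full. */
		return rdma_nl_stat_hwcounter_entry(msg, "page_faults",
						    atomic64_read(&mr->page_faults));
	}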
995 static int fill_stat_counter_hwcounters(struct sk_buff *msg, in fill_stat_counter_hwcounters() argument
1002 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); in fill_stat_counter_hwcounters()
1010 if (rdma_nl_stat_hwcounter_entry(msg, st->descs[i].name, in fill_stat_counter_hwcounters()
1016 nla_nest_end(msg, table_attr); in fill_stat_counter_hwcounters()
1021 nla_nest_cancel(msg, table_attr); in fill_stat_counter_hwcounters()
1025 static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin, in fill_res_counter_entry() argument
1038 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, counter->port) || in fill_res_counter_entry()
1039 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, counter->id) || in fill_res_counter_entry()
1040 fill_stat_counter_mode(msg, counter) || in fill_res_counter_entry()
1041 fill_stat_counter_qps(msg, counter) || in fill_res_counter_entry()
1042 fill_stat_counter_hwcounters(msg, counter)) in fill_res_counter_entry()
1053 struct sk_buff *msg; in nldev_get_doit() local
1068 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_get_doit()
1069 if (!msg) { in nldev_get_doit()
1074 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_get_doit()
1082 err = fill_dev_info(msg, device); in nldev_get_doit()
1086 nlmsg_end(msg, nlh); in nldev_get_doit()
1089 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_get_doit()
1092 nlmsg_free(msg); in nldev_get_doit()
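Every *_doit handler in the matches below repeats the response lifecycle that nldev_get_doit() shows here: allocate a default-size skb, write the netlink header, fill the payload, finalize, and unicast the reply to the requester, freeing the skb on any failure. A skeleton with the device lookup and attribute parsing elided (example_doit is illustrative; the type macro follows the nldev convention):

	static int example_doit(struct sk_buff *skb, struct nlmsghdr *req,
				struct ib_device *device)
	{
		struct nlmsghdr *nlh;
		struct sk_buff *msg;
		int err;

		msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
		if (!msg)
			return -ENOMEM;

		nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, req->nlmsg_seq,
				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
				0, 0);
		if (!nlh) {
			err = -EMSGSIZE;
			goto err_free;
		}

		err = fill_dev_info(msg, device);
		if (err)
			goto err_free;

		nlmsg_end(msg, nlh);
		/* ownership of msg passes to the netlink layer on success */
		return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);

	err_free:
		nlmsg_free(msg);
		return err;
	}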
1193 struct sk_buff *msg; in nldev_port_get_doit() local
1216 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_port_get_doit()
1217 if (!msg) { in nldev_port_get_doit()
1222 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_port_get_doit()
1230 err = fill_port_info(msg, device, port, sock_net(skb->sk)); in nldev_port_get_doit()
1234 nlmsg_end(msg, nlh); in nldev_port_get_doit()
1237 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_port_get_doit()
1240 nlmsg_free(msg); in nldev_port_get_doit()
1309 struct sk_buff *msg; in nldev_res_get_doit() local
1323 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_res_get_doit()
1324 if (!msg) { in nldev_res_get_doit()
1329 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_res_get_doit()
1337 ret = fill_res_info(msg, device); in nldev_res_get_doit()
1341 nlmsg_end(msg, nlh); in nldev_res_get_doit()
1343 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_res_get_doit()
1346 nlmsg_free(msg); in nldev_res_get_doit()
1457 struct sk_buff *msg; in res_get_common_doit() local
1491 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in res_get_common_doit()
1492 if (!msg) { in res_get_common_doit()
1497 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in res_get_common_doit()
1502 if (!nlh || fill_nldev_handle(msg, device)) { in res_get_common_doit()
1509 ret = fill_func(msg, has_cap_net_admin, res, port); in res_get_common_doit()
1514 nlmsg_end(msg, nlh); in res_get_common_doit()
1516 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in res_get_common_doit()
1519 nlmsg_free(msg); in res_get_common_doit()
1804 struct sk_buff *msg; in nldev_get_chardev() local
1835 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_get_chardev()
1836 if (!msg) { in nldev_get_chardev()
1840 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_get_chardev()
1849 data.nl_msg = msg; in nldev_get_chardev()
1854 err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV, in nldev_get_chardev()
1859 err = nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CHARDEV_ABI, data.abi, in nldev_get_chardev()
1863 if (nla_put_string(msg, RDMA_NLDEV_ATTR_CHARDEV_NAME, in nldev_get_chardev()
1869 nlmsg_end(msg, nlh); in nldev_get_chardev()
1873 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_get_chardev()
1878 nlmsg_free(msg); in nldev_get_chardev()
1889 struct sk_buff *msg; in nldev_sys_get_doit() local
1897 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_sys_get_doit()
1898 if (!msg) in nldev_sys_get_doit()
1901 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_sys_get_doit()
1906 nlmsg_free(msg); in nldev_sys_get_doit()
1910 err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_NETNS_MODE, in nldev_sys_get_doit()
1913 nlmsg_free(msg); in nldev_sys_get_doit()
1917 err = nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_PRIVILEGED_QKEY_MODE, in nldev_sys_get_doit()
1920 nlmsg_free(msg); in nldev_sys_get_doit()
1933 nla_put_u8(msg, RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK, 1); in nldev_sys_get_doit()
1935 nlmsg_end(msg, nlh); in nldev_sys_get_doit()
1936 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_sys_get_doit()
1987 static int nldev_stat_set_mode_doit(struct sk_buff *msg, in nldev_stat_set_mode_doit() argument
2023 if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || in nldev_stat_set_mode_doit()
2024 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) { in nldev_stat_set_mode_doit()
2086 struct sk_buff *msg; in nldev_stat_set_doit() local
2113 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_stat_set_doit()
2114 if (!msg) { in nldev_stat_set_doit()
2118 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_stat_set_doit()
2122 if (!nlh || fill_nldev_handle(msg, device) || in nldev_stat_set_doit()
2123 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) { in nldev_stat_set_doit()
2129 ret = nldev_stat_set_mode_doit(msg, extack, tb, device, port); in nldev_stat_set_doit()
2140 nlmsg_end(msg, nlh); in nldev_stat_set_doit()
2142 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_stat_set_doit()
2145 nlmsg_free(msg); in nldev_stat_set_doit()
2156 struct sk_buff *msg; in nldev_stat_del_doit() local
2182 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_stat_del_doit()
2183 if (!msg) { in nldev_stat_del_doit()
2187 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_stat_del_doit()
2198 if (fill_nldev_handle(msg, device) || in nldev_stat_del_doit()
2199 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || in nldev_stat_del_doit()
2200 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || in nldev_stat_del_doit()
2201 nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) { in nldev_stat_del_doit()
2210 nlmsg_end(msg, nlh); in nldev_stat_del_doit()
2212 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_stat_del_doit()
2215 nlmsg_free(msg); in nldev_stat_del_doit()
2230 struct sk_buff *msg; in stat_get_doit_default_counter() local
2254 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in stat_get_doit_default_counter()
2255 if (!msg) { in stat_get_doit_default_counter()
2260 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in stat_get_doit_default_counter()
2265 if (!nlh || fill_nldev_handle(msg, device) || in stat_get_doit_default_counter()
2266 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) { in stat_get_doit_default_counter()
2279 table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); in stat_get_doit_default_counter()
2290 if (rdma_nl_stat_hwcounter_entry(msg, in stat_get_doit_default_counter()
2296 nla_nest_end(msg, table_attr); in stat_get_doit_default_counter()
2299 nlmsg_end(msg, nlh); in stat_get_doit_default_counter()
2301 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in stat_get_doit_default_counter()
2304 nla_nest_cancel(msg, table_attr); in stat_get_doit_default_counter()
2308 nlmsg_free(msg); in stat_get_doit_default_counter()
2321 struct sk_buff *msg; in stat_get_doit_qp() local
2343 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in stat_get_doit_qp()
2344 if (!msg) { in stat_get_doit_qp()
2349 nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in stat_get_doit_qp()
2362 if (fill_nldev_handle(msg, device) || in stat_get_doit_qp()
2363 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || in stat_get_doit_qp()
2364 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_MODE, mode)) { in stat_get_doit_qp()
2370 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK, mask)) { in stat_get_doit_qp()
2375 nlmsg_end(msg, nlh); in stat_get_doit_qp()
2377 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in stat_get_doit_qp()
2380 nlmsg_free(msg); in stat_get_doit_qp()
2450 struct sk_buff *msg; in nldev_stat_get_counter_status_doit() local
2477 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); in nldev_stat_get_counter_status_doit()
2478 if (!msg) { in nldev_stat_get_counter_status_doit()
2484 msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, in nldev_stat_get_counter_status_doit()
2489 if (!nlh || fill_nldev_handle(msg, device) || in nldev_stat_get_counter_status_doit()
2490 nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) in nldev_stat_get_counter_status_doit()
2493 table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); in nldev_stat_get_counter_status_doit()
2499 entry = nla_nest_start(msg, in nldev_stat_get_counter_status_doit()
2504 if (nla_put_string(msg, in nldev_stat_get_counter_status_doit()
2507 nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, i)) in nldev_stat_get_counter_status_doit()
2511 (nla_put_u8(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC, in nldev_stat_get_counter_status_doit()
2515 nla_nest_end(msg, entry); in nldev_stat_get_counter_status_doit()
2519 nla_nest_end(msg, table); in nldev_stat_get_counter_status_doit()
2520 nlmsg_end(msg, nlh); in nldev_stat_get_counter_status_doit()
2522 return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); in nldev_stat_get_counter_status_doit()
2525 nla_nest_cancel(msg, entry); in nldev_stat_get_counter_status_doit()
2528 nla_nest_cancel(msg, table); in nldev_stat_get_counter_status_doit()
2530 nlmsg_free(msg); in nldev_stat_get_counter_status_doit()