Lines Matching +full:protection +full:- +full:domain
20 * - Redistributions of source code must retain the above
24 * - Redistributions in binary form must reproduce the above
97 [IB_WC_LOC_PROT_ERR] = "local protection error",
151 default: return -1; in ib_rate_to_mult()
211 default: return -1; in ib_rate_to_mbps()
237 if (device->ops.get_link_layer) in rdma_port_get_link_layer()
238 return device->ops.get_link_layer(device, port_num); in rdma_port_get_link_layer()
240 lt = rdma_node_get_transport(device->node_type); in rdma_port_get_link_layer()
248 /* Protection domains */
251 * __ib_alloc_pd - Allocates an unused protection domain.
252 * @device: The device on which to allocate the protection domain.
253 * @flags: protection domain flags
254 * @caller: caller's build-time module name
256 * A protection domain object provides an association between QPs, shared
271 return ERR_PTR(-ENOMEM); in __ib_alloc_pd()
273 pd->device = device; in __ib_alloc_pd()
274 pd->flags = flags; in __ib_alloc_pd()
276 rdma_restrack_new(&pd->res, RDMA_RESTRACK_PD); in __ib_alloc_pd()
277 rdma_restrack_set_name(&pd->res, caller); in __ib_alloc_pd()
279 ret = device->ops.alloc_pd(pd, NULL); in __ib_alloc_pd()
281 rdma_restrack_put(&pd->res); in __ib_alloc_pd()
285 rdma_restrack_add(&pd->res); in __ib_alloc_pd()
287 if (device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY) in __ib_alloc_pd()
288 pd->local_dma_lkey = device->local_dma_lkey; in __ib_alloc_pd()
300 mr = pd->device->ops.get_dma_mr(pd, mr_access_flags); in __ib_alloc_pd()
306 mr->device = pd->device; in __ib_alloc_pd()
307 mr->pd = pd; in __ib_alloc_pd()
308 mr->type = IB_MR_TYPE_DMA; in __ib_alloc_pd()
309 mr->uobject = NULL; in __ib_alloc_pd()
310 mr->need_inval = false; in __ib_alloc_pd()
312 pd->__internal_mr = mr; in __ib_alloc_pd()
314 if (!(device->attrs.kernel_cap_flags & IBK_LOCAL_DMA_LKEY)) in __ib_alloc_pd()
315 pd->local_dma_lkey = pd->__internal_mr->lkey; in __ib_alloc_pd()
318 pd->unsafe_global_rkey = pd->__internal_mr->rkey; in __ib_alloc_pd()
326 * ib_dealloc_pd_user - Deallocates a protection domain.
327 * @pd: The protection domain to deallocate.
338 if (pd->__internal_mr) { in ib_dealloc_pd_user()
339 ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL); in ib_dealloc_pd_user()
341 pd->__internal_mr = NULL; in ib_dealloc_pd_user()
344 ret = pd->device->ops.dealloc_pd(pd, udata); in ib_dealloc_pd_user()
348 rdma_restrack_del(&pd->res); in ib_dealloc_pd_user()
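
The fragments above come from the protection-domain allocate/free path (__ib_alloc_pd() and ib_dealloc_pd_user()) in drivers/infiniband/core/verbs.c. As a hedged illustration only, a kernel ULP would normally reach this code through the ib_alloc_pd()/ib_dealloc_pd() wrappers in <rdma/ib_verbs.h>; the helper names below are hypothetical:

#include <rdma/ib_verbs.h>

/* Hypothetical helper: allocate a PD for a ULP on @device. */
static struct ib_pd *example_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	/* ib_alloc_pd() expands to __ib_alloc_pd(device, flags, KBUILD_MODNAME). */
	pd = ib_alloc_pd(device, 0);
	if (IS_ERR(pd))
		return pd;	/* ERR_PTR(-ENOMEM) or a driver error */

	/* pd->local_dma_lkey is usable here, either the device-wide lkey
	 * (IBK_LOCAL_DMA_LKEY) or the internal DMA MR set up above. */
	return pd;
}

static void example_free_pd(struct ib_pd *pd)
{
	ib_dealloc_pd(pd);	/* all MRs/QPs/AHs on the PD must be gone first */
}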
357 * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
366 if (dest->grh.sgid_attr) in rdma_copy_ah_attr()
367 rdma_hold_gid_attr(dest->grh.sgid_attr); in rdma_copy_ah_attr()
372 * rdma_replace_ah_attr - Replace valid ah_attr with new one.
386 if (old->grh.sgid_attr) in rdma_replace_ah_attr()
387 rdma_hold_gid_attr(old->grh.sgid_attr); in rdma_replace_ah_attr()
392 * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
406 src->grh.sgid_attr = NULL; in rdma_move_ah_attr()
417 if (!rdma_is_port_valid(device, ah_attr->port_num)) in rdma_check_ah_attr()
418 return -EINVAL; in rdma_check_ah_attr()
420 if ((rdma_is_grh_required(device, ah_attr->port_num) || in rdma_check_ah_attr()
421 ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) && in rdma_check_ah_attr()
422 !(ah_attr->ah_flags & IB_AH_GRH)) in rdma_check_ah_attr()
423 return -EINVAL; in rdma_check_ah_attr()
425 if (ah_attr->grh.sgid_attr) { in rdma_check_ah_attr()
430 if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index || in rdma_check_ah_attr()
431 ah_attr->grh.sgid_attr->port_num != ah_attr->port_num) in rdma_check_ah_attr()
432 return -EINVAL; in rdma_check_ah_attr()
449 *old_sgid_attr = ah_attr->grh.sgid_attr; in rdma_fill_sgid_attr()
455 if (!(ah_attr->ah_flags & IB_AH_GRH)) in rdma_fill_sgid_attr()
459 if (grh->sgid_attr) in rdma_fill_sgid_attr()
463 rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index); in rdma_fill_sgid_attr()
468 grh->sgid_attr = sgid_attr; in rdma_fill_sgid_attr()
479 if (ah_attr->grh.sgid_attr == old_sgid_attr) in rdma_unfill_sgid_attr()
496 if (ah_attr->ah_flags & IB_AH_GRH) { in rdma_update_sgid_attr()
497 rdma_hold_gid_attr(ah_attr->grh.sgid_attr); in rdma_update_sgid_attr()
498 return ah_attr->grh.sgid_attr; in rdma_update_sgid_attr()
510 struct ib_device *device = pd->device; in _rdma_create_ah()
516 if (!udata && !device->ops.create_ah) in _rdma_create_ah()
517 return ERR_PTR(-EOPNOTSUPP); in _rdma_create_ah()
523 return ERR_PTR(-ENOMEM); in _rdma_create_ah()
525 ah->device = device; in _rdma_create_ah()
526 ah->pd = pd; in _rdma_create_ah()
527 ah->type = ah_attr->type; in _rdma_create_ah()
528 ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL); in _rdma_create_ah()
534 ret = device->ops.create_user_ah(ah, &init_attr, udata); in _rdma_create_ah()
536 ret = device->ops.create_ah(ah, &init_attr, NULL); in _rdma_create_ah()
538 if (ah->sgid_attr) in _rdma_create_ah()
539 rdma_put_gid_attr(ah->sgid_attr); in _rdma_create_ah()
544 atomic_inc(&pd->usecnt); in _rdma_create_ah()
549 * rdma_create_ah - Creates an address handle for the
551 * @pd: The protection domain associated with the address handle.
567 ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr); in rdma_create_ah()
570 slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr, in rdma_create_ah()
585 * rdma_create_user_ah - Creates an address handle for the
588 * @pd: The protection domain associated with the address handle.
605 err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr); in rdma_create_user_ah()
609 if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) { in rdma_create_user_ah()
610 err = ib_resolve_eth_dmac(pd->device, ah_attr); in rdma_create_user_ah()
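
rdma_create_ah() above fills the SGID attribute and, for RoCE, resolves the destination MAC before calling the driver. A minimal sketch of a kernel caller building an IB-type address handle (DLID, SL and port are placeholders, not values from the listing):

#include <rdma/ib_verbs.h>

static struct ib_ah *example_create_ah(struct ib_pd *pd, u32 port_num,
					u16 dlid, u8 sl)
{
	struct rdma_ah_attr ah_attr = {};
	struct ib_ah *ah;

	ah_attr.type = rdma_ah_find_type(pd->device, port_num);
	rdma_ah_set_dlid(&ah_attr, dlid);
	rdma_ah_set_sl(&ah_attr, sl);
	rdma_ah_set_port_num(&ah_attr, port_num);

	ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
	return ah;	/* ERR_PTR() on failure; release with rdma_destroy_ah() */
}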
628 const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh; in ib_get_rdma_header_version()
630 const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh; in ib_get_rdma_header_version()
635 if (ip6h->version != 6) in ib_get_rdma_header_version()
636 return (ip4h->version == 4) ? 4 : 0; in ib_get_rdma_header_version()
642 if (ip4h->ihl != 5) in ib_get_rdma_header_version()
653 if (ip4h->check == ip4h_checked.check) in ib_get_rdma_header_version()
673 if (grh->next_hdr == IPPROTO_UDP) in ib_get_net_type_by_grh()
692 if (ctx->gid_type != gid_attr->gid_type) in find_gid_index()
699 return ctx->vlan_id == vlan_id; in find_gid_index()
723 return -EINVAL; in ib_get_gids_from_rdma_hdr()
727 &hdr->roce4grh.saddr, 4); in ib_get_gids_from_rdma_hdr()
729 &hdr->roce4grh.daddr, 4); in ib_get_gids_from_rdma_hdr()
739 *dgid = hdr->ibgrh.dgid; in ib_get_gids_from_rdma_hdr()
740 *sgid = hdr->ibgrh.sgid; in ib_get_gids_from_rdma_hdr()
743 return -EINVAL; in ib_get_gids_from_rdma_hdr()
756 const struct ib_gid_attr *sgid_attr = grh->sgid_attr; in ib_resolve_unicast_gid_dmac()
763 if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) && in ib_resolve_unicast_gid_dmac()
764 sgid_attr->gid_type == IB_GID_TYPE_ROCE) { in ib_resolve_unicast_gid_dmac()
765 rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw, in ib_resolve_unicast_gid_dmac()
766 ah_attr->roce.dmac); in ib_resolve_unicast_gid_dmac()
770 ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid, in ib_resolve_unicast_gid_dmac()
771 ah_attr->roce.dmac, in ib_resolve_unicast_gid_dmac()
774 grh->hop_limit = hop_limit; in ib_resolve_unicast_gid_dmac()
806 ah_attr->type = rdma_ah_find_type(device, port_num); in ib_init_ah_attr_from_wc()
808 if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE) in ib_init_ah_attr_from_wc()
809 net_type = wc->network_hdr_type; in ib_init_ah_attr_from_wc()
819 rdma_ah_set_sl(ah_attr, wc->sl); in ib_init_ah_attr_from_wc()
823 u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ? in ib_init_ah_attr_from_wc()
824 wc->vlan_id : 0xffff; in ib_init_ah_attr_from_wc()
826 if (!(wc->wc_flags & IB_WC_GRH)) in ib_init_ah_attr_from_wc()
827 return -EPROTOTYPE; in ib_init_ah_attr_from_wc()
835 flow_class = be32_to_cpu(grh->version_tclass_flow); in ib_init_ah_attr_from_wc()
849 rdma_ah_set_dlid(ah_attr, wc->slid); in ib_init_ah_attr_from_wc()
850 rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits); in ib_init_ah_attr_from_wc()
852 if ((wc->wc_flags & IB_WC_GRH) == 0) in ib_init_ah_attr_from_wc()
864 flow_class = be32_to_cpu(grh->version_tclass_flow); in ib_init_ah_attr_from_wc()
878 * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
896 rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit, in rdma_move_grh_sgid_attr()
898 attr->grh.sgid_attr = sgid_attr; in rdma_move_grh_sgid_attr()
903 * rdma_destroy_ah_attr - Release reference to SGID attribute of
913 if (ah_attr->grh.sgid_attr) { in rdma_destroy_ah_attr()
914 rdma_put_gid_attr(ah_attr->grh.sgid_attr); in rdma_destroy_ah_attr()
915 ah_attr->grh.sgid_attr = NULL; in rdma_destroy_ah_attr()
927 ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr); in ib_create_ah_from_wc()
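
ib_create_ah_from_wc() above is the usual way a UD responder (e.g. a MAD agent) obtains an AH for a reply: it wraps ib_init_ah_attr_from_wc() plus rdma_create_ah(). A hedged sketch of that call, with nothing beyond the listed API assumed:

#include <rdma/ib_verbs.h>

static struct ib_ah *example_reply_ah(struct ib_pd *pd, const struct ib_wc *wc,
				      const struct ib_grh *grh, u32 port_num)
{
	/* ERR_PTR() on failure; destroy with rdma_destroy_ah() after the reply. */
	return ib_create_ah_from_wc(pd, wc, grh, port_num);
}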
943 if (ah->type != ah_attr->type) in rdma_modify_ah()
944 return -EINVAL; in rdma_modify_ah()
946 ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr); in rdma_modify_ah()
950 ret = ah->device->ops.modify_ah ? in rdma_modify_ah()
951 ah->device->ops.modify_ah(ah, ah_attr) : in rdma_modify_ah()
952 -EOPNOTSUPP; in rdma_modify_ah()
954 ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr); in rdma_modify_ah()
962 ah_attr->grh.sgid_attr = NULL; in rdma_query_ah()
964 return ah->device->ops.query_ah ? in rdma_query_ah()
965 ah->device->ops.query_ah(ah, ah_attr) : in rdma_query_ah()
966 -EOPNOTSUPP; in rdma_query_ah()
972 const struct ib_gid_attr *sgid_attr = ah->sgid_attr; in rdma_destroy_ah_user()
978 pd = ah->pd; in rdma_destroy_ah_user()
980 ret = ah->device->ops.destroy_ah(ah, flags); in rdma_destroy_ah_user()
984 atomic_dec(&pd->usecnt); in rdma_destroy_ah_user()
996 * ib_create_srq_user - Creates a SRQ associated with the specified protection
997 * domain.
998 * @pd: The protection domain associated with the SRQ.
1005 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1018 srq = rdma_zalloc_drv_obj(pd->device, ib_srq); in ib_create_srq_user()
1020 return ERR_PTR(-ENOMEM); in ib_create_srq_user()
1022 srq->device = pd->device; in ib_create_srq_user()
1023 srq->pd = pd; in ib_create_srq_user()
1024 srq->event_handler = srq_init_attr->event_handler; in ib_create_srq_user()
1025 srq->srq_context = srq_init_attr->srq_context; in ib_create_srq_user()
1026 srq->srq_type = srq_init_attr->srq_type; in ib_create_srq_user()
1027 srq->uobject = uobject; in ib_create_srq_user()
1029 if (ib_srq_has_cq(srq->srq_type)) { in ib_create_srq_user()
1030 srq->ext.cq = srq_init_attr->ext.cq; in ib_create_srq_user()
1031 atomic_inc(&srq->ext.cq->usecnt); in ib_create_srq_user()
1033 if (srq->srq_type == IB_SRQT_XRC) { in ib_create_srq_user()
1034 srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd; in ib_create_srq_user()
1035 if (srq->ext.xrc.xrcd) in ib_create_srq_user()
1036 atomic_inc(&srq->ext.xrc.xrcd->usecnt); in ib_create_srq_user()
1038 atomic_inc(&pd->usecnt); in ib_create_srq_user()
1040 rdma_restrack_new(&srq->res, RDMA_RESTRACK_SRQ); in ib_create_srq_user()
1041 rdma_restrack_parent_name(&srq->res, &pd->res); in ib_create_srq_user()
1043 ret = pd->device->ops.create_srq(srq, srq_init_attr, udata); in ib_create_srq_user()
1045 rdma_restrack_put(&srq->res); in ib_create_srq_user()
1046 atomic_dec(&pd->usecnt); in ib_create_srq_user()
1047 if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd) in ib_create_srq_user()
1048 atomic_dec(&srq->ext.xrc.xrcd->usecnt); in ib_create_srq_user()
1049 if (ib_srq_has_cq(srq->srq_type)) in ib_create_srq_user()
1050 atomic_dec(&srq->ext.cq->usecnt); in ib_create_srq_user()
1055 rdma_restrack_add(&srq->res); in ib_create_srq_user()
1065 return srq->device->ops.modify_srq ? in ib_modify_srq()
1066 srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask, in ib_modify_srq()
1067 NULL) : -EOPNOTSUPP; in ib_modify_srq()
1074 return srq->device->ops.query_srq ? in ib_query_srq()
1075 srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP; in ib_query_srq()
1083 if (atomic_read(&srq->usecnt)) in ib_destroy_srq_user()
1084 return -EBUSY; in ib_destroy_srq_user()
1086 ret = srq->device->ops.destroy_srq(srq, udata); in ib_destroy_srq_user()
1090 atomic_dec(&srq->pd->usecnt); in ib_destroy_srq_user()
1091 if (srq->srq_type == IB_SRQT_XRC && srq->ext.xrc.xrcd) in ib_destroy_srq_user()
1092 atomic_dec(&srq->ext.xrc.xrcd->usecnt); in ib_destroy_srq_user()
1093 if (ib_srq_has_cq(srq->srq_type)) in ib_destroy_srq_user()
1094 atomic_dec(&srq->ext.cq->usecnt); in ib_destroy_srq_user()
1095 rdma_restrack_del(&srq->res); in ib_destroy_srq_user()
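
The block above covers SRQ creation, modify/query and teardown. As a hedged sketch, an in-kernel consumer goes through the ib_create_srq() wrapper; the sizes below are arbitrary examples, not values from the listing:

#include <rdma/ib_verbs.h>

static struct ib_srq *example_create_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr attr = {
		.srq_type = IB_SRQT_BASIC,
		.attr = {
			.max_wr  = 256,	/* read to size the SRQ */
			.max_sge = 1,
		},
	};

	return ib_create_srq(pd, &attr);	/* ERR_PTR() on failure */
}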
1109 spin_lock_irqsave(&qp->device->qp_open_list_lock, flags); in __ib_shared_qp_event_handler()
1110 list_for_each_entry(event->element.qp, &qp->open_list, open_list) in __ib_shared_qp_event_handler()
1111 if (event->element.qp->event_handler) in __ib_shared_qp_event_handler()
1112 event->element.qp->event_handler(event, event->element.qp->qp_context); in __ib_shared_qp_event_handler()
1113 spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags); in __ib_shared_qp_event_handler()
1126 return ERR_PTR(-ENOMEM); in __ib_open_qp()
1128 qp->real_qp = real_qp; in __ib_open_qp()
1129 err = ib_open_shared_qp_security(qp, real_qp->device); in __ib_open_qp()
1135 qp->real_qp = real_qp; in __ib_open_qp()
1136 atomic_inc(&real_qp->usecnt); in __ib_open_qp()
1137 qp->device = real_qp->device; in __ib_open_qp()
1138 qp->event_handler = event_handler; in __ib_open_qp()
1139 qp->qp_context = qp_context; in __ib_open_qp()
1140 qp->qp_num = real_qp->qp_num; in __ib_open_qp()
1141 qp->qp_type = real_qp->qp_type; in __ib_open_qp()
1143 spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags); in __ib_open_qp()
1144 list_add(&qp->open_list, &real_qp->open_list); in __ib_open_qp()
1145 spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags); in __ib_open_qp()
1155 if (qp_open_attr->qp_type != IB_QPT_XRC_TGT) in ib_open_qp()
1156 return ERR_PTR(-EINVAL); in ib_open_qp()
1158 down_read(&xrcd->tgt_qps_rwsem); in ib_open_qp()
1159 real_qp = xa_load(&xrcd->tgt_qps, qp_open_attr->qp_num); in ib_open_qp()
1161 up_read(&xrcd->tgt_qps_rwsem); in ib_open_qp()
1162 return ERR_PTR(-EINVAL); in ib_open_qp()
1164 qp = __ib_open_qp(real_qp, qp_open_attr->event_handler, in ib_open_qp()
1165 qp_open_attr->qp_context); in ib_open_qp()
1166 up_read(&xrcd->tgt_qps_rwsem); in ib_open_qp()
1177 qp->event_handler = __ib_shared_qp_event_handler; in create_xrc_qp_user()
1178 qp->qp_context = qp; in create_xrc_qp_user()
1179 qp->pd = NULL; in create_xrc_qp_user()
1180 qp->send_cq = qp->recv_cq = NULL; in create_xrc_qp_user()
1181 qp->srq = NULL; in create_xrc_qp_user()
1182 qp->xrcd = qp_init_attr->xrcd; in create_xrc_qp_user()
1183 atomic_inc(&qp_init_attr->xrcd->usecnt); in create_xrc_qp_user()
1184 INIT_LIST_HEAD(&qp->open_list); in create_xrc_qp_user()
1186 qp = __ib_open_qp(real_qp, qp_init_attr->event_handler, in create_xrc_qp_user()
1187 qp_init_attr->qp_context); in create_xrc_qp_user()
1191 err = xa_err(xa_store(&qp_init_attr->xrcd->tgt_qps, real_qp->qp_num, in create_xrc_qp_user()
1209 if (!dev->ops.create_qp) in create_qp()
1210 return ERR_PTR(-EOPNOTSUPP); in create_qp()
1214 return ERR_PTR(-ENOMEM); in create_qp()
1216 qp->device = dev; in create_qp()
1217 qp->pd = pd; in create_qp()
1218 qp->uobject = uobj; in create_qp()
1219 qp->real_qp = qp; in create_qp()
1221 qp->qp_type = attr->qp_type; in create_qp()
1222 qp->rwq_ind_tbl = attr->rwq_ind_tbl; in create_qp()
1223 qp->srq = attr->srq; in create_qp()
1224 qp->event_handler = attr->event_handler; in create_qp()
1225 qp->port = attr->port_num; in create_qp()
1226 qp->qp_context = attr->qp_context; in create_qp()
1228 spin_lock_init(&qp->mr_lock); in create_qp()
1229 INIT_LIST_HEAD(&qp->rdma_mrs); in create_qp()
1230 INIT_LIST_HEAD(&qp->sig_mrs); in create_qp()
1232 qp->send_cq = attr->send_cq; in create_qp()
1233 qp->recv_cq = attr->recv_cq; in create_qp()
1235 rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP); in create_qp()
1237 rdma_restrack_set_name(&qp->res, udata ? NULL : caller); in create_qp()
1238 ret = dev->ops.create_qp(qp, attr, udata); in create_qp()
1246 qp->send_cq = attr->send_cq; in create_qp()
1247 qp->recv_cq = attr->recv_cq; in create_qp()
1253 rdma_restrack_add(&qp->res); in create_qp()
1257 qp->device->ops.destroy_qp(qp, udata ? &dummy : NULL); in create_qp()
1259 rdma_restrack_put(&qp->res); in create_qp()
1266 * ib_create_qp_user - Creates a QP associated with the specified protection
1267 * domain.
1269 * @pd: The protection domain associated with the QP.
1275 * @caller: caller's build-time module name
1284 if (attr->qp_type == IB_QPT_XRC_TGT) in ib_create_qp_user()
1288 if (attr->qp_type != IB_QPT_XRC_TGT || IS_ERR(qp)) in ib_create_qp_user()
1297 xrc_qp->uobject = uobj; in ib_create_qp_user()
1304 if (qp->pd) in ib_qp_usecnt_inc()
1305 atomic_inc(&qp->pd->usecnt); in ib_qp_usecnt_inc()
1306 if (qp->send_cq) in ib_qp_usecnt_inc()
1307 atomic_inc(&qp->send_cq->usecnt); in ib_qp_usecnt_inc()
1308 if (qp->recv_cq) in ib_qp_usecnt_inc()
1309 atomic_inc(&qp->recv_cq->usecnt); in ib_qp_usecnt_inc()
1310 if (qp->srq) in ib_qp_usecnt_inc()
1311 atomic_inc(&qp->srq->usecnt); in ib_qp_usecnt_inc()
1312 if (qp->rwq_ind_tbl) in ib_qp_usecnt_inc()
1313 atomic_inc(&qp->rwq_ind_tbl->usecnt); in ib_qp_usecnt_inc()
1319 if (qp->rwq_ind_tbl) in ib_qp_usecnt_dec()
1320 atomic_dec(&qp->rwq_ind_tbl->usecnt); in ib_qp_usecnt_dec()
1321 if (qp->srq) in ib_qp_usecnt_dec()
1322 atomic_dec(&qp->srq->usecnt); in ib_qp_usecnt_dec()
1323 if (qp->recv_cq) in ib_qp_usecnt_dec()
1324 atomic_dec(&qp->recv_cq->usecnt); in ib_qp_usecnt_dec()
1325 if (qp->send_cq) in ib_qp_usecnt_dec()
1326 atomic_dec(&qp->send_cq->usecnt); in ib_qp_usecnt_dec()
1327 if (qp->pd) in ib_qp_usecnt_dec()
1328 atomic_dec(&qp->pd->usecnt); in ib_qp_usecnt_dec()
1336 struct ib_device *device = pd->device; in ib_create_qp_kernel()
1346 if (qp_init_attr->cap.max_rdma_ctxs) in ib_create_qp_kernel()
1355 if (qp_init_attr->cap.max_rdma_ctxs) { in ib_create_qp_kernel()
1366 qp->max_write_sge = qp_init_attr->cap.max_send_sge; in ib_create_qp_kernel()
1367 qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge, in ib_create_qp_kernel()
1368 device->attrs.max_sge_rd); in ib_create_qp_kernel()
1369 if (qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) in ib_create_qp_kernel()
1370 qp->integrity_en = true; in ib_create_qp_kernel()
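
ib_create_qp_user()/ib_create_qp_kernel() above are reached from ULPs via the ib_create_qp() wrapper. A minimal sketch of creating an RC QP (the CQ is assumed to exist already and the capacities are illustrative only):

#include <rdma/ib_verbs.h>

static struct ib_qp *example_create_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr attr = {
		.send_cq	= cq,
		.recv_cq	= cq,
		.qp_type	= IB_QPT_RC,
		.sq_sig_type	= IB_SIGNAL_REQ_WR,
		.cap = {
			.max_send_wr	= 64,
			.max_recv_wr	= 64,
			.max_send_sge	= 1,
			.max_recv_sge	= 1,
		},
	};

	/* ib_create_qp() resolves to ib_create_qp_kernel(pd, &attr, KBUILD_MODNAME). */
	return ib_create_qp(pd, &attr);
}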
1715 * ib_resolve_eth_dmac - Resolve destination mac address
1728 if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) { in ib_resolve_eth_dmac()
1729 if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) { in ib_resolve_eth_dmac()
1732 memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4); in ib_resolve_eth_dmac()
1733 ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac); in ib_resolve_eth_dmac()
1735 ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw, in ib_resolve_eth_dmac()
1736 (char *)ah_attr->roce.dmac); in ib_resolve_eth_dmac()
1746 return (qp->qp_type == IB_QPT_UC || in is_qp_type_connected()
1747 qp->qp_type == IB_QPT_RC || in is_qp_type_connected()
1748 qp->qp_type == IB_QPT_XRC_INI || in is_qp_type_connected()
1749 qp->qp_type == IB_QPT_XRC_TGT); in is_qp_type_connected()
1758 u32 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; in _ib_modify_qp()
1763 attr->xmit_slave = NULL; in _ib_modify_qp()
1765 ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr, in _ib_modify_qp()
1770 if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE && in _ib_modify_qp()
1780 ret = ib_resolve_eth_dmac(qp->device, in _ib_modify_qp()
1781 &attr->ah_attr); in _ib_modify_qp()
1785 slave = rdma_lag_get_ah_roce_slave(qp->device, in _ib_modify_qp()
1786 &attr->ah_attr, in _ib_modify_qp()
1792 attr->xmit_slave = slave; in _ib_modify_qp()
1799 * from primary->alternate we will keep the wrong in _ib_modify_qp()
1803 ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr, in _ib_modify_qp()
1812 if (!(rdma_protocol_ib(qp->device, in _ib_modify_qp()
1813 attr->alt_ah_attr.port_num) && in _ib_modify_qp()
1814 rdma_protocol_ib(qp->device, port))) { in _ib_modify_qp()
1815 ret = -EINVAL; in _ib_modify_qp()
1820 if (rdma_ib_or_roce(qp->device, port)) { in _ib_modify_qp()
1821 if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) { in _ib_modify_qp()
1822 dev_warn(&qp->device->dev, in _ib_modify_qp()
1825 attr->rq_psn &= 0xffffff; in _ib_modify_qp()
1828 if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) { in _ib_modify_qp()
1829 dev_warn(&qp->device->dev, in _ib_modify_qp()
1832 attr->sq_psn &= 0xffffff; in _ib_modify_qp()
1840 if (!qp->counter && (attr_mask & IB_QP_PORT) && in _ib_modify_qp()
1841 ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT)) in _ib_modify_qp()
1842 rdma_counter_bind_qp_auto(qp, attr->port_num); in _ib_modify_qp()
1849 qp->port = attr->port_num; in _ib_modify_qp()
1851 qp->av_sgid_attr = in _ib_modify_qp()
1852 rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr); in _ib_modify_qp()
1854 qp->alt_path_sgid_attr = rdma_update_sgid_attr( in _ib_modify_qp()
1855 &attr->alt_ah_attr, qp->alt_path_sgid_attr); in _ib_modify_qp()
1859 rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av); in _ib_modify_qp()
1862 rdma_lag_put_ah_roce_slave(attr->xmit_slave); in _ib_modify_qp()
1863 rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av); in _ib_modify_qp()
1869 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
1873 * @attr_mask: A bit-mask used to specify which attributes of the QP
1882 return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata); in ib_modify_qp_with_udata()
1977 return -EINVAL; in ib_get_eth_speed()
1981 return -ENODEV; in ib_get_eth_speed()
1995 netdev->name, netdev_speed); in ib_get_eth_speed()
2009 return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); in ib_modify_qp()
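
_ib_modify_qp() above resolves SGID attributes, masks the PSNs to 24 bits and binds counters before calling the driver. A hedged sketch of the standard consumer-side transition of a new QP to INIT (pkey index, port and access flags are placeholders):

#include <rdma/ib_verbs.h>

static int example_qp_to_init(struct ib_qp *qp, u32 port_num)
{
	struct ib_qp_attr attr = {
		.qp_state	 = IB_QPS_INIT,
		.pkey_index	 = 0,
		.port_num	 = port_num,
		.qp_access_flags = IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE,
	};

	return ib_modify_qp(qp, &attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_PORT | IB_QP_ACCESS_FLAGS);
}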
2018 qp_attr->ah_attr.grh.sgid_attr = NULL; in ib_query_qp()
2019 qp_attr->alt_ah_attr.grh.sgid_attr = NULL; in ib_query_qp()
2021 return qp->device->ops.query_qp ? in ib_query_qp()
2022 qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask, in ib_query_qp()
2023 qp_init_attr) : -EOPNOTSUPP; in ib_query_qp()
2032 real_qp = qp->real_qp; in ib_close_qp()
2034 return -EINVAL; in ib_close_qp()
2036 spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags); in ib_close_qp()
2037 list_del(&qp->open_list); in ib_close_qp()
2038 spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags); in ib_close_qp()
2040 atomic_dec(&real_qp->usecnt); in ib_close_qp()
2041 if (qp->qp_sec) in ib_close_qp()
2042 ib_close_shared_qp_security(qp->qp_sec); in ib_close_qp()
2055 real_qp = qp->real_qp; in __ib_destroy_shared_qp()
2056 xrcd = real_qp->xrcd; in __ib_destroy_shared_qp()
2057 down_write(&xrcd->tgt_qps_rwsem); in __ib_destroy_shared_qp()
2059 if (atomic_read(&real_qp->usecnt) == 0) in __ib_destroy_shared_qp()
2060 xa_erase(&xrcd->tgt_qps, real_qp->qp_num); in __ib_destroy_shared_qp()
2063 up_write(&xrcd->tgt_qps_rwsem); in __ib_destroy_shared_qp()
2068 atomic_dec(&xrcd->usecnt); in __ib_destroy_shared_qp()
2076 const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr; in ib_destroy_qp_user()
2077 const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr; in ib_destroy_qp_user()
2081 WARN_ON_ONCE(qp->mrs_used > 0); in ib_destroy_qp_user()
2083 if (atomic_read(&qp->usecnt)) in ib_destroy_qp_user()
2084 return -EBUSY; in ib_destroy_qp_user()
2086 if (qp->real_qp != qp) in ib_destroy_qp_user()
2089 sec = qp->qp_sec; in ib_destroy_qp_user()
2093 if (!qp->uobject) in ib_destroy_qp_user()
2097 ret = qp->device->ops.destroy_qp(qp, udata); in ib_destroy_qp_user()
2113 rdma_restrack_del(&qp->res); in ib_destroy_qp_user()
2133 return ERR_PTR(-ENOMEM); in __ib_create_cq()
2135 cq->device = device; in __ib_create_cq()
2136 cq->uobject = NULL; in __ib_create_cq()
2137 cq->comp_handler = comp_handler; in __ib_create_cq()
2138 cq->event_handler = event_handler; in __ib_create_cq()
2139 cq->cq_context = cq_context; in __ib_create_cq()
2140 atomic_set(&cq->usecnt, 0); in __ib_create_cq()
2142 rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ); in __ib_create_cq()
2143 rdma_restrack_set_name(&cq->res, caller); in __ib_create_cq()
2145 ret = device->ops.create_cq(cq, cq_attr, NULL); in __ib_create_cq()
2147 rdma_restrack_put(&cq->res); in __ib_create_cq()
2152 rdma_restrack_add(&cq->res); in __ib_create_cq()
2159 if (cq->shared) in rdma_set_cq_moderation()
2160 return -EOPNOTSUPP; in rdma_set_cq_moderation()
2162 return cq->device->ops.modify_cq ? in rdma_set_cq_moderation()
2163 cq->device->ops.modify_cq(cq, cq_count, in rdma_set_cq_moderation()
2164 cq_period) : -EOPNOTSUPP; in rdma_set_cq_moderation()
2172 if (WARN_ON_ONCE(cq->shared)) in ib_destroy_cq_user()
2173 return -EOPNOTSUPP; in ib_destroy_cq_user()
2175 if (atomic_read(&cq->usecnt)) in ib_destroy_cq_user()
2176 return -EBUSY; in ib_destroy_cq_user()
2178 ret = cq->device->ops.destroy_cq(cq, udata); in ib_destroy_cq_user()
2182 rdma_restrack_del(&cq->res); in ib_destroy_cq_user()
2190 if (cq->shared) in ib_resize_cq()
2191 return -EOPNOTSUPP; in ib_resize_cq()
2193 return cq->device->ops.resize_cq ? in ib_resize_cq()
2194 cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP; in ib_resize_cq()
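
__ib_create_cq() above is the legacy in-kernel CQ constructor, reached through the ib_create_cq() wrapper (most new code uses ib_alloc_cq() instead). A hedged sketch, with a hypothetical completion handler and an arbitrary CQE count:

#include <rdma/ib_verbs.h>

static void example_comp_handler(struct ib_cq *cq, void *cq_context)
{
	/* typically: schedule polling of @cq */
}

static struct ib_cq *example_create_cq(struct ib_device *device)
{
	struct ib_cq_init_attr cq_attr = { .cqe = 128, .comp_vector = 0 };

	return ib_create_cq(device, example_comp_handler, NULL, NULL,
			    &cq_attr);	/* ERR_PTR() on failure */
}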
2206 if (!(pd->device->attrs.kernel_cap_flags & in ib_reg_user_mr()
2209 return ERR_PTR(-EINVAL); in ib_reg_user_mr()
2213 mr = pd->device->ops.reg_user_mr(pd, start, length, virt_addr, in ib_reg_user_mr()
2219 mr->device = pd->device; in ib_reg_user_mr()
2220 mr->type = IB_MR_TYPE_USER; in ib_reg_user_mr()
2221 mr->pd = pd; in ib_reg_user_mr()
2222 mr->dm = NULL; in ib_reg_user_mr()
2223 atomic_inc(&pd->usecnt); in ib_reg_user_mr()
2224 mr->iova = virt_addr; in ib_reg_user_mr()
2225 mr->length = length; in ib_reg_user_mr()
2227 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); in ib_reg_user_mr()
2228 rdma_restrack_parent_name(&mr->res, &pd->res); in ib_reg_user_mr()
2229 rdma_restrack_add(&mr->res); in ib_reg_user_mr()
2238 if (!pd->device->ops.advise_mr) in ib_advise_mr()
2239 return -EOPNOTSUPP; in ib_advise_mr()
2244 return pd->device->ops.advise_mr(pd, advice, flags, sg_list, num_sge, in ib_advise_mr()
2251 struct ib_pd *pd = mr->pd; in ib_dereg_mr_user()
2252 struct ib_dm *dm = mr->dm; in ib_dereg_mr_user()
2253 struct ib_sig_attrs *sig_attrs = mr->sig_attrs; in ib_dereg_mr_user()
2257 rdma_restrack_del(&mr->res); in ib_dereg_mr_user()
2258 ret = mr->device->ops.dereg_mr(mr, udata); in ib_dereg_mr_user()
2260 atomic_dec(&pd->usecnt); in ib_dereg_mr_user()
2262 atomic_dec(&dm->usecnt); in ib_dereg_mr_user()
2271 * ib_alloc_mr() - Allocates a memory region
2272 * @pd: protection domain associated with the region
2287 if (!pd->device->ops.alloc_mr) { in ib_alloc_mr()
2288 mr = ERR_PTR(-EOPNOTSUPP); in ib_alloc_mr()
2294 mr = ERR_PTR(-EINVAL); in ib_alloc_mr()
2298 mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg); in ib_alloc_mr()
2302 mr->device = pd->device; in ib_alloc_mr()
2303 mr->pd = pd; in ib_alloc_mr()
2304 mr->dm = NULL; in ib_alloc_mr()
2305 mr->uobject = NULL; in ib_alloc_mr()
2306 atomic_inc(&pd->usecnt); in ib_alloc_mr()
2307 mr->need_inval = false; in ib_alloc_mr()
2308 mr->type = mr_type; in ib_alloc_mr()
2309 mr->sig_attrs = NULL; in ib_alloc_mr()
2311 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); in ib_alloc_mr()
2312 rdma_restrack_parent_name(&mr->res, &pd->res); in ib_alloc_mr()
2313 rdma_restrack_add(&mr->res); in ib_alloc_mr()
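
ib_alloc_mr() above allocates a fast-registration MR against a PD, as storage ULPs do. A hedged sketch (max_num_sg is an arbitrary example):

#include <rdma/ib_verbs.h>

static struct ib_mr *example_alloc_frmr(struct ib_pd *pd)
{
	struct ib_mr *mr;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
	if (IS_ERR(mr))
		return mr;

	/* mr->lkey/rkey are valid now; pair with ib_dereg_mr() on teardown. */
	return mr;
}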
2321 * ib_alloc_mr_integrity() - Allocates an integrity memory region
2322 * @pd: protection domain associated with the region
2339 if (!pd->device->ops.alloc_mr_integrity || in ib_alloc_mr_integrity()
2340 !pd->device->ops.map_mr_sg_pi) { in ib_alloc_mr_integrity()
2341 mr = ERR_PTR(-EOPNOTSUPP); in ib_alloc_mr_integrity()
2346 mr = ERR_PTR(-EINVAL); in ib_alloc_mr_integrity()
2352 mr = ERR_PTR(-ENOMEM); in ib_alloc_mr_integrity()
2356 mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg, in ib_alloc_mr_integrity()
2363 mr->device = pd->device; in ib_alloc_mr_integrity()
2364 mr->pd = pd; in ib_alloc_mr_integrity()
2365 mr->dm = NULL; in ib_alloc_mr_integrity()
2366 mr->uobject = NULL; in ib_alloc_mr_integrity()
2367 atomic_inc(&pd->usecnt); in ib_alloc_mr_integrity()
2368 mr->need_inval = false; in ib_alloc_mr_integrity()
2369 mr->type = IB_MR_TYPE_INTEGRITY; in ib_alloc_mr_integrity()
2370 mr->sig_attrs = sig_attrs; in ib_alloc_mr_integrity()
2372 rdma_restrack_new(&mr->res, RDMA_RESTRACK_MR); in ib_alloc_mr_integrity()
2373 rdma_restrack_parent_name(&mr->res, &pd->res); in ib_alloc_mr_integrity()
2374 rdma_restrack_add(&mr->res); in ib_alloc_mr_integrity()
2395 if (rdma_port_get_link_layer(qp->device, attr.port_num) != in is_valid_mcast_lid()
2403 rdma_for_each_port(qp->device, port) in is_valid_mcast_lid()
2404 if (rdma_port_get_link_layer(qp->device, port) != in is_valid_mcast_lid()
2425 if (!qp->device->ops.attach_mcast) in ib_attach_mcast()
2426 return -EOPNOTSUPP; in ib_attach_mcast()
2428 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) || in ib_attach_mcast()
2429 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid)) in ib_attach_mcast()
2430 return -EINVAL; in ib_attach_mcast()
2432 ret = qp->device->ops.attach_mcast(qp, gid, lid); in ib_attach_mcast()
2434 atomic_inc(&qp->usecnt); in ib_attach_mcast()
2443 if (!qp->device->ops.detach_mcast) in ib_detach_mcast()
2444 return -EOPNOTSUPP; in ib_detach_mcast()
2446 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) || in ib_detach_mcast()
2447 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid)) in ib_detach_mcast()
2448 return -EINVAL; in ib_detach_mcast()
2450 ret = qp->device->ops.detach_mcast(qp, gid, lid); in ib_detach_mcast()
2452 atomic_dec(&qp->usecnt); in ib_detach_mcast()
2458 * ib_alloc_xrcd_user - Allocates an XRC domain.
2459 * @device: The device on which to allocate the XRC domain.
2469 if (!device->ops.alloc_xrcd) in ib_alloc_xrcd_user()
2470 return ERR_PTR(-EOPNOTSUPP); in ib_alloc_xrcd_user()
2474 return ERR_PTR(-ENOMEM); in ib_alloc_xrcd_user()
2476 xrcd->device = device; in ib_alloc_xrcd_user()
2477 xrcd->inode = inode; in ib_alloc_xrcd_user()
2478 atomic_set(&xrcd->usecnt, 0); in ib_alloc_xrcd_user()
2479 init_rwsem(&xrcd->tgt_qps_rwsem); in ib_alloc_xrcd_user()
2480 xa_init(&xrcd->tgt_qps); in ib_alloc_xrcd_user()
2482 ret = device->ops.alloc_xrcd(xrcd, udata); in ib_alloc_xrcd_user()
2493 * ib_dealloc_xrcd_user - Deallocates an XRC domain.
2494 * @xrcd: The XRC domain to deallocate.
2501 if (atomic_read(&xrcd->usecnt)) in ib_dealloc_xrcd_user()
2502 return -EBUSY; in ib_dealloc_xrcd_user()
2504 WARN_ON(!xa_empty(&xrcd->tgt_qps)); in ib_dealloc_xrcd_user()
2505 ret = xrcd->device->ops.dealloc_xrcd(xrcd, udata); in ib_dealloc_xrcd_user()
2514 * ib_create_wq - Creates a WQ associated with the specified protection
2515 * domain.
2516 * @pd: The protection domain associated with the WQ.
2521 * wq_attr->max_wr and wq_attr->max_sge determine
2532 if (!pd->device->ops.create_wq) in ib_create_wq()
2533 return ERR_PTR(-EOPNOTSUPP); in ib_create_wq()
2535 wq = pd->device->ops.create_wq(pd, wq_attr, NULL); in ib_create_wq()
2537 wq->event_handler = wq_attr->event_handler; in ib_create_wq()
2538 wq->wq_context = wq_attr->wq_context; in ib_create_wq()
2539 wq->wq_type = wq_attr->wq_type; in ib_create_wq()
2540 wq->cq = wq_attr->cq; in ib_create_wq()
2541 wq->device = pd->device; in ib_create_wq()
2542 wq->pd = pd; in ib_create_wq()
2543 wq->uobject = NULL; in ib_create_wq()
2544 atomic_inc(&pd->usecnt); in ib_create_wq()
2545 atomic_inc(&wq_attr->cq->usecnt); in ib_create_wq()
2546 atomic_set(&wq->usecnt, 0); in ib_create_wq()
2553 * ib_destroy_wq_user - Destroys the specified user WQ.
2559 struct ib_cq *cq = wq->cq; in ib_destroy_wq_user()
2560 struct ib_pd *pd = wq->pd; in ib_destroy_wq_user()
2563 if (atomic_read(&wq->usecnt)) in ib_destroy_wq_user()
2564 return -EBUSY; in ib_destroy_wq_user()
2566 ret = wq->device->ops.destroy_wq(wq, udata); in ib_destroy_wq_user()
2570 atomic_dec(&pd->usecnt); in ib_destroy_wq_user()
2571 atomic_dec(&cq->usecnt); in ib_destroy_wq_user()
2579 if (!mr->device->ops.check_mr_status) in ib_check_mr_status()
2580 return -EOPNOTSUPP; in ib_check_mr_status()
2582 return mr->device->ops.check_mr_status(mr, check_mask, mr_status); in ib_check_mr_status()
2589 if (!device->ops.set_vf_link_state) in ib_set_vf_link_state()
2590 return -EOPNOTSUPP; in ib_set_vf_link_state()
2592 return device->ops.set_vf_link_state(device, vf, port, state); in ib_set_vf_link_state()
2599 if (!device->ops.get_vf_config) in ib_get_vf_config()
2600 return -EOPNOTSUPP; in ib_get_vf_config()
2602 return device->ops.get_vf_config(device, vf, port, info); in ib_get_vf_config()
2609 if (!device->ops.get_vf_stats) in ib_get_vf_stats()
2610 return -EOPNOTSUPP; in ib_get_vf_stats()
2612 return device->ops.get_vf_stats(device, vf, port, stats); in ib_get_vf_stats()
2619 if (!device->ops.set_vf_guid) in ib_set_vf_guid()
2620 return -EOPNOTSUPP; in ib_set_vf_guid()
2622 return device->ops.set_vf_guid(device, vf, port, guid, type); in ib_set_vf_guid()
2630 if (!device->ops.get_vf_guid) in ib_get_vf_guid()
2631 return -EOPNOTSUPP; in ib_get_vf_guid()
2633 return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid); in ib_get_vf_guid()
2637 * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
2649 * - The MR must be allocated with type IB_MR_TYPE_INTEGRITY.
2661 if (unlikely(!mr->device->ops.map_mr_sg_pi || in ib_map_mr_sg_pi()
2662 WARN_ON_ONCE(mr->type != IB_MR_TYPE_INTEGRITY))) in ib_map_mr_sg_pi()
2663 return -EOPNOTSUPP; in ib_map_mr_sg_pi()
2665 mr->page_size = page_size; in ib_map_mr_sg_pi()
2667 return mr->device->ops.map_mr_sg_pi(mr, data_sg, data_sg_nents, in ib_map_mr_sg_pi()
2674 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
2684 * - The first sg element is allowed to have an offset.
2685 * - Each sg element must either be aligned to page_size or virtually
2687 * non-contiguous offset, the mapping prefix will not include it.
2688 * - The last sg element is allowed to have length less than page_size.
2689 * - If sg_nents total byte length exceeds the mr max_num_sge * page_size
2691 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
2702 if (unlikely(!mr->device->ops.map_mr_sg)) in ib_map_mr_sg()
2703 return -EOPNOTSUPP; in ib_map_mr_sg()
2705 mr->page_size = page_size; in ib_map_mr_sg()
2707 return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset); in ib_map_mr_sg()
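
ib_map_mr_sg() above maps only the largest page-aligned prefix of the scatterlist. A hedged sketch of how a caller checks for a partial mapping before posting an IB_WR_REG_MR work request (the scatterlist is assumed to be DMA-mapped already):

#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

static int example_map_mr(struct ib_mr *mr, struct scatterlist *sg,
			  int sg_nents)
{
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < 0)
		return n;	/* mapping failed */
	if (n < sg_nents)
		return -EINVAL;	/* only a prefix fit; caller must split the I/O */
	return 0;
}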
2712 * ib_sg_to_pages() - Convert the largest prefix of a sg list
2740 u64 page_mask = ~((u64)mr->page_size - 1); in ib_sg_to_pages()
2744 return -EINVAL; in ib_sg_to_pages()
2746 mr->iova = sg_dma_address(&sgl[0]) + sg_offset; in ib_sg_to_pages()
2747 mr->length = 0; in ib_sg_to_pages()
2752 unsigned int dma_len = sg_dma_len(sg) - sg_offset; in ib_sg_to_pages()
2758 * end of element i-1 or the start of element i is not aligned in ib_sg_to_pages()
2768 * enough just update mr->length. Otherwise start in ib_sg_to_pages()
2777 sg_offset = prev_addr - sg_dma_address(sg); in ib_sg_to_pages()
2778 mr->length += prev_addr - dma_addr; in ib_sg_to_pages()
2785 page_addr += mr->page_size; in ib_sg_to_pages()
2788 mr->length += dma_len; in ib_sg_to_pages()
2808 struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe, in ib_drain_qp_done()
2811 complete(&cqe->done); in ib_drain_qp_done()
2819 struct ib_cq *cq = qp->send_cq; in __ib_drain_sq()
2846 if (cq->poll_ctx == IB_POLL_DIRECT) in __ib_drain_sq()
2848 ib_process_cq_direct(cq, -1); in __ib_drain_sq()
2858 struct ib_cq *cq = qp->recv_cq; in __ib_drain_rq()
2880 if (cq->poll_ctx == IB_POLL_DIRECT) in __ib_drain_rq()
2882 ib_process_cq_direct(cq, -1); in __ib_drain_rq()
2888 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
2892 * If the device has a provider-specific drain function, then
2908 if (qp->device->ops.drain_sq) in ib_drain_sq()
2909 qp->device->ops.drain_sq(qp); in ib_drain_sq()
2912 trace_cq_drain_complete(qp->send_cq); in ib_drain_sq()
2917 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
2921 * If the device has a provider-specific drain function, then
2937 if (qp->device->ops.drain_rq) in ib_drain_rq()
2938 qp->device->ops.drain_rq(qp); in ib_drain_rq()
2941 trace_cq_drain_complete(qp->recv_cq); in ib_drain_rq()
2946 * ib_drain_qp() - Block until all CQEs have been consumed by the
2963 if (!qp->srq) in ib_drain_qp()
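
The drain helpers above block until all posted work completes. As a hedged sketch of the teardown ordering they imply (ib_drain_qp() moves the QP to the error state internally and skips the RQ when an SRQ is attached, per the check above):

#include <rdma/ib_verbs.h>

static void example_teardown_qp(struct ib_qp *qp)
{
	ib_drain_qp(qp);	/* reap every SQ/RQ CQE before freeing resources */
	ib_destroy_qp(qp);
}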
2977 if (!device->ops.rdma_netdev_get_params) in rdma_alloc_netdev()
2978 return ERR_PTR(-EOPNOTSUPP); in rdma_alloc_netdev()
2980 rc = device->ops.rdma_netdev_get_params(device, port_num, type, in rdma_alloc_netdev()
2988 return ERR_PTR(-ENOMEM); in rdma_alloc_netdev()
3003 if (!device->ops.rdma_netdev_get_params) in rdma_init_netdev()
3004 return -EOPNOTSUPP; in rdma_init_netdev()
3006 rc = device->ops.rdma_netdev_get_params(device, port_num, type, in rdma_init_netdev()
3021 biter->__sg = sglist; in __rdma_block_iter_start()
3022 biter->__sg_nents = nents; in __rdma_block_iter_start()
3025 biter->__pg_bit = __fls(pgsz); in __rdma_block_iter_start()
3034 if (!biter->__sg_nents || !biter->__sg) in __rdma_block_iter_next()
3037 biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance; in __rdma_block_iter_next()
3038 block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1); in __rdma_block_iter_next()
3039 sg_delta = BIT_ULL(biter->__pg_bit) - block_offset; in __rdma_block_iter_next()
3041 if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) { in __rdma_block_iter_next()
3042 biter->__sg_advance += sg_delta; in __rdma_block_iter_next()
3044 biter->__sg_advance = 0; in __rdma_block_iter_next()
3045 biter->__sg = sg_next(biter->__sg); in __rdma_block_iter_next()
3046 biter->__sg_nents--; in __rdma_block_iter_next()
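
__rdma_block_iter_start()/__rdma_block_iter_next() above are normally used through the rdma_for_each_block() macro to walk a DMA-mapped scatterlist in aligned blocks of the chosen page size, the pattern drivers use when building device page lists. A hedged sketch:

#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

static unsigned int example_count_blocks(struct scatterlist *sgl,
					 unsigned int nents,
					 unsigned long pgsz)
{
	struct ib_block_iter biter;
	unsigned int nblocks = 0;

	rdma_for_each_block(sgl, &biter, nents, pgsz)
		nblocks++;	/* rdma_block_iter_dma_address(&biter) gives each block's start */

	return nblocks;
}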
3054 * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
3070 stats->is_disabled = kcalloc(BITS_TO_LONGS(num_counters), in rdma_alloc_hw_stats_struct()
3071 sizeof(*stats->is_disabled), GFP_KERNEL); in rdma_alloc_hw_stats_struct()
3072 if (!stats->is_disabled) in rdma_alloc_hw_stats_struct()
3075 stats->descs = descs; in rdma_alloc_hw_stats_struct()
3076 stats->num_counters = num_counters; in rdma_alloc_hw_stats_struct()
3077 stats->lifespan = msecs_to_jiffies(lifespan); in rdma_alloc_hw_stats_struct()
3078 mutex_init(&stats->lock); in rdma_alloc_hw_stats_struct()
3089 * rdma_free_hw_stats_struct - Helper function to release rdma_hw_stats
3097 kfree(stats->is_disabled); in rdma_free_hw_stats_struct()