Lines Matching +full:reg +full:- +full:names
2 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
7 * COPYING in the main directory of this source tree, or the BSD-type
22 * Neither the name of the Network Appliance, Inc. nor the names of
70 * rdma tasklet function -- just turn around and call the func
89 list_del(&rep->rr_list); in rpcrdma_run_tasklet()
90 func = rep->rr_func; in rpcrdma_run_tasklet()
91 rep->rr_func = NULL; in rpcrdma_run_tasklet()
112 list_add_tail(&rep->rr_list, &rpcrdma_tasklets_g); in rpcrdma_schedule_tasklet()
123 __func__, event->event, event->device->name, context); in rpcrdma_qp_async_error_upcall()
124 if (ep->rep_connected == 1) { in rpcrdma_qp_async_error_upcall()
125 ep->rep_connected = -EIO; in rpcrdma_qp_async_error_upcall()
126 ep->rep_func(ep); in rpcrdma_qp_async_error_upcall()
127 wake_up_all(&ep->rep_connect_wait); in rpcrdma_qp_async_error_upcall()
137 __func__, event->event, event->device->name, context); in rpcrdma_cq_async_error_upcall()
138 if (ep->rep_connected == 1) { in rpcrdma_cq_async_error_upcall()
139 ep->rep_connected = -EIO; in rpcrdma_cq_async_error_upcall()
140 ep->rep_func(ep); in rpcrdma_cq_async_error_upcall()
141 wake_up_all(&ep->rep_connect_wait); in rpcrdma_cq_async_error_upcall()
150 (struct rpcrdma_rep *)(unsigned long) wc->wr_id; in rpcrdma_event_process()
153 __func__, rep, wc->status, wc->opcode, wc->byte_len); in rpcrdma_event_process()
158 if (IB_WC_SUCCESS != wc->status) { in rpcrdma_event_process()
160 __func__, wc->opcode, wc->status); in rpcrdma_event_process()
161 rep->rr_len = ~0U; in rpcrdma_event_process()
162 if (wc->opcode != IB_WC_FAST_REG_MR && wc->opcode != IB_WC_LOCAL_INV) in rpcrdma_event_process()
167 switch (wc->opcode) { in rpcrdma_event_process()
169 frmr = (struct rpcrdma_mw *)(unsigned long)wc->wr_id; in rpcrdma_event_process()
170 frmr->r.frmr.state = FRMR_IS_VALID; in rpcrdma_event_process()
173 frmr = (struct rpcrdma_mw *)(unsigned long)wc->wr_id; in rpcrdma_event_process()
174 frmr->r.frmr.state = FRMR_IS_INVALID; in rpcrdma_event_process()
177 rep->rr_len = wc->byte_len; in rpcrdma_event_process()
179 rdmab_to_ia(rep->rr_buffer)->ri_id->device, in rpcrdma_event_process()
180 rep->rr_iov.addr, rep->rr_len, DMA_FROM_DEVICE); in rpcrdma_event_process()
182 if (rep->rr_len >= 16) { in rpcrdma_event_process()
184 (struct rpcrdma_msg *) rep->rr_base; in rpcrdma_event_process()
185 unsigned int credits = ntohl(p->rm_credit); in rpcrdma_event_process()
191 } else if (credits > rep->rr_buffer->rb_max_requests) { in rpcrdma_event_process()
193 " over-crediting: %d (%d)\n", in rpcrdma_event_process()
195 rep->rr_buffer->rb_max_requests); in rpcrdma_event_process()
196 credits = rep->rr_buffer->rb_max_requests; in rpcrdma_event_process()
198 atomic_set(&rep->rr_buffer->rb_credits, credits); in rpcrdma_event_process()
206 __func__, wc->opcode); in rpcrdma_event_process()
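
The hits from source lines 182-198 implement the RPC/RDMA credit update: when a receive carries at least the 16-byte inline header, rm_credit is read from the header, clamped to the range [1, rb_max_requests], and published with atomic_set(). A standalone sketch of the clamp, with illustrative names rather than the kernel's:

    /* Credit clamp as assembled from the hits above; plain C, hypothetical names. */
    static unsigned int clamp_credits(unsigned int advertised,
                                      unsigned int max_requests)
    {
            if (advertised == 0)
                    advertised = 1;            /* zero credits would stall the transport */
            else if (advertised > max_requests)
                    advertised = max_requests; /* cap an over-crediting server */
            return advertised;
    }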
287 struct rpcrdma_xprt *xprt = id->context; in rpcrdma_conn_upcall()
288 struct rpcrdma_ia *ia = &xprt->rx_ia; in rpcrdma_conn_upcall()
289 struct rpcrdma_ep *ep = &xprt->rx_ep; in rpcrdma_conn_upcall()
291 struct sockaddr_in *addr = (struct sockaddr_in *) &ep->rep_remote_addr; in rpcrdma_conn_upcall()
297 switch (event->event) { in rpcrdma_conn_upcall()
300 ia->ri_async_rc = 0; in rpcrdma_conn_upcall()
301 complete(&ia->ri_done); in rpcrdma_conn_upcall()
304 ia->ri_async_rc = -EHOSTUNREACH; in rpcrdma_conn_upcall()
307 complete(&ia->ri_done); in rpcrdma_conn_upcall()
310 ia->ri_async_rc = -ENETUNREACH; in rpcrdma_conn_upcall()
313 complete(&ia->ri_done); in rpcrdma_conn_upcall()
317 ib_query_qp(ia->ri_id->qp, &attr, in rpcrdma_conn_upcall()
325 connstate = -ENOTCONN; in rpcrdma_conn_upcall()
328 connstate = -ENETDOWN; in rpcrdma_conn_upcall()
331 connstate = -ECONNREFUSED; in rpcrdma_conn_upcall()
334 connstate = -ECONNABORTED; in rpcrdma_conn_upcall()
337 connstate = -ENODEV; in rpcrdma_conn_upcall()
341 (event->event <= 11) ? conn[event->event] : in rpcrdma_conn_upcall()
343 &addr->sin_addr.s_addr, in rpcrdma_conn_upcall()
344 ntohs(addr->sin_port), in rpcrdma_conn_upcall()
345 ep, event->event); in rpcrdma_conn_upcall()
346 atomic_set(&rpcx_to_rdmax(ep->rep_xprt)->rx_buf.rb_credits, 1); in rpcrdma_conn_upcall()
349 ep->rep_connected = connstate; in rpcrdma_conn_upcall()
350 ep->rep_func(ep); in rpcrdma_conn_upcall()
351 wake_up_all(&ep->rep_connect_wait); in rpcrdma_conn_upcall()
355 __func__, event->event); in rpcrdma_conn_upcall()
362 int tird = ep->rep_remote_cma.responder_resources; in rpcrdma_conn_upcall()
365 &addr->sin_addr.s_addr, in rpcrdma_conn_upcall()
366 ntohs(addr->sin_port), in rpcrdma_conn_upcall()
367 ia->ri_id->device->name, in rpcrdma_conn_upcall()
368 ia->ri_memreg_strategy, in rpcrdma_conn_upcall()
369 xprt->rx_buf.rb_max_requests, in rpcrdma_conn_upcall()
373 &addr->sin_addr.s_addr, in rpcrdma_conn_upcall()
374 ntohs(addr->sin_port), in rpcrdma_conn_upcall()
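
Source lines 297-355 of rpcrdma_conn_upcall() produce two kinds of state: address/route resolution results completed through ri_done, and connection transitions published as errno-style values in rep_connected before waking rep_connect_wait. A sketch of the connection-side mapping; the event-to-errno pairing follows the order of the hits above and is an assumption, not a quote of the full switch:

    /* Plausible pairing of CM events with the connstate values seen above. */
    switch (event->event) {
    case RDMA_CM_EVENT_CONNECT_ERROR:   connstate = -ENOTCONN;     break;
    case RDMA_CM_EVENT_UNREACHABLE:     connstate = -ENETDOWN;     break;
    case RDMA_CM_EVENT_REJECTED:        connstate = -ECONNREFUSED; break;
    case RDMA_CM_EVENT_DISCONNECTED:    connstate = -ECONNABORTED; break;
    case RDMA_CM_EVENT_DEVICE_REMOVAL:  connstate = -ENODEV;       break;
    default:                            break;
    }
    ep->rep_connected = connstate;
    ep->rep_func(ep);
    wake_up_all(&ep->rep_connect_wait);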
389 init_completion(&ia->ri_done); in rpcrdma_create_id()
399 ia->ri_async_rc = -ETIMEDOUT; in rpcrdma_create_id()
406 wait_for_completion_interruptible_timeout(&ia->ri_done, in rpcrdma_create_id()
408 rc = ia->ri_async_rc; in rpcrdma_create_id()
412 ia->ri_async_rc = -ETIMEDOUT; in rpcrdma_create_id()
419 wait_for_completion_interruptible_timeout(&ia->ri_done, in rpcrdma_create_id()
421 rc = ia->ri_async_rc; in rpcrdma_create_id()
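
rpcrdma_create_id() (source lines 389-421) applies the same pattern twice, once for address and once for route resolution: seed ri_async_rc with -ETIMEDOUT, start the asynchronous CM call, wait interruptibly with a timeout, and read back whatever result the upcall stored before completing ri_done. Condensed, with resolve() standing in for rdma_resolve_addr()/rdma_resolve_route() and the timeout constant as an assumption:

    /* The seed-then-wait idiom used twice above. */
    ia->ri_async_rc = -ETIMEDOUT;   /* result if the upcall never arrives */
    rc = resolve(ia->ri_id, ...);   /* rdma_resolve_addr() or _route() */
    if (rc)
            goto out;
    wait_for_completion_interruptible_timeout(&ia->ri_done,
                            RESOLVE_TIMEOUT);  /* hypothetical constant */
    rc = ia->ri_async_rc;           /* 0 or errno written by rpcrdma_conn_upcall() */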
463 struct rpcrdma_ia *ia = &xprt->rx_ia; in rpcrdma_ia_open()
465 ia->ri_id = rpcrdma_create_id(xprt, ia, addr); in rpcrdma_ia_open()
466 if (IS_ERR(ia->ri_id)) { in rpcrdma_ia_open()
467 rc = PTR_ERR(ia->ri_id); in rpcrdma_ia_open()
471 ia->ri_pd = ib_alloc_pd(ia->ri_id->device); in rpcrdma_ia_open()
472 if (IS_ERR(ia->ri_pd)) { in rpcrdma_ia_open()
473 rc = PTR_ERR(ia->ri_pd); in rpcrdma_ia_open()
484 rc = ib_query_device(ia->ri_id->device, &devattr); in rpcrdma_ia_open()
492 ia->ri_have_dma_lkey = 1; in rpcrdma_ia_open()
493 ia->ri_dma_lkey = ia->ri_id->device->local_dma_lkey; in rpcrdma_ia_open()
508 if (!ia->ri_id->device->alloc_fmr) { in rpcrdma_ia_open()
525 /* Requires both frmr reg and local dma lkey */ in rpcrdma_ia_open()
548 * order to do a memory window-based bind. This base registration in rpcrdma_ia_open()
549 * is protected from remote access - that is enabled only by binding in rpcrdma_ia_open()
572 if (ia->ri_have_dma_lkey) in rpcrdma_ia_open()
576 ia->ri_bind_mem = ib_get_dma_mr(ia->ri_pd, mem_priv); in rpcrdma_ia_open()
577 if (IS_ERR(ia->ri_bind_mem)) { in rpcrdma_ia_open()
581 __func__, PTR_ERR(ia->ri_bind_mem)); in rpcrdma_ia_open()
583 ia->ri_bind_mem = NULL; in rpcrdma_ia_open()
589 rc = -EINVAL; in rpcrdma_ia_open()
595 /* Else will do memory reg/dereg for each chunk */ in rpcrdma_ia_open()
596 ia->ri_memreg_strategy = memreg; in rpcrdma_ia_open()
600 rdma_destroy_id(ia->ri_id); in rpcrdma_ia_open()
601 ia->ri_id = NULL; in rpcrdma_ia_open()
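
rpcrdma_ia_open() (source lines 463-601) resolves the connection id, allocates the PD, then settles the memory registration strategy: a device-global DMA lkey is recorded if present (lines 492-493), FRMR and FMR support are probed (e.g. the alloc_fmr test at line 508), strategies needing a base MR call ib_get_dma_mr(), and an unusable strategy ends in -EINVAL. A compressed sketch of the fallback order; device_supports_frmr() and needs_base_mr() are stand-ins for the inline checks, not real helpers:

    /* Strategy fallback, compressed from rpcrdma_ia_open(); helper names
     * are illustrative stand-ins for inline capability tests. */
    if (memreg == RPCRDMA_FRMR && !device_supports_frmr(ia))
            memreg = RPCRDMA_MTHCAFMR;          /* try FMRs instead */
    if (memreg == RPCRDMA_MTHCAFMR && !ia->ri_id->device->alloc_fmr)
            memreg = RPCRDMA_REGISTER;          /* per-chunk reg/dereg */
    if (needs_base_mr(memreg) && !ia->ri_have_dma_lkey) {
            ia->ri_bind_mem = ib_get_dma_mr(ia->ri_pd, mem_priv);
            if (IS_ERR(ia->ri_bind_mem))
                    return PTR_ERR(ia->ri_bind_mem);
    }
    ia->ri_memreg_strategy = memreg;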
617 if (ia->ri_bind_mem != NULL) { in rpcrdma_ia_close()
618 rc = ib_dereg_mr(ia->ri_bind_mem); in rpcrdma_ia_close()
622 if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) { in rpcrdma_ia_close()
623 if (ia->ri_id->qp) in rpcrdma_ia_close()
624 rdma_destroy_qp(ia->ri_id); in rpcrdma_ia_close()
625 rdma_destroy_id(ia->ri_id); in rpcrdma_ia_close()
626 ia->ri_id = NULL; in rpcrdma_ia_close()
628 if (ia->ri_pd != NULL && !IS_ERR(ia->ri_pd)) { in rpcrdma_ia_close()
629 rc = ib_dealloc_pd(ia->ri_pd); in rpcrdma_ia_close()
645 rc = ib_query_device(ia->ri_id->device, &devattr); in rpcrdma_ep_create()
653 if (cdata->max_requests > devattr.max_qp_wr) in rpcrdma_ep_create()
654 cdata->max_requests = devattr.max_qp_wr; in rpcrdma_ep_create()
656 ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall; in rpcrdma_ep_create()
657 ep->rep_attr.qp_context = ep; in rpcrdma_ep_create()
659 ep->rep_attr.srq = NULL; in rpcrdma_ep_create()
660 ep->rep_attr.cap.max_send_wr = cdata->max_requests; in rpcrdma_ep_create()
661 switch (ia->ri_memreg_strategy) { in rpcrdma_ep_create()
664 * 1. FRMR reg WR for head in rpcrdma_ep_create()
666 * 3. FRMR reg WR for pagelist in rpcrdma_ep_create()
668 * 5. FRMR reg WR for tail in rpcrdma_ep_create()
672 ep->rep_attr.cap.max_send_wr *= 7; in rpcrdma_ep_create()
673 if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr) { in rpcrdma_ep_create()
674 cdata->max_requests = devattr.max_qp_wr / 7; in rpcrdma_ep_create()
675 if (!cdata->max_requests) in rpcrdma_ep_create()
676 return -EINVAL; in rpcrdma_ep_create()
677 ep->rep_attr.cap.max_send_wr = cdata->max_requests * 7; in rpcrdma_ep_create()
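
For FRMR, each RPC may consume up to seven send-queue entries (source lines 664-677): a fast-reg and a local-invalidate work request for each of head, pagelist and tail, plus the SEND itself, so max_send_wr is scaled by 7 and max_requests shrunk to fit the device. A worked example with illustrative numbers:

    /* FRMR send-queue sizing, worked through with example values. */
    unsigned int max_requests = 128;               /* RPC slots wanted      */
    unsigned int max_send_wr  = max_requests * 7;  /* 896 WRs required      */
    unsigned int max_qp_wr    = 512;               /* device limit, say     */
    if (max_send_wr > max_qp_wr) {
            max_requests = max_qp_wr / 7;          /* 73 slots actually fit */
            if (!max_requests)
                    return -EINVAL;                /* device too small      */
            max_send_wr = max_requests * 7;        /* 511 WRs requested     */
    }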
682 /* Add room for mw_binds+unbinds - overkill! */ in rpcrdma_ep_create()
683 ep->rep_attr.cap.max_send_wr++; in rpcrdma_ep_create()
684 ep->rep_attr.cap.max_send_wr *= (2 * RPCRDMA_MAX_SEGS); in rpcrdma_ep_create()
685 if (ep->rep_attr.cap.max_send_wr > devattr.max_qp_wr) in rpcrdma_ep_create()
686 return -EINVAL; in rpcrdma_ep_create()
691 ep->rep_attr.cap.max_recv_wr = cdata->max_requests; in rpcrdma_ep_create()
692 ep->rep_attr.cap.max_send_sge = (cdata->padding ? 4 : 2); in rpcrdma_ep_create()
693 ep->rep_attr.cap.max_recv_sge = 1; in rpcrdma_ep_create()
694 ep->rep_attr.cap.max_inline_data = 0; in rpcrdma_ep_create()
695 ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR; in rpcrdma_ep_create()
696 ep->rep_attr.qp_type = IB_QPT_RC; in rpcrdma_ep_create()
697 ep->rep_attr.port_num = ~0; in rpcrdma_ep_create()
702 ep->rep_attr.cap.max_send_wr, in rpcrdma_ep_create()
703 ep->rep_attr.cap.max_recv_wr, in rpcrdma_ep_create()
704 ep->rep_attr.cap.max_send_sge, in rpcrdma_ep_create()
705 ep->rep_attr.cap.max_recv_sge); in rpcrdma_ep_create()
708 ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 /* - 1*/; in rpcrdma_ep_create()
709 switch (ia->ri_memreg_strategy) { in rpcrdma_ep_create()
712 ep->rep_cqinit -= RPCRDMA_MAX_SEGS; in rpcrdma_ep_create()
717 if (ep->rep_cqinit <= 2) in rpcrdma_ep_create()
718 ep->rep_cqinit = 0; in rpcrdma_ep_create()
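
rep_cqinit (source lines 708-718) is the budget for unsignaled sends: roughly half the send queue, reduced by RPCRDMA_MAX_SEGS for the memory-window strategies, and disabled entirely when it would be 2 or less. The DECR_CQCOUNT() calls in the registration paths below draw on this budget. A plausible shape for the scheme, assuming macro definitions along the lines of those in xprt_rdma.h:

    /* Assumed shape of the completion-throttle macros; illustration only. */
    #define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
    #define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)

    /* In the send path: signal a completion only when the budget runs out. */
    if (DECR_CQCOUNT(ep) > 0)
            send_wr.send_flags = 0;                /* unsignaled */
    else {
            INIT_CQCOUNT(ep);                      /* reset the budget */
            send_wr.send_flags = IB_SEND_SIGNALED; /* reap accumulated WRs */
    }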
720 ep->rep_ia = ia; in rpcrdma_ep_create()
721 init_waitqueue_head(&ep->rep_connect_wait); in rpcrdma_ep_create()
728 ep->rep_cq = ib_create_cq(ia->ri_id->device, rpcrdma_cq_event_upcall, in rpcrdma_ep_create()
730 ep->rep_attr.cap.max_recv_wr + in rpcrdma_ep_create()
731 ep->rep_attr.cap.max_send_wr + 1, 0); in rpcrdma_ep_create()
732 if (IS_ERR(ep->rep_cq)) { in rpcrdma_ep_create()
733 rc = PTR_ERR(ep->rep_cq); in rpcrdma_ep_create()
739 rc = ib_req_notify_cq(ep->rep_cq, IB_CQ_NEXT_COMP); in rpcrdma_ep_create()
746 ep->rep_attr.send_cq = ep->rep_cq; in rpcrdma_ep_create()
747 ep->rep_attr.recv_cq = ep->rep_cq; in rpcrdma_ep_create()
752 ep->rep_remote_cma.private_data = NULL; in rpcrdma_ep_create()
753 ep->rep_remote_cma.private_data_len = 0; in rpcrdma_ep_create()
756 ep->rep_remote_cma.initiator_depth = 0; in rpcrdma_ep_create()
757 if (ia->ri_memreg_strategy == RPCRDMA_BOUNCEBUFFERS) in rpcrdma_ep_create()
758 ep->rep_remote_cma.responder_resources = 0; in rpcrdma_ep_create()
760 ep->rep_remote_cma.responder_resources = 32; in rpcrdma_ep_create()
762 ep->rep_remote_cma.responder_resources = devattr.max_qp_rd_atom; in rpcrdma_ep_create()
764 ep->rep_remote_cma.retry_count = 7; in rpcrdma_ep_create()
765 ep->rep_remote_cma.flow_control = 0; in rpcrdma_ep_create()
766 ep->rep_remote_cma.rnr_retry_count = 0; in rpcrdma_ep_create()
771 err = ib_destroy_cq(ep->rep_cq); in rpcrdma_ep_create()
784 * allocated) or re-create it.
795 __func__, ep->rep_connected); in rpcrdma_ep_destroy()
797 if (ia->ri_id->qp) { in rpcrdma_ep_destroy()
802 rdma_destroy_qp(ia->ri_id); in rpcrdma_ep_destroy()
803 ia->ri_id->qp = NULL; in rpcrdma_ep_destroy()
806 /* padding - could be done in rpcrdma_buffer_destroy... */ in rpcrdma_ep_destroy()
807 if (ep->rep_pad_mr) { in rpcrdma_ep_destroy()
808 rpcrdma_deregister_internal(ia, ep->rep_pad_mr, &ep->rep_pad); in rpcrdma_ep_destroy()
809 ep->rep_pad_mr = NULL; in rpcrdma_ep_destroy()
812 rpcrdma_clean_cq(ep->rep_cq); in rpcrdma_ep_destroy()
813 rc = ib_destroy_cq(ep->rep_cq); in rpcrdma_ep_destroy()
831 if (ep->rep_connected != 0) { in rpcrdma_ep_connect()
835 if (rc && rc != -ENOTCONN) in rpcrdma_ep_connect()
838 rpcrdma_clean_cq(ep->rep_cq); in rpcrdma_ep_connect()
842 (struct sockaddr *)&xprt->rx_data.addr); in rpcrdma_ep_connect()
847 /* TEMP TEMP TEMP - fail if new device: in rpcrdma_ep_connect()
850 * Re-determine all attributes still sane! in rpcrdma_ep_connect()
854 if (ia->ri_id->device != id->device) { in rpcrdma_ep_connect()
858 rc = -ENETDOWN; in rpcrdma_ep_connect()
862 rdma_destroy_qp(ia->ri_id); in rpcrdma_ep_connect()
863 rdma_destroy_id(ia->ri_id); in rpcrdma_ep_connect()
864 ia->ri_id = id; in rpcrdma_ep_connect()
867 rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr); in rpcrdma_ep_connect()
875 if (strnicmp(ia->ri_id->device->dma_device->bus->name, "pci", 3) == 0) { in rpcrdma_ep_connect()
876 struct pci_dev *pcid = to_pci_dev(ia->ri_id->device->dma_device); in rpcrdma_ep_connect()
877 if (pcid->device == PCI_DEVICE_ID_MELLANOX_TAVOR && in rpcrdma_ep_connect()
878 (pcid->vendor == PCI_VENDOR_ID_MELLANOX || in rpcrdma_ep_connect()
879 pcid->vendor == PCI_VENDOR_ID_TOPSPIN)) { in rpcrdma_ep_connect()
883 rc = ib_modify_qp(ia->ri_id->qp, &attr, IB_QP_PATH_MTU); in rpcrdma_ep_connect()
887 ep->rep_connected = 0; in rpcrdma_ep_connect()
889 rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma); in rpcrdma_ep_connect()
896 wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0); in rpcrdma_ep_connect()
899 * Check state. A non-peer reject indicates no listener in rpcrdma_ep_connect()
902 * undergone a best-effort. in rpcrdma_ep_connect()
904 if (ep->rep_connected == -ECONNREFUSED && in rpcrdma_ep_connect()
906 dprintk("RPC: %s: non-peer_reject, retry\n", __func__); in rpcrdma_ep_connect()
909 if (ep->rep_connected <= 0) { in rpcrdma_ep_connect()
913 (ep->rep_remote_cma.responder_resources == 0 || in rpcrdma_ep_connect()
914 ep->rep_remote_cma.initiator_depth != in rpcrdma_ep_connect()
915 ep->rep_remote_cma.responder_resources)) { in rpcrdma_ep_connect()
916 if (ep->rep_remote_cma.responder_resources == 0) in rpcrdma_ep_connect()
917 ep->rep_remote_cma.responder_resources = 1; in rpcrdma_ep_connect()
918 ep->rep_remote_cma.initiator_depth = in rpcrdma_ep_connect()
919 ep->rep_remote_cma.responder_resources; in rpcrdma_ep_connect()
922 rc = ep->rep_connected; in rpcrdma_ep_connect()
929 ep->rep_connected = rc; in rpcrdma_ep_connect()
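
The reconnect logic (source lines 899-922) treats a non-peer -ECONNREFUSED as transient and retries immediately; for other failures it retries once after repairing suspicious ORD/IRD settings, forcing responder_resources to at least 1 and mirroring it into initiator_depth. Condensed, with the single-retry flag as an illustrative addition:

    /* Retry heuristic, condensed; 'retried' is a stand-in for the real guard. */
    if (ep->rep_connected <= 0) {
            struct rdma_conn_param *p = &ep->rep_remote_cma;
            if (!retried && (p->responder_resources == 0 ||
                             p->initiator_depth != p->responder_resources)) {
                    if (p->responder_resources == 0)
                            p->responder_resources = 1;
                    p->initiator_depth = p->responder_resources;
                    retried = 1;
                    goto retry;       /* one more rdma_connect() pass */
            }
            rc = ep->rep_connected;   /* give up, report the errno */
    }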
947 rpcrdma_clean_cq(ep->rep_cq); in rpcrdma_ep_disconnect()
948 rc = rdma_disconnect(ia->ri_id); in rpcrdma_ep_disconnect()
951 wait_event_interruptible(ep->rep_connect_wait, in rpcrdma_ep_disconnect()
952 ep->rep_connected != 1); in rpcrdma_ep_disconnect()
954 (ep->rep_connected == 1) ? "still " : "dis"); in rpcrdma_ep_disconnect()
957 ep->rep_connected = rc; in rpcrdma_ep_disconnect()
974 buf->rb_max_requests = cdata->max_requests; in rpcrdma_buffer_create()
975 spin_lock_init(&buf->rb_lock); in rpcrdma_buffer_create()
976 atomic_set(&buf->rb_credits, 1); in rpcrdma_buffer_create()
987 len = buf->rb_max_requests * in rpcrdma_buffer_create()
989 len += cdata->padding; in rpcrdma_buffer_create()
990 switch (ia->ri_memreg_strategy) { in rpcrdma_buffer_create()
992 len += buf->rb_max_requests * RPCRDMA_MAX_SEGS * in rpcrdma_buffer_create()
997 len += (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS * in rpcrdma_buffer_create()
1002 len += (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS * in rpcrdma_buffer_create()
1014 rc = -ENOMEM; in rpcrdma_buffer_create()
1017 buf->rb_pool = p; /* for freeing it later */ in rpcrdma_buffer_create()
1019 buf->rb_send_bufs = (struct rpcrdma_req **) p; in rpcrdma_buffer_create()
1020 p = (char *) &buf->rb_send_bufs[buf->rb_max_requests]; in rpcrdma_buffer_create()
1021 buf->rb_recv_bufs = (struct rpcrdma_rep **) p; in rpcrdma_buffer_create()
1022 p = (char *) &buf->rb_recv_bufs[buf->rb_max_requests]; in rpcrdma_buffer_create()
1027 if (cdata->padding) { in rpcrdma_buffer_create()
1028 rc = rpcrdma_register_internal(ia, p, cdata->padding, in rpcrdma_buffer_create()
1029 &ep->rep_pad_mr, &ep->rep_pad); in rpcrdma_buffer_create()
1033 p += cdata->padding; in rpcrdma_buffer_create()
1038 * and also reduce unbind-to-bind collision. in rpcrdma_buffer_create()
1040 INIT_LIST_HEAD(&buf->rb_mws); in rpcrdma_buffer_create()
1042 switch (ia->ri_memreg_strategy) { in rpcrdma_buffer_create()
1044 for (i = buf->rb_max_requests * RPCRDMA_MAX_SEGS; i; i--) { in rpcrdma_buffer_create()
1045 r->r.frmr.fr_mr = ib_alloc_fast_reg_mr(ia->ri_pd, in rpcrdma_buffer_create()
1047 if (IS_ERR(r->r.frmr.fr_mr)) { in rpcrdma_buffer_create()
1048 rc = PTR_ERR(r->r.frmr.fr_mr); in rpcrdma_buffer_create()
1053 r->r.frmr.fr_pgl = in rpcrdma_buffer_create()
1054 ib_alloc_fast_reg_page_list(ia->ri_id->device, in rpcrdma_buffer_create()
1056 if (IS_ERR(r->r.frmr.fr_pgl)) { in rpcrdma_buffer_create()
1057 rc = PTR_ERR(r->r.frmr.fr_pgl); in rpcrdma_buffer_create()
1063 list_add(&r->mw_list, &buf->rb_mws); in rpcrdma_buffer_create()
1069 for (i = (buf->rb_max_requests+1) * RPCRDMA_MAX_SEGS; i; i--) { in rpcrdma_buffer_create()
1072 r->r.fmr = ib_alloc_fmr(ia->ri_pd, in rpcrdma_buffer_create()
1075 if (IS_ERR(r->r.fmr)) { in rpcrdma_buffer_create()
1076 rc = PTR_ERR(r->r.fmr); in rpcrdma_buffer_create()
1081 list_add(&r->mw_list, &buf->rb_mws); in rpcrdma_buffer_create()
1088 for (i = (buf->rb_max_requests+1) * RPCRDMA_MAX_SEGS; i; i--) { in rpcrdma_buffer_create()
1089 r->r.mw = ib_alloc_mw(ia->ri_pd); in rpcrdma_buffer_create()
1090 if (IS_ERR(r->r.mw)) { in rpcrdma_buffer_create()
1091 rc = PTR_ERR(r->r.mw); in rpcrdma_buffer_create()
1096 list_add(&r->mw_list, &buf->rb_mws); in rpcrdma_buffer_create()
1106 * using kmalloc for now -- one for each buf. in rpcrdma_buffer_create()
1108 for (i = 0; i < buf->rb_max_requests; i++) { in rpcrdma_buffer_create()
1112 len = cdata->inline_wsize + sizeof(struct rpcrdma_req); in rpcrdma_buffer_create()
1121 rc = -ENOMEM; in rpcrdma_buffer_create()
1125 buf->rb_send_bufs[i] = req; in rpcrdma_buffer_create()
1126 buf->rb_send_bufs[i]->rl_buffer = buf; in rpcrdma_buffer_create()
1128 rc = rpcrdma_register_internal(ia, req->rl_base, in rpcrdma_buffer_create()
1129 len - offsetof(struct rpcrdma_req, rl_base), in rpcrdma_buffer_create()
1130 &buf->rb_send_bufs[i]->rl_handle, in rpcrdma_buffer_create()
1131 &buf->rb_send_bufs[i]->rl_iov); in rpcrdma_buffer_create()
1135 buf->rb_send_bufs[i]->rl_size = len-sizeof(struct rpcrdma_req); in rpcrdma_buffer_create()
1137 len = cdata->inline_rsize + sizeof(struct rpcrdma_rep); in rpcrdma_buffer_create()
1142 rc = -ENOMEM; in rpcrdma_buffer_create()
1146 buf->rb_recv_bufs[i] = rep; in rpcrdma_buffer_create()
1147 buf->rb_recv_bufs[i]->rr_buffer = buf; in rpcrdma_buffer_create()
1148 init_waitqueue_head(&rep->rr_unbind); in rpcrdma_buffer_create()
1150 rc = rpcrdma_register_internal(ia, rep->rr_base, in rpcrdma_buffer_create()
1151 len - offsetof(struct rpcrdma_rep, rr_base), in rpcrdma_buffer_create()
1152 &buf->rb_recv_bufs[i]->rr_handle, in rpcrdma_buffer_create()
1153 &buf->rb_recv_bufs[i]->rr_iov); in rpcrdma_buffer_create()
1159 __func__, buf->rb_max_requests); in rpcrdma_buffer_create()
1189 for (i = 0; i < buf->rb_max_requests; i++) { in rpcrdma_buffer_destroy()
1190 if (buf->rb_recv_bufs && buf->rb_recv_bufs[i]) { in rpcrdma_buffer_destroy()
1192 buf->rb_recv_bufs[i]->rr_handle, in rpcrdma_buffer_destroy()
1193 &buf->rb_recv_bufs[i]->rr_iov); in rpcrdma_buffer_destroy()
1194 kfree(buf->rb_recv_bufs[i]); in rpcrdma_buffer_destroy()
1196 if (buf->rb_send_bufs && buf->rb_send_bufs[i]) { in rpcrdma_buffer_destroy()
1197 while (!list_empty(&buf->rb_mws)) { in rpcrdma_buffer_destroy()
1198 r = list_entry(buf->rb_mws.next, in rpcrdma_buffer_destroy()
1200 list_del(&r->mw_list); in rpcrdma_buffer_destroy()
1201 switch (ia->ri_memreg_strategy) { in rpcrdma_buffer_destroy()
1203 rc = ib_dereg_mr(r->r.frmr.fr_mr); in rpcrdma_buffer_destroy()
1209 ib_free_fast_reg_page_list(r->r.frmr.fr_pgl); in rpcrdma_buffer_destroy()
1212 rc = ib_dealloc_fmr(r->r.fmr); in rpcrdma_buffer_destroy()
1221 rc = ib_dealloc_mw(r->r.mw); in rpcrdma_buffer_destroy()
1233 buf->rb_send_bufs[i]->rl_handle, in rpcrdma_buffer_destroy()
1234 &buf->rb_send_bufs[i]->rl_iov); in rpcrdma_buffer_destroy()
1235 kfree(buf->rb_send_bufs[i]); in rpcrdma_buffer_destroy()
1239 kfree(buf->rb_pool); in rpcrdma_buffer_destroy()
1248 * *next* available buffer (non-NULL). They are incremented after
1259 spin_lock_irqsave(&buffers->rb_lock, flags); in rpcrdma_buffer_get()
1260 if (buffers->rb_send_index == buffers->rb_max_requests) { in rpcrdma_buffer_get()
1261 spin_unlock_irqrestore(&buffers->rb_lock, flags); in rpcrdma_buffer_get()
1266 req = buffers->rb_send_bufs[buffers->rb_send_index]; in rpcrdma_buffer_get()
1267 if (buffers->rb_send_index < buffers->rb_recv_index) { in rpcrdma_buffer_get()
1270 buffers->rb_recv_index - buffers->rb_send_index); in rpcrdma_buffer_get()
1271 req->rl_reply = NULL; in rpcrdma_buffer_get()
1273 req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index]; in rpcrdma_buffer_get()
1274 buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL; in rpcrdma_buffer_get()
1276 buffers->rb_send_bufs[buffers->rb_send_index++] = NULL; in rpcrdma_buffer_get()
1277 if (!list_empty(&buffers->rb_mws)) { in rpcrdma_buffer_get()
1278 i = RPCRDMA_MAX_SEGS - 1; in rpcrdma_buffer_get()
1280 r = list_entry(buffers->rb_mws.next, in rpcrdma_buffer_get()
1282 list_del(&r->mw_list); in rpcrdma_buffer_get()
1283 req->rl_segments[i].mr_chunk.rl_mw = r; in rpcrdma_buffer_get()
1284 } while (--i >= 0); in rpcrdma_buffer_get()
1286 spin_unlock_irqrestore(&buffers->rb_lock, flags); in rpcrdma_buffer_get()
1292 * Pre-decrement counter/array index.
1297 struct rpcrdma_buffer *buffers = req->rl_buffer; in rpcrdma_buffer_put()
1302 BUG_ON(req->rl_nchunks != 0); in rpcrdma_buffer_put()
1303 spin_lock_irqsave(&buffers->rb_lock, flags); in rpcrdma_buffer_put()
1304 buffers->rb_send_bufs[--buffers->rb_send_index] = req; in rpcrdma_buffer_put()
1305 req->rl_niovs = 0; in rpcrdma_buffer_put()
1306 if (req->rl_reply) { in rpcrdma_buffer_put()
1307 buffers->rb_recv_bufs[--buffers->rb_recv_index] = req->rl_reply; in rpcrdma_buffer_put()
1308 init_waitqueue_head(&req->rl_reply->rr_unbind); in rpcrdma_buffer_put()
1309 req->rl_reply->rr_func = NULL; in rpcrdma_buffer_put()
1310 req->rl_reply = NULL; in rpcrdma_buffer_put()
1312 switch (ia->ri_memreg_strategy) { in rpcrdma_buffer_put()
1324 mw = &req->rl_segments[i].mr_chunk.rl_mw; in rpcrdma_buffer_put()
1325 list_add_tail(&(*mw)->mw_list, &buffers->rb_mws); in rpcrdma_buffer_put()
1328 list_add_tail(&req->rl_segments[0].mr_chunk.rl_mw->mw_list, in rpcrdma_buffer_put()
1329 &buffers->rb_mws); in rpcrdma_buffer_put()
1330 req->rl_segments[0].mr_chunk.rl_mw = NULL; in rpcrdma_buffer_put()
1335 spin_unlock_irqrestore(&buffers->rb_lock, flags); in rpcrdma_buffer_put()
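
rb_send_index and rb_recv_index always name the next available slot (comment at source line 1248): rpcrdma_buffer_get() reads the slot and post-increments, rpcrdma_buffer_put() pre-decrements and writes it back, so each array behaves as a LIFO stack under rb_lock. The discipline in isolation, as plain C with hypothetical names:

    /* The two-index buffer discipline reduced to a pointer stack. */
    #define SLOTS 16
    struct slot_stack {
            void *slot[SLOTS];
            unsigned int next;      /* index of the next available entry */
    };

    static void *slot_get(struct slot_stack *s)   /* read, post-increment */
    {
            void *p = s->slot[s->next];
            s->slot[s->next++] = NULL;
            return p;
    }

    static void slot_put(struct slot_stack *s, void *p) /* pre-decrement, write */
    {
            s->slot[--s->next] = p;
    }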
1341 * Post-increment counter/array index.
1346 struct rpcrdma_buffer *buffers = req->rl_buffer; in rpcrdma_recv_buffer_get()
1349 if (req->rl_iov.length == 0) /* special case xprt_rdma_allocate() */ in rpcrdma_recv_buffer_get()
1350 buffers = ((struct rpcrdma_req *) buffers)->rl_buffer; in rpcrdma_recv_buffer_get()
1351 spin_lock_irqsave(&buffers->rb_lock, flags); in rpcrdma_recv_buffer_get()
1352 if (buffers->rb_recv_index < buffers->rb_max_requests) { in rpcrdma_recv_buffer_get()
1353 req->rl_reply = buffers->rb_recv_bufs[buffers->rb_recv_index]; in rpcrdma_recv_buffer_get()
1354 buffers->rb_recv_bufs[buffers->rb_recv_index++] = NULL; in rpcrdma_recv_buffer_get()
1356 spin_unlock_irqrestore(&buffers->rb_lock, flags); in rpcrdma_recv_buffer_get()
1362 * aborting unbinds. Pre-decrement counter/array index.
1367 struct rpcrdma_buffer *buffers = rep->rr_buffer; in rpcrdma_recv_buffer_put()
1370 rep->rr_func = NULL; in rpcrdma_recv_buffer_put()
1371 spin_lock_irqsave(&buffers->rb_lock, flags); in rpcrdma_recv_buffer_put()
1372 buffers->rb_recv_bufs[--buffers->rb_recv_index] = rep; in rpcrdma_recv_buffer_put()
1373 spin_unlock_irqrestore(&buffers->rb_lock, flags); in rpcrdma_recv_buffer_put()
1377 * Wrappers for internal-use kmalloc memory registration, used by buffer code.
1389 * All memory passed here was kmalloc'ed, therefore phys-contiguous. in rpcrdma_register_internal()
1391 iov->addr = ib_dma_map_single(ia->ri_id->device, in rpcrdma_register_internal()
1393 iov->length = len; in rpcrdma_register_internal()
1395 if (ia->ri_have_dma_lkey) { in rpcrdma_register_internal()
1397 iov->lkey = ia->ri_dma_lkey; in rpcrdma_register_internal()
1399 } else if (ia->ri_bind_mem != NULL) { in rpcrdma_register_internal()
1401 iov->lkey = ia->ri_bind_mem->lkey; in rpcrdma_register_internal()
1405 ipb.addr = iov->addr; in rpcrdma_register_internal()
1406 ipb.size = iov->length; in rpcrdma_register_internal()
1407 mr = ib_reg_phys_mr(ia->ri_pd, &ipb, 1, in rpcrdma_register_internal()
1408 IB_ACCESS_LOCAL_WRITE, &iov->addr); in rpcrdma_register_internal()
1413 (unsigned long long)iov->addr, len); in rpcrdma_register_internal()
1421 iov->lkey = mr->lkey; in rpcrdma_register_internal()
1434 ib_dma_unmap_single(ia->ri_id->device, in rpcrdma_deregister_internal()
1435 iov->addr, iov->length, DMA_BIDIRECTIONAL); in rpcrdma_deregister_internal()
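
rpcrdma_register_internal() (source lines 1389-1422) maps a kmalloc'ed, physically contiguous buffer and chooses an lkey in priority order: device DMA lkey, the shared ri_bind_mem MR, or a one-entry ib_reg_phys_mr() as a last resort. The order, condensed from the hits above:

    /* lkey selection in rpcrdma_register_internal(), condensed. */
    iov->addr = ib_dma_map_single(ia->ri_id->device, va, len,
                                  DMA_BIDIRECTIONAL);
    iov->length = len;
    if (ia->ri_have_dma_lkey)
            iov->lkey = ia->ri_dma_lkey;        /* device-global lkey */
    else if (ia->ri_bind_mem != NULL)
            iov->lkey = ia->ri_bind_mem->lkey;  /* shared all-memory MR */
    else {
            ipb.addr = iov->addr;               /* contiguous: one entry */
            ipb.size = iov->length;
            mr = ib_reg_phys_mr(ia->ri_pd, &ipb, 1,
                                IB_ACCESS_LOCAL_WRITE, &iov->addr);
            if (!IS_ERR(mr))
                    iov->lkey = mr->lkey;
    }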
1453 seg->mr_dir = writing ? DMA_FROM_DEVICE : DMA_TO_DEVICE; in rpcrdma_map_one()
1454 seg->mr_dmalen = seg->mr_len; in rpcrdma_map_one()
1455 if (seg->mr_page) in rpcrdma_map_one()
1456 seg->mr_dma = ib_dma_map_page(ia->ri_id->device, in rpcrdma_map_one()
1457 seg->mr_page, offset_in_page(seg->mr_offset), in rpcrdma_map_one()
1458 seg->mr_dmalen, seg->mr_dir); in rpcrdma_map_one()
1460 seg->mr_dma = ib_dma_map_single(ia->ri_id->device, in rpcrdma_map_one()
1461 seg->mr_offset, in rpcrdma_map_one()
1462 seg->mr_dmalen, seg->mr_dir); in rpcrdma_map_one()
1463 if (ib_dma_mapping_error(ia->ri_id->device, seg->mr_dma)) { in rpcrdma_map_one()
1466 (unsigned long long)seg->mr_dma, in rpcrdma_map_one()
1467 seg->mr_offset, seg->mr_dmalen); in rpcrdma_map_one()
1474 if (seg->mr_page) in rpcrdma_unmap_one()
1475 ib_dma_unmap_page(ia->ri_id->device, in rpcrdma_unmap_one()
1476 seg->mr_dma, seg->mr_dmalen, seg->mr_dir); in rpcrdma_unmap_one()
1478 ib_dma_unmap_single(ia->ri_id->device, in rpcrdma_unmap_one()
1479 seg->mr_dma, seg->mr_dmalen, seg->mr_dir); in rpcrdma_unmap_one()
1494 pageoff = offset_in_page(seg1->mr_offset); in rpcrdma_register_frmr_external()
1495 seg1->mr_offset -= pageoff; /* start of page */ in rpcrdma_register_frmr_external()
1496 seg1->mr_len += pageoff; in rpcrdma_register_frmr_external()
1497 len = -pageoff; in rpcrdma_register_frmr_external()
1502 seg1->mr_chunk.rl_mw->r.frmr.fr_pgl->page_list[i] = seg->mr_dma; in rpcrdma_register_frmr_external()
1503 len += seg->mr_len; in rpcrdma_register_frmr_external()
1504 BUG_ON(seg->mr_len > PAGE_SIZE); in rpcrdma_register_frmr_external()
1508 if ((i < *nsegs && offset_in_page(seg->mr_offset)) || in rpcrdma_register_frmr_external()
1509 offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len)) in rpcrdma_register_frmr_external()
1513 __func__, seg1->mr_chunk.rl_mw, i); in rpcrdma_register_frmr_external()
1515 if (unlikely(seg1->mr_chunk.rl_mw->r.frmr.state == FRMR_IS_VALID)) { in rpcrdma_register_frmr_external()
1518 seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey); in rpcrdma_register_frmr_external()
1521 invalidate_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw; in rpcrdma_register_frmr_external()
1526 seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey; in rpcrdma_register_frmr_external()
1527 DECR_CQCOUNT(&r_xprt->rx_ep); in rpcrdma_register_frmr_external()
1533 key = (u8)(seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey & 0x000000FF); in rpcrdma_register_frmr_external()
1534 ib_update_fast_reg_key(seg1->mr_chunk.rl_mw->r.frmr.fr_mr, ++key); in rpcrdma_register_frmr_external()
1538 frmr_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw; in rpcrdma_register_frmr_external()
1541 frmr_wr.wr.fast_reg.iova_start = seg1->mr_dma; in rpcrdma_register_frmr_external()
1542 frmr_wr.wr.fast_reg.page_list = seg1->mr_chunk.rl_mw->r.frmr.fr_pgl; in rpcrdma_register_frmr_external()
1550 frmr_wr.wr.fast_reg.rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey; in rpcrdma_register_frmr_external()
1551 DECR_CQCOUNT(&r_xprt->rx_ep); in rpcrdma_register_frmr_external()
1553 rc = ib_post_send(ia->ri_id->qp, post_wr, &bad_wr); in rpcrdma_register_frmr_external()
1558 while (i--) in rpcrdma_register_frmr_external()
1559 rpcrdma_unmap_one(ia, --seg); in rpcrdma_register_frmr_external()
1561 seg1->mr_rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey; in rpcrdma_register_frmr_external()
1562 seg1->mr_base = seg1->mr_dma + pageoff; in rpcrdma_register_frmr_external()
1563 seg1->mr_nsegs = i; in rpcrdma_register_frmr_external()
1564 seg1->mr_len = len; in rpcrdma_register_frmr_external()
1578 while (seg1->mr_nsegs--) in rpcrdma_deregister_frmr_external()
1582 invalidate_wr.wr_id = (unsigned long)(void *)seg1->mr_chunk.rl_mw; in rpcrdma_deregister_frmr_external()
1585 invalidate_wr.ex.invalidate_rkey = seg1->mr_chunk.rl_mw->r.frmr.fr_mr->rkey; in rpcrdma_deregister_frmr_external()
1586 DECR_CQCOUNT(&r_xprt->rx_ep); in rpcrdma_deregister_frmr_external()
1588 rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr); in rpcrdma_deregister_frmr_external()
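
The FRMR register path (source lines 1494-1563) rounds the first segment down to a page boundary, loads one DMA address per page into the fast_reg page list, and, if the MR is still FRMR_IS_VALID, chains a LOCAL_INV ahead of the FAST_REG_MR and bumps the rkey's low byte via ib_update_fast_reg_key(); deregistration (lines 1578-1588) posts a lone LOCAL_INV. A skeleton of the chained post, with fields per the pre-4.x ib_send_wr layout visible above:

    /* Chained invalidate + fast-reg post, skeleton. 'writing' and 'mr' are
     * stand-ins for state carried by the rpcrdma segment. */
    memset(&invalidate_wr, 0, sizeof(invalidate_wr));
    invalidate_wr.opcode = IB_WR_LOCAL_INV;
    invalidate_wr.ex.invalidate_rkey = mr->rkey;
    invalidate_wr.next = &frmr_wr;

    memset(&frmr_wr, 0, sizeof(frmr_wr));
    frmr_wr.opcode = IB_WR_FAST_REG_MR;
    frmr_wr.wr.fast_reg.iova_start = seg1->mr_dma;
    frmr_wr.wr.fast_reg.page_list = seg1->mr_chunk.rl_mw->r.frmr.fr_pgl;
    frmr_wr.wr.fast_reg.page_list_len = i;        /* pages loaded above */
    frmr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
    frmr_wr.wr.fast_reg.length = i << PAGE_SHIFT;
    frmr_wr.wr.fast_reg.access_flags = writing ?
            (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE) :
            IB_ACCESS_REMOTE_READ;
    frmr_wr.wr.fast_reg.rkey = mr->rkey;

    rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr);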
1603 pageoff = offset_in_page(seg1->mr_offset); in rpcrdma_register_fmr_external()
1604 seg1->mr_offset -= pageoff; /* start of page */ in rpcrdma_register_fmr_external()
1605 seg1->mr_len += pageoff; in rpcrdma_register_fmr_external()
1606 len = -pageoff; in rpcrdma_register_fmr_external()
1611 physaddrs[i] = seg->mr_dma; in rpcrdma_register_fmr_external()
1612 len += seg->mr_len; in rpcrdma_register_fmr_external()
1616 if ((i < *nsegs && offset_in_page(seg->mr_offset)) || in rpcrdma_register_fmr_external()
1617 offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len)) in rpcrdma_register_fmr_external()
1620 rc = ib_map_phys_fmr(seg1->mr_chunk.rl_mw->r.fmr, in rpcrdma_register_fmr_external()
1621 physaddrs, i, seg1->mr_dma); in rpcrdma_register_fmr_external()
1625 len, (unsigned long long)seg1->mr_dma, in rpcrdma_register_fmr_external()
1627 while (i--) in rpcrdma_register_fmr_external()
1628 rpcrdma_unmap_one(ia, --seg); in rpcrdma_register_fmr_external()
1630 seg1->mr_rkey = seg1->mr_chunk.rl_mw->r.fmr->rkey; in rpcrdma_register_fmr_external()
1631 seg1->mr_base = seg1->mr_dma + pageoff; in rpcrdma_register_fmr_external()
1632 seg1->mr_nsegs = i; in rpcrdma_register_fmr_external()
1633 seg1->mr_len = len; in rpcrdma_register_fmr_external()
1647 list_add(&seg1->mr_chunk.rl_mw->r.fmr->list, &l); in rpcrdma_deregister_fmr_external()
1649 while (seg1->mr_nsegs--) in rpcrdma_deregister_fmr_external()
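
The FMR variant (source lines 1603-1633) collects the per-segment DMA addresses into physaddrs[] and maps them all with a single ib_map_phys_fmr(); unmapping (lines 1647-1649) goes through ib_unmap_fmr(), which takes a list of FMRs. The unmap idiom, which the hits only show in part:

    /* FMR unmap idiom completing the fragment at source line 1647. */
    LIST_HEAD(l);
    list_add(&seg1->mr_chunk.rl_mw->r.fmr->list, &l);
    rc = ib_unmap_fmr(&l);
    while (seg1->mr_nsegs--)
            rpcrdma_unmap_one(ia, seg++);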
1669 param.mr = ia->ri_bind_mem; in rpcrdma_register_memwin_external()
1671 param.addr = seg->mr_dma; in rpcrdma_register_memwin_external()
1672 param.length = seg->mr_len; in rpcrdma_register_memwin_external()
1676 DECR_CQCOUNT(&r_xprt->rx_ep); in rpcrdma_register_memwin_external()
1677 rc = ib_bind_mw(ia->ri_id->qp, seg->mr_chunk.rl_mw->r.mw, &param); in rpcrdma_register_memwin_external()
1681 __func__, seg->mr_len, in rpcrdma_register_memwin_external()
1682 (unsigned long long)seg->mr_dma, rc); in rpcrdma_register_memwin_external()
1685 seg->mr_rkey = seg->mr_chunk.rl_mw->r.mw->rkey; in rpcrdma_register_memwin_external()
1686 seg->mr_base = param.addr; in rpcrdma_register_memwin_external()
1687 seg->mr_nsegs = 1; in rpcrdma_register_memwin_external()
1701 BUG_ON(seg->mr_nsegs != 1); in rpcrdma_deregister_memwin_external()
1702 param.mr = ia->ri_bind_mem; in rpcrdma_deregister_memwin_external()
1709 INIT_CQCOUNT(&r_xprt->rx_ep); in rpcrdma_deregister_memwin_external()
1713 DECR_CQCOUNT(&r_xprt->rx_ep); in rpcrdma_deregister_memwin_external()
1715 rc = ib_bind_mw(ia->ri_id->qp, seg->mr_chunk.rl_mw->r.mw, &param); in rpcrdma_deregister_memwin_external()
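
Memory windows (source lines 1669-1715) are bound over the all-physical ri_bind_mem MR with ib_bind_mw(); the unbind is simply a zero-length bind, and the unbind path sometimes resets the CQ budget (INIT_CQCOUNT at line 1709) so its completion is signaled. Parameter setup, with the field set assumed from the old struct ib_mw_bind:

    /* ib_mw_bind setup per the fragments; mem_priv is a stand-in for the
     * access flags computed elsewhere in this file. */
    param.mr = ia->ri_bind_mem;       /* window carved out of the base MR */
    param.addr = seg->mr_dma;
    param.length = seg->mr_len;       /* a length of 0 performs the unbind */
    param.mw_access_flags = mem_priv; /* remote read and/or write */
    rc = ib_bind_mw(ia->ri_id->qp, seg->mr_chunk.rl_mw->r.mw, &param);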
1739 ipb[i].addr = seg->mr_dma; in rpcrdma_register_default_external()
1740 ipb[i].size = seg->mr_len; in rpcrdma_register_default_external()
1741 len += seg->mr_len; in rpcrdma_register_default_external()
1745 if ((i < *nsegs && offset_in_page(seg->mr_offset)) || in rpcrdma_register_default_external()
1746 offset_in_page((seg-1)->mr_offset+(seg-1)->mr_len)) in rpcrdma_register_default_external()
1749 seg1->mr_base = seg1->mr_dma; in rpcrdma_register_default_external()
1750 seg1->mr_chunk.rl_mr = ib_reg_phys_mr(ia->ri_pd, in rpcrdma_register_default_external()
1751 ipb, i, mem_priv, &seg1->mr_base); in rpcrdma_register_default_external()
1752 if (IS_ERR(seg1->mr_chunk.rl_mr)) { in rpcrdma_register_default_external()
1753 rc = PTR_ERR(seg1->mr_chunk.rl_mr); in rpcrdma_register_default_external()
1757 (unsigned long long)seg1->mr_dma, i, rc); in rpcrdma_register_default_external()
1758 while (i--) in rpcrdma_register_default_external()
1759 rpcrdma_unmap_one(ia, --seg); in rpcrdma_register_default_external()
1761 seg1->mr_rkey = seg1->mr_chunk.rl_mr->rkey; in rpcrdma_register_default_external()
1762 seg1->mr_nsegs = i; in rpcrdma_register_default_external()
1763 seg1->mr_len = len; in rpcrdma_register_default_external()
1776 rc = ib_dereg_mr(seg1->mr_chunk.rl_mr); in rpcrdma_deregister_default_external()
1777 seg1->mr_chunk.rl_mr = NULL; in rpcrdma_deregister_default_external()
1778 while (seg1->mr_nsegs--) in rpcrdma_deregister_default_external()
1790 struct rpcrdma_ia *ia = &r_xprt->rx_ia; in rpcrdma_register_external()
1793 switch (ia->ri_memreg_strategy) { in rpcrdma_register_external()
1798 seg->mr_rkey = ia->ri_bind_mem->rkey; in rpcrdma_register_external()
1799 seg->mr_base = seg->mr_dma; in rpcrdma_register_external()
1800 seg->mr_nsegs = 1; in rpcrdma_register_external()
1827 return -1; in rpcrdma_register_external()
1836 struct rpcrdma_ia *ia = &r_xprt->rx_ia; in rpcrdma_deregister_external()
1837 int nsegs = seg->mr_nsegs, rc; in rpcrdma_deregister_external()
1839 switch (ia->ri_memreg_strategy) { in rpcrdma_deregister_external()
1868 void (*func)(struct rpcrdma_rep *) = rep->rr_func; in rpcrdma_deregister_external()
1869 rep->rr_func = NULL; in rpcrdma_deregister_external()
1886 struct rpcrdma_rep *rep = req->rl_reply; in rpcrdma_ep_post()
1893 req->rl_reply = NULL; in rpcrdma_ep_post()
1898 send_wr.sg_list = req->rl_send_iov; in rpcrdma_ep_post()
1899 send_wr.num_sge = req->rl_niovs; in rpcrdma_ep_post()
1902 ib_dma_sync_single_for_device(ia->ri_id->device, in rpcrdma_ep_post()
1903 req->rl_send_iov[3].addr, req->rl_send_iov[3].length, in rpcrdma_ep_post()
1905 ib_dma_sync_single_for_device(ia->ri_id->device, in rpcrdma_ep_post()
1906 req->rl_send_iov[1].addr, req->rl_send_iov[1].length, in rpcrdma_ep_post()
1908 ib_dma_sync_single_for_device(ia->ri_id->device, in rpcrdma_ep_post()
1909 req->rl_send_iov[0].addr, req->rl_send_iov[0].length, in rpcrdma_ep_post()
1919 rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail); in rpcrdma_ep_post()
1940 recv_wr.sg_list = &rep->rr_iov; in rpcrdma_ep_post_recv()
1943 ib_dma_sync_single_for_cpu(ia->ri_id->device, in rpcrdma_ep_post_recv()
1944 rep->rr_iov.addr, rep->rr_iov.length, DMA_BIDIRECTIONAL); in rpcrdma_ep_post_recv()
1947 rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail); in rpcrdma_ep_post_recv()
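
rpcrdma_ep_post() (source lines 1886-1919) syncs each send SGE for the device, padding iov [3] first, then payload [1] and header [0], before ib_post_send(); rpcrdma_ep_post_recv() syncs the receive buffer for the CPU and posts it. A minimal receive post assembled from the hits above; wr_id carries the rep pointer recovered at source line 150:

    /* Receive post, assembled from the fragments. */
    recv_wr.next = NULL;
    recv_wr.wr_id = (u64)(unsigned long)rep;
    recv_wr.sg_list = &rep->rr_iov;
    recv_wr.num_sge = 1;
    ib_dma_sync_single_for_cpu(ia->ri_id->device,
            rep->rr_iov.addr, rep->rr_iov.length, DMA_BIDIRECTIONAL);
    rc = ib_post_recv(ia->ri_id->qp, &recv_wr, &recv_wr_fail);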