// Matched lines from the Linux CIFS SMB Direct RDMA transport
// (fs/cifs/smbdirect.c), grouped by function; unmatched context
// between the matched lines is elided.
// SPDX-License-Identifier: GPL-2.0-or-later

/* SMBD minimum receive size and fragmented size defined in [MS-SMBD] */

/* No need to retry on Receiver Not Ready since SMBD manages credits */

/*
 * User configurable initial values per SMBD transport connection
 * as defined in [MS-SMBD] 3.1.1.1
 */

/* The maximum fragmented upper-layer payload receive size supported */

/* The maximum single-message size which can be received */
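/*
 * Note: SMB Direct flow control is credit-based ([MS-SMBD] 3.1.1.1).
 * Each credit granted to the peer promises one posted receive buffer of
 * max_receive_size bytes; a peer may only send while it holds credits.
 * The limits above bound what this client advertises during negotiation.
 */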
/* in smbd_disconnect_rdma_work() */
	if (info->transport_status == SMBD_CONNECTED) {
		info->transport_status = SMBD_DISCONNECTING;
		rdma_disconnect(info->id);
	}

/* in smbd_disconnect_rdma_connection() */
	queue_work(info->workqueue, &info->disconnect_work);
/* in smbd_conn_upcall(): upcall from the RDMA connection manager */
	struct smbd_connection *info = id->context;

		event->event, event->status);

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		info->ri_rc = 0;
		complete(&info->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		info->ri_rc = -EHOSTUNREACH;
		complete(&info->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		info->ri_rc = -ENETUNREACH;
		complete(&info->ri_done);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		log_rdma_event(INFO, "connected event=%d\n", event->event);
		info->transport_status = SMBD_CONNECTED;
		wake_up_interruptible(&info->conn_wait);
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
		info->transport_status = SMBD_DISCONNECTED;
		wake_up_interruptible(&info->conn_wait);
		break;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_DISCONNECTED:
		/* This happens when we fail the negotiation */
		if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
			info->transport_status = SMBD_DISCONNECTED;
			wake_up(&info->conn_wait);
			break;
		}
		info->transport_status = SMBD_DISCONNECTED;
		wake_up_interruptible(&info->disconn_wait);
		wake_up_interruptible(&info->wait_reassembly_queue);
		wake_up_interruptible_all(&info->wait_send_queue);
		break;
	}
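/*
 * Note: ri_rc/ri_done carry the result of address and route resolution
 * back to smbd_create_id(), which blocks on ri_done (see below); the
 * conn_wait/disconn_wait queues play the same role for connect and
 * disconnect state changes.
 */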
/* in smbd_qp_async_error_upcall() */
		ib_event_msg(event->event), event->device->name, info);

	switch (event->event) {

/* in smbd_request_payload() */
	return (void *)request->packet;

/* in smbd_response_payload() */
	return (void *)response->packet;
/* Called when a send completion is received from the CQ */
static void send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	int i;
	struct smbd_request *request =
		container_of(wc->wr_cqe, struct smbd_request, cqe);

	log_rdma_send(INFO, "smbd_request %p completed wc->status=%d\n",
		request, wc->status);

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
		log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
			wc->status, wc->opcode);
		smbd_disconnect_rdma_connection(request->info);
	}

	for (i = 0; i < request->num_sge; i++)
		ib_dma_unmap_single(request->info->id->device,
			request->sge[i].addr,
			request->sge[i].length,
			DMA_TO_DEVICE);

	if (atomic_dec_and_test(&request->info->send_pending))
		wake_up(&request->info->wait_send_pending);

	wake_up(&request->info->wait_post_send);

	mempool_free(request, request->info->request_mempool);
}
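/*
 * Note: on error send_done() only schedules a disconnect; it still falls
 * through to unmap the SGEs and free the request, so DMA mappings and
 * mempool entries are not leaked on a failed send.
 */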
/* in dump_smbd_negotiate_resp(): log all fields of the response */
		resp->min_version, resp->max_version,
		resp->negotiated_version, resp->credits_requested,
		resp->credits_granted, resp->status,
		resp->max_readwrite_size, resp->preferred_send_size,
		resp->max_receive_size, resp->max_fragmented_size);
/*
 * Process a negotiation response message, according to [MS-SMBD] 3.1.5.7
 */
/* in process_negotiation_response() */
	struct smbd_connection *info = response->info;

	if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) {
			le16_to_cpu(packet->negotiated_version));
		return false;
	}
	info->protocol = le16_to_cpu(packet->negotiated_version);

	if (packet->credits_requested == 0) {
		return false;
	}
	info->receive_credit_target = le16_to_cpu(packet->credits_requested);

	if (packet->credits_granted == 0) {
		return false;
	}
	atomic_set(&info->send_credits, le16_to_cpu(packet->credits_granted));

	atomic_set(&info->receive_credits, 0);

	if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) {
			le32_to_cpu(packet->preferred_send_size));
		return false;
	}
	info->max_receive_size = le32_to_cpu(packet->preferred_send_size);

	if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) {
			le32_to_cpu(packet->max_receive_size));
		return false;
	}
	info->max_send_size = min_t(int, info->max_send_size,
		le32_to_cpu(packet->max_receive_size));

	if (le32_to_cpu(packet->max_fragmented_size) <
			SMBD_MIN_FRAGMENTED_SIZE) {
			le32_to_cpu(packet->max_fragmented_size));
		return false;
	}
	info->max_fragmented_send_size =
		le32_to_cpu(packet->max_fragmented_size);
	info->rdma_readwrite_threshold =
		rdma_readwrite_threshold > info->max_fragmented_send_size ?
		info->max_fragmented_send_size :
		rdma_readwrite_threshold;

	info->max_readwrite_size = min_t(u32,
		le32_to_cpu(packet->max_readwrite_size),
		info->max_frmr_depth * PAGE_SIZE);
	info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE;
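/*
 * Note: after negotiation the client clamps its send size to the peer's
 * receive size and its RDMA read/write size to what a local FRMR can map
 * (max_frmr_depth pages), then recomputes max_frmr_depth so future MR
 * registrations match the negotiated ceiling.
 */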
/* in smbd_post_send_credits() */
	if (info->transport_status != SMBD_CONNECTED) {
		wake_up(&info->wait_receive_queues);
		return;
	}

	if (info->receive_credit_target >
	    atomic_read(&info->receive_credits)) {

			response->type = SMBD_TRANSFER_DATA;
			response->first_segment = false;

	spin_lock(&info->lock_new_credits_offered);
	info->new_credits_offered += ret;
	spin_unlock(&info->lock_new_credits_offered);

	/* Promptly send an immediate packet as defined in [MS-SMBD] 3.1.1.1 */
	info->send_immediate = true;
	if (atomic_read(&info->receive_credits) <
		info->receive_credit_target - 1) {
		if (info->keep_alive_requested == KEEP_ALIVE_PENDING ||
		    info->send_immediate) {
			smbd_post_send_empty(info);
		}
	}
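/*
 * Note: credits are "offered" here by reposting receive buffers, but they
 * only reach the peer when advertised in credits_granted of the next
 * outgoing packet (see manage_credits_prior_sending()); the empty message
 * above exists to deliver them promptly when no data send is pending.
 */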
/* Called when a receive completion is received from the CQ */
static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct smbd_response *response =
		container_of(wc->wr_cqe, struct smbd_response, cqe);
	struct smbd_connection *info = response->info;
	int data_length = 0;

	log_rdma_recv(INFO, "response=%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%x\n",
		response, response->type, wc->status, wc->opcode,
		wc->byte_len, wc->pkey_index);

	if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
		log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
			wc->status, wc->opcode);
	}

	ib_dma_sync_single_for_cpu(
		wc->qp->device,
		response->sge.addr,
		response->sge.length,
		DMA_FROM_DEVICE);

	switch (response->type) {
	/* SMBD negotiation response */
	case SMBD_NEGOTIATE_RESP:
		info->full_packet_received = true;
		info->negotiate_done =
			process_negotiation_response(response, wc->byte_len);
		complete(&info->negotiate_completion);
		break;

	/* SMBD data transfer packet */
	case SMBD_TRANSFER_DATA:
		data_length = le32_to_cpu(data_transfer->data_length);

		if (info->full_packet_received)
			response->first_segment = true;

		if (le32_to_cpu(data_transfer->remaining_data_length))
			info->full_packet_received = false;
		else
			info->full_packet_received = true;

		wake_up_interruptible(&info->wait_reassembly_queue);

		atomic_dec(&info->receive_credits);
		info->receive_credit_target =
			le16_to_cpu(data_transfer->credits_requested);
		if (le16_to_cpu(data_transfer->credits_granted)) {
			atomic_add(le16_to_cpu(data_transfer->credits_granted),
				&info->send_credits);

			wake_up_interruptible(&info->wait_send_queue);
		}

			le16_to_cpu(data_transfer->flags),
			le32_to_cpu(data_transfer->data_offset),
			le32_to_cpu(data_transfer->data_length),
			le32_to_cpu(data_transfer->remaining_data_length));

		info->keep_alive_requested = KEEP_ALIVE_NONE;
		if (le16_to_cpu(data_transfer->flags) &
		    SMB_DIRECT_RESPONSE_REQUESTED)
			info->keep_alive_requested = KEEP_ALIVE_PENDING;

		break;

	default:
		log_rdma_recv(ERR,
			"unexpected response type=%d\n", response->type);
	}
/* in smbd_create_id() */
	if (dstaddr->sa_family == AF_INET6)
		sport = &((struct sockaddr_in6 *)dstaddr)->sin6_port;
	else
		sport = &((struct sockaddr_in *)dstaddr)->sin_port;

	init_completion(&info->ri_done);
	info->ri_rc = -ETIMEDOUT;

	wait_for_completion_interruptible_timeout(
		&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
	rc = info->ri_rc;

	info->ri_rc = -ETIMEDOUT;

	wait_for_completion_interruptible_timeout(
		&info->ri_done, msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
	rc = info->ri_rc;
/* in frwr_is_supported() */
	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		return false;
	if (attrs->max_fast_reg_page_list_len == 0)
		return false;
/* in smbd_ia_open() */
	info->id = smbd_create_id(info, dstaddr, port);
	if (IS_ERR(info->id)) {
		rc = PTR_ERR(info->id);
	}

	if (!frwr_is_supported(&info->id->device->attrs)) {
			info->id->device->attrs.device_cap_flags,
			info->id->device->attrs.max_fast_reg_page_list_len);
		rc = -EPROTONOSUPPORT;
	}
	info->max_frmr_depth = min_t(int,
		smbd_max_frmr_depth,
		info->id->device->attrs.max_fast_reg_page_list_len);
	info->mr_type = IB_MR_TYPE_MEM_REG;
	if (info->id->device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		info->mr_type = IB_MR_TYPE_SG_GAPS;

	info->pd = ib_alloc_pd(info->id->device, 0);
	if (IS_ERR(info->pd)) {
		rc = PTR_ERR(info->pd);
	}

	/* error path */
	rdma_destroy_id(info->id);
	info->id = NULL;
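/*
 * Note: IB_MR_TYPE_SG_GAPS is preferred when the device supports it,
 * because one MR can then map a scatterlist with arbitrary offsets;
 * otherwise the transport falls back to IB_MR_TYPE_MEM_REG, and the FRMR
 * depth is capped by the device's max_fast_reg_page_list_len.
 */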
/*
 * Send a negotiation request message to the peer
 * The negotiation procedure is in [MS-SMBD] 3.1.5.2 and 3.1.5.3
 */
/* in smbd_post_send_negotiate_req() */
	int rc = -ENOMEM;

	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
	if (!request)
		return rc;

	request->info = info;

	packet->min_version = cpu_to_le16(SMBD_V1);
	packet->max_version = cpu_to_le16(SMBD_V1);
	packet->reserved = 0;
	packet->credits_requested = cpu_to_le16(info->send_credit_target);
	packet->preferred_send_size = cpu_to_le32(info->max_send_size);
	packet->max_receive_size = cpu_to_le32(info->max_receive_size);
	packet->max_fragmented_size =
		cpu_to_le32(info->max_fragmented_recv_size);

	request->num_sge = 1;
	request->sge[0].addr = ib_dma_map_single(
		info->id->device, (void *)packet,
		sizeof(*packet), DMA_TO_DEVICE);
	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
		rc = -EIO;
		goto dma_mapping_failed;
	}

	request->sge[0].length = sizeof(*packet);
	request->sge[0].lkey = info->pd->local_dma_lkey;

	ib_dma_sync_single_for_device(
		info->id->device, request->sge[0].addr,
		request->sge[0].length, DMA_TO_DEVICE);

	request->cqe.done = send_done;

	send_wr.wr_cqe = &request->cqe;
	send_wr.sg_list = request->sge;
	send_wr.num_sge = request->num_sge;

		request->sge[0].addr,
		request->sge[0].length, request->sge[0].lkey);

	atomic_inc(&info->send_pending);
	rc = ib_post_send(info->id->qp, &send_wr, NULL);
	if (!rc)
		return 0;

	/* if we reach here, post send failed */
	atomic_dec(&info->send_pending);
	ib_dma_unmap_single(info->id->device, request->sge[0].addr,
		request->sge[0].length, DMA_TO_DEVICE);

dma_mapping_failed:
	mempool_free(request, info->request_mempool);
/* This implements [MS-SMBD] 3.1.5.9 */
/* in manage_credits_prior_sending() */
	spin_lock(&info->lock_new_credits_offered);
	new_credits = info->new_credits_offered;
	info->new_credits_offered = 0;
	spin_unlock(&info->lock_new_credits_offered);
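/*
 * Note: this is the only consumer of new_credits_offered; whatever was
 * accumulated while reposting receive buffers is drained here and handed
 * to the peer as credits_granted in the next outgoing packet header.
 */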
/* in manage_keep_alive_before_sending() */
	if (info->keep_alive_requested == KEEP_ALIVE_PENDING) {
		info->keep_alive_requested = KEEP_ALIVE_SENT;
/* in smbd_post_send() */
	for (i = 0; i < request->num_sge; i++) {
			i, request->sge[i].addr, request->sge[i].length);
		ib_dma_sync_single_for_device(
			info->id->device,
			request->sge[i].addr,
			request->sge[i].length,
			DMA_TO_DEVICE);
	}

	request->cqe.done = send_done;

	send_wr.wr_cqe = &request->cqe;
	send_wr.sg_list = request->sge;
	send_wr.num_sge = request->num_sge;

	rc = ib_post_send(info->id->qp, &send_wr, NULL);
	if (rc) {
		rc = -EAGAIN;
	} else
		/* Reset timer for idle connection after packet is sent */
		mod_delayed_work(info->workqueue, &info->idle_timer_work,
			info->keep_alive_interval*HZ);
/* in smbd_post_send_sgl() */
wait_credit:
	/* Wait for send credits; an SMBD packet needs one credit */
	rc = wait_event_interruptible(info->wait_send_queue,
		atomic_read(&info->send_credits) > 0 ||
		info->transport_status != SMBD_CONNECTED);
	if (rc)
		goto err_wait_credit;

	if (info->transport_status != SMBD_CONNECTED) {
		rc = -EAGAIN;
		goto err_wait_credit;
	}
	if (unlikely(atomic_dec_return(&info->send_credits) < 0)) {
		atomic_inc(&info->send_credits);
		goto wait_credit;
	}

wait_send_queue:
	wait_event(info->wait_post_send,
		atomic_read(&info->send_pending) < info->send_credit_target ||
		info->transport_status != SMBD_CONNECTED);

	if (info->transport_status != SMBD_CONNECTED) {
		rc = -EAGAIN;
		goto err_wait_send_queue;
	}

	if (unlikely(atomic_inc_return(&info->send_pending) >
		info->send_credit_target)) {
		atomic_dec(&info->send_pending);
		goto wait_send_queue;
	}

	request = mempool_alloc(info->request_mempool, GFP_KERNEL);
	if (!request) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	request->info = info;

	/* Fill in the packet header */
	packet->credits_requested = cpu_to_le16(info->send_credit_target);

	new_credits = manage_credits_prior_sending(info);
	atomic_add(new_credits, &info->receive_credits);
	packet->credits_granted = cpu_to_le16(new_credits);

	info->send_immediate = false;

	packet->flags = 0;
	if (manage_keep_alive_before_sending(info))
		packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);

	packet->reserved = 0;
	if (!data_length)
		packet->data_offset = 0;
	else
		packet->data_offset = cpu_to_le32(24);
	packet->data_length = cpu_to_le32(data_length);
	packet->remaining_data_length = cpu_to_le32(remaining_data_length);
	packet->padding = 0;

		le16_to_cpu(packet->credits_requested),
		le16_to_cpu(packet->credits_granted),
		le32_to_cpu(packet->data_offset),
		le32_to_cpu(packet->data_length),
		le32_to_cpu(packet->remaining_data_length));

	/* Map the packet to DMA */
	request->num_sge = 1;
	request->sge[0].addr = ib_dma_map_single(info->id->device,
						 (void *)packet,
						 header_length,
						 DMA_TO_DEVICE);
	if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
		rc = -EIO;
		request->sge[0].addr = 0;
		goto err_dma;
	}

	request->sge[0].length = header_length;
	request->sge[0].lkey = info->pd->local_dma_lkey;

	/* Fill in the packet data payload */
	for_each_sg(sgl, sg, num_sgs, i) {
		request->sge[i+1].addr =
			ib_dma_map_page(info->id->device, sg_page(sg),
				sg->offset, sg->length, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(
				info->id->device, request->sge[i+1].addr)) {
			rc = -EIO;
			request->sge[i+1].addr = 0;
			goto err_dma;
		}
		request->sge[i+1].length = sg->length;
		request->sge[i+1].lkey = info->pd->local_dma_lkey;
		request->num_sge++;
	}

err_dma:
	for (i = 0; i < request->num_sge; i++)
		if (request->sge[i].addr)
			ib_dma_unmap_single(info->id->device,
				request->sge[i].addr,
				request->sge[i].length,
				DMA_TO_DEVICE);
	mempool_free(request, info->request_mempool);

	/* roll back receive credits and credits to be offered */
	spin_lock(&info->lock_new_credits_offered);
	info->new_credits_offered += new_credits;
	spin_unlock(&info->lock_new_credits_offered);
	atomic_sub(new_credits, &info->receive_credits);

err_alloc:
	if (atomic_dec_and_test(&info->send_pending))
		wake_up(&info->wait_send_pending);

err_wait_send_queue:
	/* roll back send credits */
	atomic_inc(&info->send_credits);
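/*
 * Note on the wire layout built above: sge[0] carries the 20-byte
 * smbd_data_transfer header (padded to 24 bytes, hence data_offset == 24,
 * keeping the payload 8-byte aligned), and sge[1..n] carry the payload.
 */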
/*
 * Send an empty message, used to extend credits to the peer and keep the
 * connection alive while there is no upper-layer payload to send at the time
 */
/* in smbd_post_send_empty() */
	info->count_send_empty++;

/* in smbd_post_send_data() */
		return -EINVAL;
/* in smbd_post_recv() */
	int rc = -EIO;

	response->sge.addr = ib_dma_map_single(
		info->id->device, response->packet,
		info->max_receive_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(info->id->device, response->sge.addr))
		return rc;

	response->sge.length = info->max_receive_size;
	response->sge.lkey = info->pd->local_dma_lkey;

	response->cqe.done = recv_done;

	recv_wr.wr_cqe = &response->cqe;
	recv_wr.sg_list = &response->sge;

	rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
	if (rc) {
		ib_dma_unmap_single(info->id->device, response->sge.addr,
			response->sge.length, DMA_FROM_DEVICE);
	}
/* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */
/* in smbd_negotiate() */
	response->type = SMBD_NEGOTIATE_RESP;

		rc, response->sge.addr,
		response->sge.length, response->sge.lkey);

	init_completion(&info->negotiate_completion);
	info->negotiate_done = false;

		&info->negotiate_completion, SMBD_NEGOTIATE_TIMEOUT * HZ);

	if (info->negotiate_done)
		return 0;

	if (rc == 0)
		rc = -ETIMEDOUT;
	else if (rc == -ERESTARTSYS)
		rc = -EINTR;
	else
		rc = -ENOTCONN;
/* in put_empty_packet() */
	spin_lock(&info->empty_packet_queue_lock);
	list_add_tail(&response->list, &info->empty_packet_queue);
	info->count_empty_packet_queue++;
	spin_unlock(&info->empty_packet_queue_lock);

	queue_work(info->workqueue, &info->post_send_credits_work);
/*
 * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1
 */
/* in enqueue_reassembly() */
	spin_lock(&info->reassembly_queue_lock);
	list_add_tail(&response->list, &info->reassembly_queue);
	info->reassembly_queue_length++;

	info->reassembly_data_length += data_length;
	spin_unlock(&info->reassembly_queue_lock);
	info->count_reassembly_queue++;
	info->count_enqueue_reassembly_queue++;
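/*
 * Note: reassembly_data_length is updated under reassembly_queue_lock
 * here, but smbd_recv_buf() deliberately reads it without the lock (see
 * the comment there); the transport only appends at the tail while the
 * single reader consumes from the head.
 */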
/* in _get_first_reassembly() */
	if (!list_empty(&info->reassembly_queue)) {
		ret = list_first_entry(
			&info->reassembly_queue,
			struct smbd_response, list);
	}

/* in get_empty_queue_buffer() */
	spin_lock_irqsave(&info->empty_packet_queue_lock, flags);
	if (!list_empty(&info->empty_packet_queue)) {
		ret = list_first_entry(
			&info->empty_packet_queue,
			struct smbd_response, list);
		list_del(&ret->list);
		info->count_empty_packet_queue--;
	}
	spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags);
/*
 * Get a receive buffer; for each remote send, a receive must be posted
 * from buffers pre-allocated in advance.
 */
/* in get_receive_buffer() */
	spin_lock_irqsave(&info->receive_queue_lock, flags);
	if (!list_empty(&info->receive_queue)) {
		ret = list_first_entry(
			&info->receive_queue,
			struct smbd_response, list);
		list_del(&ret->list);
		info->count_receive_queue--;
		info->count_get_receive_buffer++;
	}
	spin_unlock_irqrestore(&info->receive_queue_lock, flags);
/* in put_receive_buffer() */
	ib_dma_unmap_single(info->id->device, response->sge.addr,
		response->sge.length, DMA_FROM_DEVICE);

	spin_lock_irqsave(&info->receive_queue_lock, flags);
	list_add_tail(&response->list, &info->receive_queue);
	info->count_receive_queue++;
	info->count_put_receive_buffer++;
	spin_unlock_irqrestore(&info->receive_queue_lock, flags);

	queue_work(info->workqueue, &info->post_send_credits_work);
/* in allocate_receive_buffers() */
	INIT_LIST_HEAD(&info->reassembly_queue);
	spin_lock_init(&info->reassembly_queue_lock);
	info->reassembly_data_length = 0;
	info->reassembly_queue_length = 0;

	INIT_LIST_HEAD(&info->receive_queue);
	spin_lock_init(&info->receive_queue_lock);
	info->count_receive_queue = 0;

	INIT_LIST_HEAD(&info->empty_packet_queue);
	spin_lock_init(&info->empty_packet_queue_lock);
	info->count_empty_packet_queue = 0;

	init_waitqueue_head(&info->wait_receive_queues);

	for (i = 0; i < num_buf; i++) {
		response = mempool_alloc(info->response_mempool, GFP_KERNEL);
		if (!response)
			goto allocate_failed;

		response->info = info;
		list_add_tail(&response->list, &info->receive_queue);
		info->count_receive_queue++;
	}

allocate_failed:
	while (!list_empty(&info->receive_queue)) {
		response = list_first_entry(
			&info->receive_queue,
			struct smbd_response, list);
		list_del(&response->list);
		info->count_receive_queue--;

		mempool_free(response, info->response_mempool);
	}
	return -ENOMEM;
/* in destroy_receive_buffers() */
	while ((response = get_receive_buffer(info)))
		mempool_free(response, info->response_mempool);

	while ((response = get_empty_queue_buffer(info)))
		mempool_free(response, info->response_mempool);
/* Implement idle connection timer [MS-SMBD] 3.1.6.2 */
/* in idle_connection_timer() */
	if (info->keep_alive_requested != KEEP_ALIVE_NONE) {
		log_keep_alive(ERR,
			"error status info->keep_alive_requested=%d\n",
			info->keep_alive_requested);
	}

	queue_delayed_work(info->workqueue, &info->idle_timer_work,
		info->keep_alive_interval*HZ);
/*
 * Destroy the transport and related RDMA and memory resources
 */
/* in smbd_destroy() */
	struct smbd_connection *info = server->smbd_conn;

	if (info->transport_status != SMBD_DISCONNECTED) {
		rdma_disconnect(server->smbd_conn->id);
		wait_event_interruptible(
			info->disconn_wait,
			info->transport_status == SMBD_DISCONNECTED);
	}

	ib_drain_qp(info->id->qp);
	rdma_destroy_qp(info->id);

	cancel_delayed_work_sync(&info->idle_timer_work);

	wait_event(info->wait_send_pending,
		atomic_read(&info->send_pending) == 0);

	/* Drain the reassembly queue */
	spin_lock_irqsave(&info->reassembly_queue_lock, flags);
	response = _get_first_reassembly(info);
	if (response) {
		list_del(&response->list);
		spin_unlock_irqrestore(
			&info->reassembly_queue_lock, flags);
		put_receive_buffer(info, response);
	} else
		spin_unlock_irqrestore(
			&info->reassembly_queue_lock, flags);

	info->reassembly_data_length = 0;

	wait_event(info->wait_receive_queues,
		info->count_receive_queue + info->count_empty_packet_queue
			== info->receive_credit_max);

	/*
	 * For performance reasons, memory registration and deregistration
	 * are not locked by srv_mutex. It is possible some processes are
	 * blocked on transport srv_mutex while holding memory registration.
	 * Release the transport srv_mutex to allow them to hit the failure
	 * path when sending data, and then release memory registrations.
	 */
	wake_up_interruptible_all(&info->wait_mr);
	while (atomic_read(&info->mr_used_count)) {
		mutex_unlock(&server->srv_mutex);
		msleep(1000);
		mutex_lock(&server->srv_mutex);
	}

	ib_free_cq(info->send_cq);
	ib_free_cq(info->recv_cq);
	ib_dealloc_pd(info->pd);
	rdma_destroy_id(info->id);

	mempool_destroy(info->request_mempool);
	kmem_cache_destroy(info->request_cache);

	mempool_destroy(info->response_mempool);
	kmem_cache_destroy(info->response_cache);

	info->transport_status = SMBD_DESTROYED;

	destroy_workqueue(info->workqueue);
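/*
 * Note the teardown order above: disconnect and drain the QP first so no
 * completions remain in flight, stop the idle timer, wait out pending
 * sends, drain the reassembly queue, wait until every receive buffer is
 * returned and every MR released, and only then free the CQs, PD, cm_id,
 * and the memory pools.
 */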
/* in smbd_reconnect() */
	if (!server->smbd_conn) {

	if (server->smbd_conn->transport_status == SMBD_CONNECTED) {

	server->smbd_conn = smbd_get_connection(
		server, (struct sockaddr *) &server->dstaddr);

	if (server->smbd_conn)
		cifs_dbg(VFS, "RDMA transport re-established\n");

	return server->smbd_conn ? 0 : -ENOENT;
/* in destroy_caches_and_workqueue() */
	destroy_workqueue(info->workqueue);
	mempool_destroy(info->response_mempool);
	kmem_cache_destroy(info->response_cache);
	mempool_destroy(info->request_mempool);
	kmem_cache_destroy(info->request_cache);
/* in allocate_caches_and_workqueue() */
	info->request_cache =

	if (!info->request_cache)
		return -ENOMEM;

	info->request_mempool =
		mempool_create(info->send_credit_target, mempool_alloc_slab,
			mempool_free_slab, info->request_cache);
	if (!info->request_mempool)
		goto out1;

	info->response_cache =

			info->max_receive_size,

	if (!info->response_cache)
		goto out2;

	info->response_mempool =
		mempool_create(info->receive_credit_max, mempool_alloc_slab,
			mempool_free_slab, info->response_cache);
	if (!info->response_mempool)
		goto out3;

	info->workqueue = create_workqueue(name);
	if (!info->workqueue)
		goto out4;

	rc = allocate_receive_buffers(info, info->receive_credit_max);

	/* error ladder */
	destroy_workqueue(info->workqueue);
out4:
	mempool_destroy(info->response_mempool);
out3:
	kmem_cache_destroy(info->response_cache);
out2:
	mempool_destroy(info->request_mempool);
out1:
	kmem_cache_destroy(info->request_cache);
	return -ENOMEM;
/* in _smbd_get_connection() */
	info->transport_status = SMBD_CONNECTING;

	if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
	    smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
			info->id->device->attrs.max_cqe,
			info->id->device->attrs.max_qp_wr);
		goto config_failed;
	}

	if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
	    smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
			info->id->device->attrs.max_cqe,
			info->id->device->attrs.max_qp_wr);
		goto config_failed;
	}

	info->receive_credit_max = smbd_receive_credit_max;
	info->send_credit_target = smbd_send_credit_target;
	info->max_send_size = smbd_max_send_size;
	info->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
	info->max_receive_size = smbd_max_receive_size;
	info->keep_alive_interval = smbd_keep_alive_interval;

	if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SGE) {
			info->id->device->attrs.max_send_sge);
		goto config_failed;
	}
	if (info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_SGE) {
			info->id->device->attrs.max_recv_sge);
		goto config_failed;
	}

	info->send_cq = NULL;
	info->recv_cq = NULL;
	info->send_cq =
		ib_alloc_cq_any(info->id->device, info,
			info->send_credit_target, IB_POLL_SOFTIRQ);
	if (IS_ERR(info->send_cq)) {
		info->send_cq = NULL;
		goto alloc_cq_failed;
	}

	info->recv_cq =
		ib_alloc_cq_any(info->id->device, info,
			info->receive_credit_max, IB_POLL_SOFTIRQ);
	if (IS_ERR(info->recv_cq)) {
		info->recv_cq = NULL;
		goto alloc_cq_failed;
	}

	qp_attr.cap.max_send_wr = info->send_credit_target;
	qp_attr.cap.max_recv_wr = info->receive_credit_max;
	qp_attr.send_cq = info->send_cq;
	qp_attr.recv_cq = info->recv_cq;

	rc = rdma_create_qp(info->id, info->pd, &qp_attr);

	conn_param.responder_resources =
		info->id->device->attrs.max_qp_rd_atom
			< SMBD_CM_RESPONDER_RESOURCES ?
		info->id->device->attrs.max_qp_rd_atom :
		SMBD_CM_RESPONDER_RESOURCES;
	info->responder_resources = conn_param.responder_resources;
		info->responder_resources);

	info->id->device->ops.get_port_immutable(
		info->id->device, info->id->port_num, &port_immutable);

	ird_ord_hdr[0] = info->responder_resources;

		&addr_in->sin_addr, port);

	init_waitqueue_head(&info->conn_wait);
	init_waitqueue_head(&info->disconn_wait);
	init_waitqueue_head(&info->wait_reassembly_queue);
	rc = rdma_connect(info->id, &conn_param);

	wait_event_interruptible(
		info->conn_wait, info->transport_status != SMBD_CONNECTING);

	if (info->transport_status != SMBD_CONNECTED) {
		goto rdma_connect_failed;
	}

	init_waitqueue_head(&info->wait_send_queue);
	INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
	queue_delayed_work(info->workqueue, &info->idle_timer_work,
		info->keep_alive_interval*HZ);

	init_waitqueue_head(&info->wait_send_pending);
	atomic_set(&info->send_pending, 0);

	init_waitqueue_head(&info->wait_post_send);

	INIT_WORK(&info->disconnect_work, smbd_disconnect_rdma_work);
	INIT_WORK(&info->post_send_credits_work, smbd_post_send_credits);
	info->new_credits_offered = 0;
	spin_lock_init(&info->lock_new_credits_offered);

	rc = allocate_mr_list(info);
	if (rc) {
		log_rdma_mr(ERR, "memory registration allocation failed\n");
	}

negotiation_failed:
	cancel_delayed_work_sync(&info->idle_timer_work);

	info->transport_status = SMBD_NEGOTIATE_FAILED;
	init_waitqueue_head(&info->conn_wait);
	rdma_disconnect(info->id);
	wait_event(info->conn_wait,
		info->transport_status == SMBD_DISCONNECTED);

rdma_connect_failed:
	rdma_destroy_qp(info->id);

alloc_cq_failed:
	if (info->send_cq)
		ib_free_cq(info->send_cq);
	if (info->recv_cq)
		ib_free_cq(info->recv_cq);

config_failed:
	ib_dealloc_pd(info->pd);
	rdma_destroy_id(info->id);
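/*
 * Note: responder_resources is clamped to the device's max_qp_rd_atom and
 * echoed in the first byte of the connect private data (ird_ord_hdr);
 * peers that need it (e.g. iWARP) use this IRD/ORD hint to size their
 * RDMA read resources.
 */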
/* in smbd_recv_buf() */
	/*
	 * No need to hold the reassembly queue lock all the time as we are
	 * the only one reading from the front of the queue
	 */
	log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size,
		info->reassembly_data_length);
	if (info->reassembly_data_length >= size) {

		queue_length = info->reassembly_queue_length;

		offset = info->first_entry_offset;

			data_length = le32_to_cpu(data_transfer->data_length);
			remaining_data_length = le32_to_cpu(
				data_transfer->remaining_data_length);
			data_offset = le32_to_cpu(data_transfer->data_offset);

			if (response->first_segment && size == 4) {

				response->first_segment = false;
			}

			to_copy = min_t(int, data_length - offset, to_read);

			if (to_copy == data_length - offset) {
				queue_length--;
				/*
				 * No need to lock if we are not at the
				 * end of the queue
				 */
				if (queue_length)
					list_del(&response->list);
				else {
					spin_lock_irq(
						&info->reassembly_queue_lock);
					list_del(&response->list);
					spin_unlock_irq(
						&info->reassembly_queue_lock);
				}
				info->count_reassembly_queue--;
				info->count_dequeue_reassembly_queue++;
			}

			to_read -= to_copy;

			log_read(INFO, "_get_first_reassembly memcpy %d bytes data_transfer_length-offset=%d after that to…
				to_copy, data_length - offset,

		spin_lock_irq(&info->reassembly_queue_lock);
		info->reassembly_data_length -= data_read;
		info->reassembly_queue_length -= queue_removed;
		spin_unlock_irq(&info->reassembly_queue_lock);

		info->first_entry_offset = offset;
			data_read, info->reassembly_data_length,
			info->first_entry_offset);
	}

	rc = wait_event_interruptible(
		info->wait_reassembly_queue,
		info->reassembly_data_length >= size ||
		info->transport_status != SMBD_CONNECTED);

	if (info->transport_status != SMBD_CONNECTED) {
		return -ECONNABORTED;
	}
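/*
 * Note: when the upper layer asks for exactly 4 bytes on the first
 * segment, it is reading the RFC1002-style length header, which SMB
 * Direct does not carry on the wire; the transport synthesizes it from
 * data_length + remaining_data_length before handing back payload bytes.
 */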
/* in smbd_recv_page() */
	rc = wait_event_interruptible(
		info->wait_reassembly_queue,
		info->reassembly_data_length >= to_read ||
		info->transport_status != SMBD_CONNECTED);
/* in smbd_recv() */
	if (iov_iter_rw(&msg->msg_iter) == WRITE) {
			iov_iter_rw(&msg->msg_iter));
		rc = -EINVAL;
		goto out;
	}

	switch (iov_iter_type(&msg->msg_iter)) {
	case ITER_KVEC:
		buf = msg->msg_iter.kvec->iov_base;
		to_read = msg->msg_iter.kvec->iov_len;
		rc = smbd_recv_buf(info, buf, to_read);
		break;

	case ITER_BVEC:
		page = msg->msg_iter.bvec->bv_page;
		page_offset = msg->msg_iter.bvec->bv_offset;
		to_read = msg->msg_iter.bvec->bv_len;
		rc = smbd_recv_page(info, page, page_offset, to_read);
		break;

	default:
			iov_iter_type(&msg->msg_iter));
		rc = -EINVAL;
	}

out:
	msg->msg_iter.count = 0;
/* in smbd_send() */
	struct smbd_connection *info = server->smbd_conn;
	int max_iov_size =
		info->max_send_size - sizeof(struct smbd_data_transfer);

	if (info->transport_status != SMBD_CONNECTED) {
		rc = -EAGAIN;
		goto done;
	}

	if (remaining_data_length > info->max_fragmented_send_size) {
			remaining_data_length, info->max_fragmented_send_size);
		rc = -EINVAL;
		goto done;
	}

	iov = rqst->rq_iov;

	for (i = 0; i < rqst->rq_nvec; i++)

	log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d rq_tailsz=%d buflen=%lu\n",
		rqst_idx, rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
		rqst->rq_tailsz, smb_rqst_len(server, rqst));

			/* send out the remaining iovs in one message */
			remaining_data_length -=
				(buflen-iov[i].iov_len);
				start, i, i - start,
			rc = smbd_post_send_data(
				info, &iov[start], i-start,
				remaining_data_length);

			/* an iov larger than max size needs fragmenting */
			nvecs = (buflen+max_iov_size-1)/max_iov_size;

				if (j == nvecs-1)
					vec.iov_len =
						buflen -
						max_iov_size*(nvecs-1);
				remaining_data_length -= vec.iov_len;

		if (i == rqst->rq_nvec)
			break;

	if (i == rqst->rq_nvec) {
		remaining_data_length -= buflen;
			start, i, i - start,
			i-start, remaining_data_length);
	}

	/* now sending pages, if there are any */
	for (i = 0; i < rqst->rq_npages; i++) {

		nvecs = (buflen + max_iov_size - 1) / max_iov_size;
		for (j = 0; j < nvecs; j++) {
			size = max_iov_size;
			if (j == nvecs-1)
				size = buflen - j*max_iov_size;
			remaining_data_length -= size;

			rc = smbd_post_send_page(
				info, rqst->rq_pages[i],
		}
	}

done:
	wait_event(info->wait_send_pending,
		atomic_read(&info->send_pending) == 0);
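/*
 * A minimal sketch (not part of the driver; the helper name is
 * hypothetical) of the fragmentation arithmetic used above: a buffer of
 * buflen bytes goes out in nvecs sends of at most max_iov_size bytes,
 * where max_iov_size = max_send_size - sizeof(struct smbd_data_transfer).
 */
static inline int smbd_example_nvecs(unsigned int buflen,
				     unsigned int max_iov_size)
{
	/* ceiling division, exactly as smbd_send() computes nvecs */
	return (buflen + max_iov_size - 1) / max_iov_size;
}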
static void register_mr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	if (wc->status) {
		log_rdma_mr(ERR, "status=%d\n", wc->status);
		cqe = wc->wr_cqe;
		mr = container_of(cqe, struct smbd_mr, cqe);
		smbd_disconnect_rdma_connection(mr->conn);
	}
}
/*
 * Work queue function that recovers MRs
 * There is one workqueue that recovers MRs, there is no need to lock as the
 * I/O requests calling smbd_register_mr will never update the links in the
 * mr_list.
 */
/* in smbd_mr_recovery_work() */
	list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
		if (smbdirect_mr->state == MR_ERROR) {

			/* recover this MR entry */
			rc = ib_dereg_mr(smbdirect_mr->mr);

			smbdirect_mr->mr = ib_alloc_mr(
				info->pd, info->mr_type,
				info->max_frmr_depth);
			if (IS_ERR(smbdirect_mr->mr)) {
					info->mr_type,
					info->max_frmr_depth);
			}
		} else
			/* This MR is being used, don't recover it */
			continue;

		smbdirect_mr->state = MR_READY;

		/*
		 * smbdirect_mr->state is updated by this function
		 * and is read and updated by I/O issuing CPUs trying
		 * to get a MR, the call to atomic_inc_return
		 * implies a memory barrier and guarantees this
		 * value is updated before waking up any calls to
		 * get_mr()
		 */
		if (atomic_inc_return(&info->mr_ready_count) == 1)
			wake_up_interruptible(&info->wait_mr);
	}
/* in destroy_mr_list() */
	cancel_work_sync(&info->mr_recovery_work);
	list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
		if (mr->state == MR_INVALIDATED)
			ib_dma_unmap_sg(info->id->device, mr->sgl,
				mr->sgl_count, mr->dir);
		ib_dereg_mr(mr->mr);
		kfree(mr->sgl);
	}
/* in allocate_mr_list() */
	INIT_LIST_HEAD(&info->mr_list);
	init_waitqueue_head(&info->wait_mr);
	spin_lock_init(&info->mr_list_lock);
	atomic_set(&info->mr_ready_count, 0);
	atomic_set(&info->mr_used_count, 0);
	init_waitqueue_head(&info->wait_for_mr_cleanup);

	/* Allocate more MRs (2x) than hardware responder_resources */
	for (i = 0; i < info->responder_resources * 2; i++) {

		smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
			info->max_frmr_depth);
		if (IS_ERR(smbdirect_mr->mr)) {
				info->mr_type, info->max_frmr_depth);
			goto out;
		}
		smbdirect_mr->sgl = kcalloc(
			info->max_frmr_depth,
			sizeof(struct scatterlist),
			GFP_KERNEL);
		if (!smbdirect_mr->sgl) {
			ib_dereg_mr(smbdirect_mr->mr);
			goto out;
		}
		smbdirect_mr->state = MR_READY;
		smbdirect_mr->conn = info;

		list_add_tail(&smbdirect_mr->list, &info->mr_list);
		atomic_inc(&info->mr_ready_count);
	}
	INIT_WORK(&info->mr_recovery_work, smbd_mr_recovery_work);

out:
	list_for_each_entry_safe(smbdirect_mr, tmp, &info->mr_list, list) {
		ib_dereg_mr(smbdirect_mr->mr);
		kfree(smbdirect_mr->sgl);
	}
	return -ENOMEM;
/* in get_mr() */
again:
	rc = wait_event_interruptible(info->wait_mr,
		atomic_read(&info->mr_ready_count) ||
		info->transport_status != SMBD_CONNECTED);
	if (rc)
		return NULL;

	if (info->transport_status != SMBD_CONNECTED) {
		log_rdma_mr(ERR, "info->transport_status=%x\n",
			info->transport_status);
		return NULL;
	}

	spin_lock(&info->mr_list_lock);
	list_for_each_entry(ret, &info->mr_list, list) {
		if (ret->state == MR_READY) {
			ret->state = MR_REGISTERED;
			spin_unlock(&info->mr_list_lock);
			atomic_dec(&info->mr_ready_count);
			atomic_inc(&info->mr_used_count);
			return ret;
		}
	}

	spin_unlock(&info->mr_list_lock);
	/*
	 * We could fail to get an MR because other processes may try to
	 * acquire one at the same time. If so, retry.
	 */
	goto again;
/*
 * Register memory for RDMA read/write
 * pages[]: the list of pages to register memory with
 * tailsz: if non-zero, the bytes to register in the last page
 */
/* in smbd_register_mr() */
	if (num_pages > info->max_frmr_depth) {
			num_pages, info->max_frmr_depth);
		return NULL;
	}

	smbdirect_mr->need_invalidate = need_invalidate;
	smbdirect_mr->sgl_count = num_pages;
	sg_init_table(smbdirect_mr->sgl, num_pages);

	/* single page: register tailsz bytes at offset */
	sg_set_page(&smbdirect_mr->sgl[0], pages[0], tailsz, offset);

	/* multiple pages: first page holds PAGE_SIZE - offset bytes */
	sg_set_page(
		&smbdirect_mr->sgl[0], pages[0], PAGE_SIZE - offset, offset);

	while (i < num_pages - 1) {
		sg_set_page(&smbdirect_mr->sgl[i], pages[i], PAGE_SIZE, 0);
		i++;
	}
	sg_set_page(&smbdirect_mr->sgl[i], pages[i],
		tailsz ? tailsz : PAGE_SIZE, 0);

	smbdirect_mr->dir = dir;
	rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgl, num_pages, dir);

	rc = ib_map_mr_sg(smbdirect_mr->mr, smbdirect_mr->sgl, num_pages,
		NULL, PAGE_SIZE);

	ib_update_fast_reg_key(smbdirect_mr->mr,
		ib_inc_rkey(smbdirect_mr->mr->rkey));
	reg_wr = &smbdirect_mr->wr;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	smbdirect_mr->cqe.done = register_mr_done;
	reg_wr->wr.wr_cqe = &smbdirect_mr->cqe;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = IB_SEND_SIGNALED;
	reg_wr->mr = smbdirect_mr->mr;
	reg_wr->key = smbdirect_mr->mr->rkey;
	reg_wr->access = writing ?
		IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
		IB_ACCESS_REMOTE_READ;

	/* There is no need to wait for completion on ib_post_send */
	rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);

	log_rdma_mr(ERR, "ib_post_send failed rc=%x reg_wr->key=%x\n",
		rc, reg_wr->key);

	/* error path */
	ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgl,
		smbdirect_mr->sgl_count, smbdirect_mr->dir);

	smbdirect_mr->state = MR_ERROR;
	if (atomic_dec_and_test(&info->mr_used_count))
		wake_up(&info->wait_for_mr_cleanup);
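/*
 * Note the FRWR flow above: take a ready MR, bump the rkey, map the page
 * list, and post IB_WR_REG_MR without waiting for its completion; send
 * queue ordering guarantees the registration completes before any later
 * work request that references the new rkey on the same QP.
 */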
static void local_inv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	cqe = wc->wr_cqe;
	smbdirect_mr = container_of(cqe, struct smbd_mr, cqe);
	smbdirect_mr->state = MR_INVALIDATED;
	if (wc->status != IB_WC_SUCCESS) {
		log_rdma_mr(ERR, "invalidate failed status=%x\n", wc->status);
		smbdirect_mr->state = MR_ERROR;
	}
	complete(&smbdirect_mr->invalidate_done);
}
/* in smbd_deregister_mr() */
	struct smbd_connection *info = smbdirect_mr->conn;

	if (smbdirect_mr->need_invalidate) {
		/* Need to finish local invalidation before returning */
		wr = &smbdirect_mr->inv_wr;
		wr->opcode = IB_WR_LOCAL_INV;
		smbdirect_mr->cqe.done = local_inv_done;
		wr->wr_cqe = &smbdirect_mr->cqe;
		wr->num_sge = 0;
		wr->ex.invalidate_rkey = smbdirect_mr->mr->rkey;
		wr->send_flags = IB_SEND_SIGNALED;

		init_completion(&smbdirect_mr->invalidate_done);
		rc = ib_post_send(info->id->qp, wr, NULL);

		wait_for_completion(&smbdirect_mr->invalidate_done);
		smbdirect_mr->need_invalidate = false;
	} else
		/*
		 * For remote invalidation, just set it to MR_INVALIDATED
		 * and defer to mr_recovery_work to recover the MR for next use
		 */
		smbdirect_mr->state = MR_INVALIDATED;

	if (smbdirect_mr->state == MR_INVALIDATED) {
		ib_dma_unmap_sg(
			info->id->device, smbdirect_mr->sgl,
			smbdirect_mr->sgl_count,
			smbdirect_mr->dir);
		smbdirect_mr->state = MR_READY;
		if (atomic_inc_return(&info->mr_ready_count) == 1)
			wake_up_interruptible(&info->wait_mr);
	} else
		/*
		 * Schedule the work to do MR recovery for future I/Os; MR
		 * recovery is slow and we don't want it to block current I/O
		 */
		queue_work(info->workqueue, &info->mr_recovery_work);

	if (atomic_dec_and_test(&info->mr_used_count))
		wake_up(&info->wait_for_mr_cleanup);
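/*
 * Note: there are two invalidation paths above. If the server did not
 * invalidate the MR remotely (need_invalidate), the client posts
 * IB_WR_LOCAL_INV and blocks until local_inv_done() completes it;
 * otherwise the MR is already invalid and is unmapped and recycled
 * directly, with mr_recovery_work handling any MR left in MR_ERROR.
 */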