Lines matching the identifier t (full matches)

161 #define KSMBD_TRANS(t) ((struct ksmbd_transport *)&((t)->transport))  argument
198 struct smb_direct_transport *t; member
227 static int smb_direct_post_send_data(struct smb_direct_transport *t,
233 smb_trans_direct_transfort(struct ksmbd_transport *t) in smb_trans_direct_transfort() argument
235 return container_of(t, struct smb_direct_transport, transport); in smb_trans_direct_transfort()
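
smb_trans_direct_transfort() above recovers the outer smb_direct_transport from the embedded ksmbd_transport with container_of(). A minimal userspace sketch of that idiom, with stand-in struct names (not the kernel's types):

/* Userspace sketch of the container_of idiom used by
 * smb_trans_direct_transfort(): recover the outer struct from a
 * pointer to one of its embedded members.  All names are stand-ins.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ksmbd_transport_stub { int dummy; };

struct smb_direct_transport_stub {
	int status;
	struct ksmbd_transport_stub transport;	/* embedded member */
};

static struct smb_direct_transport_stub *
outer_of(struct ksmbd_transport_stub *t)
{
	return container_of(t, struct smb_direct_transport_stub, transport);
}

int main(void)
{
	struct smb_direct_transport_stub st = { .status = 42 };
	struct ksmbd_transport_stub *inner = &st.transport;

	printf("status via container_of: %d\n", outer_of(inner)->status);
	return 0;
}
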
252 smb_direct_recvmsg *get_free_recvmsg(struct smb_direct_transport *t) in get_free_recvmsg() argument
256 spin_lock(&t->recvmsg_queue_lock); in get_free_recvmsg()
257 if (!list_empty(&t->recvmsg_queue)) { in get_free_recvmsg()
258 recvmsg = list_first_entry(&t->recvmsg_queue, in get_free_recvmsg()
263 spin_unlock(&t->recvmsg_queue_lock); in get_free_recvmsg()
267 static void put_recvmsg(struct smb_direct_transport *t, in put_recvmsg() argument
270 ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr, in put_recvmsg()
273 spin_lock(&t->recvmsg_queue_lock); in put_recvmsg()
274 list_add(&recvmsg->list, &t->recvmsg_queue); in put_recvmsg()
275 spin_unlock(&t->recvmsg_queue_lock); in put_recvmsg()
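
get_free_recvmsg() and put_recvmsg() above pop from and push to a lock-protected free list of receive descriptors. A simplified userspace analogue, with a pthread mutex standing in for the kernel spinlock and illustrative names throughout:

/* Simplified userspace analogue of the recvmsg free list: get() pops
 * the first entry under a lock, put() pushes it back (LIFO, like
 * list_add()).  A pthread mutex stands in for the kernel spinlock.
 */
#include <pthread.h>
#include <stddef.h>

struct recvmsg_stub {
	struct recvmsg_stub *next;
	char payload[128];
};

struct recv_pool {
	pthread_mutex_t lock;
	struct recvmsg_stub *free_list;
};

static struct recvmsg_stub *pool_get(struct recv_pool *p)
{
	struct recvmsg_stub *msg = NULL;

	pthread_mutex_lock(&p->lock);
	if (p->free_list) {
		msg = p->free_list;
		p->free_list = msg->next;
	}
	pthread_mutex_unlock(&p->lock);
	return msg;			/* NULL when the pool is exhausted */
}

static void pool_put(struct recv_pool *p, struct recvmsg_stub *msg)
{
	pthread_mutex_lock(&p->lock);
	msg->next = p->free_list;
	p->free_list = msg;
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct recv_pool p = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct recvmsg_stub msg = { 0 };

	pool_put(&p, &msg);			/* seed the pool */
	return pool_get(&p) == &msg ? 0 : 1;	/* expect the same buffer back */
}
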
279 smb_direct_recvmsg *get_empty_recvmsg(struct smb_direct_transport *t) in get_empty_recvmsg() argument
283 spin_lock(&t->empty_recvmsg_queue_lock); in get_empty_recvmsg()
284 if (!list_empty(&t->empty_recvmsg_queue)) { in get_empty_recvmsg()
285 recvmsg = list_first_entry(&t->empty_recvmsg_queue, in get_empty_recvmsg()
289 spin_unlock(&t->empty_recvmsg_queue_lock); in get_empty_recvmsg()
293 static void put_empty_recvmsg(struct smb_direct_transport *t, in put_empty_recvmsg() argument
296 ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr, in put_empty_recvmsg()
299 spin_lock(&t->empty_recvmsg_queue_lock); in put_empty_recvmsg()
300 list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue); in put_empty_recvmsg()
301 spin_unlock(&t->empty_recvmsg_queue_lock); in put_empty_recvmsg()
304 static void enqueue_reassembly(struct smb_direct_transport *t, in enqueue_reassembly() argument
308 spin_lock(&t->reassembly_queue_lock); in enqueue_reassembly()
309 list_add_tail(&recvmsg->list, &t->reassembly_queue); in enqueue_reassembly()
310 t->reassembly_queue_length++; in enqueue_reassembly()
318 t->reassembly_data_length += data_length; in enqueue_reassembly()
319 spin_unlock(&t->reassembly_queue_lock); in enqueue_reassembly()
322 static struct smb_direct_recvmsg *get_first_reassembly(struct smb_direct_transport *t) in get_first_reassembly() argument
324 if (!list_empty(&t->reassembly_queue)) in get_first_reassembly()
325 return list_first_entry(&t->reassembly_queue, in get_first_reassembly()
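
enqueue_reassembly() appends a received message to the tail of the reassembly queue while keeping running counts of queued messages and buffered bytes; get_first_reassembly() only peeks at the head. A minimal single-threaded sketch of that bookkeeping (the reassembly_queue_lock is omitted, names are illustrative):

/* Sketch of reassembly-queue bookkeeping: tail append plus running
 * counters for queue length and buffered bytes, and a head peek.
 * Single-threaded, so the kernel's reassembly_queue_lock is omitted.
 */
#include <stdio.h>

struct frag {
	struct frag *next;
	int data_length;
};

struct reassembly {
	struct frag *head, *tail;
	int queue_length;
	long data_length;
};

static void enqueue(struct reassembly *r, struct frag *f)
{
	f->next = NULL;
	if (r->tail)
		r->tail->next = f;
	else
		r->head = f;
	r->tail = f;
	r->queue_length++;
	r->data_length += f->data_length;	/* mirrors reassembly_data_length */
}

static struct frag *peek_first(struct reassembly *r)
{
	return r->head;				/* NULL when the queue is empty */
}

int main(void)
{
	struct reassembly r = { 0 };
	struct frag a = { .data_length = 512 }, b = { .data_length = 256 };

	enqueue(&r, &a);
	enqueue(&r, &b);
	printf("%d msgs, %ld bytes, first holds %d\n",
	       r.queue_length, r.data_length, peek_first(&r)->data_length);
	return 0;
}
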
333 struct smb_direct_transport *t = in smb_direct_disconnect_rdma_work() local
337 if (t->status == SMB_DIRECT_CS_CONNECTED) { in smb_direct_disconnect_rdma_work()
338 t->status = SMB_DIRECT_CS_DISCONNECTING; in smb_direct_disconnect_rdma_work()
339 rdma_disconnect(t->cm_id); in smb_direct_disconnect_rdma_work()
344 smb_direct_disconnect_rdma_connection(struct smb_direct_transport *t) in smb_direct_disconnect_rdma_connection() argument
346 if (t->status == SMB_DIRECT_CS_CONNECTED) in smb_direct_disconnect_rdma_connection()
347 queue_work(smb_direct_wq, &t->disconnect_work); in smb_direct_disconnect_rdma_connection()
352 struct smb_direct_transport *t = container_of(work, in smb_direct_send_immediate_work() local
355 if (t->status != SMB_DIRECT_CS_CONNECTED) in smb_direct_send_immediate_work()
358 smb_direct_post_send_data(t, NULL, NULL, 0, 0); in smb_direct_send_immediate_work()
363 struct smb_direct_transport *t; in alloc_transport() local
366 t = kzalloc(sizeof(*t), KSMBD_DEFAULT_GFP); in alloc_transport()
367 if (!t) in alloc_transport()
370 t->cm_id = cm_id; in alloc_transport()
371 cm_id->context = t; in alloc_transport()
373 t->status = SMB_DIRECT_CS_NEW; in alloc_transport()
374 init_waitqueue_head(&t->wait_status); in alloc_transport()
376 spin_lock_init(&t->reassembly_queue_lock); in alloc_transport()
377 INIT_LIST_HEAD(&t->reassembly_queue); in alloc_transport()
378 t->reassembly_data_length = 0; in alloc_transport()
379 t->reassembly_queue_length = 0; in alloc_transport()
380 init_waitqueue_head(&t->wait_reassembly_queue); in alloc_transport()
381 init_waitqueue_head(&t->wait_send_credits); in alloc_transport()
382 init_waitqueue_head(&t->wait_rw_credits); in alloc_transport()
384 spin_lock_init(&t->receive_credit_lock); in alloc_transport()
385 spin_lock_init(&t->recvmsg_queue_lock); in alloc_transport()
386 INIT_LIST_HEAD(&t->recvmsg_queue); in alloc_transport()
388 spin_lock_init(&t->empty_recvmsg_queue_lock); in alloc_transport()
389 INIT_LIST_HEAD(&t->empty_recvmsg_queue); in alloc_transport()
391 init_waitqueue_head(&t->wait_send_pending); in alloc_transport()
392 atomic_set(&t->send_pending, 0); in alloc_transport()
394 spin_lock_init(&t->lock_new_recv_credits); in alloc_transport()
396 INIT_DELAYED_WORK(&t->post_recv_credits_work, in alloc_transport()
398 INIT_WORK(&t->send_immediate_work, smb_direct_send_immediate_work); in alloc_transport()
399 INIT_WORK(&t->disconnect_work, smb_direct_disconnect_rdma_work); in alloc_transport()
404 conn->transport = KSMBD_TRANS(t); in alloc_transport()
405 KSMBD_TRANS(t)->conn = conn; in alloc_transport()
406 KSMBD_TRANS(t)->ops = &ksmbd_smb_direct_transport_ops; in alloc_transport()
407 return t; in alloc_transport()
409 kfree(t); in alloc_transport()
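
alloc_transport() zero-allocates the transport, cross-links it with the RDMA CM ID through cm_id->context, and initializes every lock, list head, and wait queue before anything can reach the object, freeing it again on the error path. A hedged userspace sketch of that construct-then-publish pattern, with pthread primitives standing in for the kernel ones:

/* Userspace sketch of the alloc_transport() pattern: zero-allocate,
 * wire the back-pointer, initialize every synchronization object, and
 * only then hand the object out; unwind on the error path.
 */
#include <pthread.h>
#include <stdlib.h>

struct cm_id_stub {
	void *context;			/* like cm_id->context */
};

struct transport_stub {
	struct cm_id_stub *cm_id;
	int status;			/* new/connected/disconnected */
	pthread_mutex_t queue_lock;
	pthread_cond_t wait_status;
};

static struct transport_stub *alloc_transport_stub(struct cm_id_stub *cm_id)
{
	struct transport_stub *t = calloc(1, sizeof(*t));

	if (!t)
		return NULL;

	t->cm_id = cm_id;
	cm_id->context = t;		/* cross-link both directions */

	if (pthread_mutex_init(&t->queue_lock, NULL))
		goto err_free;
	if (pthread_cond_init(&t->wait_status, NULL))
		goto err_mutex;
	return t;

err_mutex:
	pthread_mutex_destroy(&t->queue_lock);
err_free:
	free(t);
	return NULL;
}

int main(void)
{
	struct cm_id_stub id = { 0 };
	struct transport_stub *t = alloc_transport_stub(&id);

	if (!t)
		return 1;
	pthread_cond_destroy(&t->wait_status);
	pthread_mutex_destroy(&t->queue_lock);
	free(t);
	return 0;
}
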
413 static void free_transport(struct smb_direct_transport *t) in free_transport() argument
417 wake_up_interruptible(&t->wait_send_credits); in free_transport()
420 wait_event(t->wait_send_pending, in free_transport()
421 atomic_read(&t->send_pending) == 0); in free_transport()
423 cancel_work_sync(&t->disconnect_work); in free_transport()
424 cancel_delayed_work_sync(&t->post_recv_credits_work); in free_transport()
425 cancel_work_sync(&t->send_immediate_work); in free_transport()
427 if (t->qp) { in free_transport()
428 ib_drain_qp(t->qp); in free_transport()
429 ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs); in free_transport()
430 ib_destroy_qp(t->qp); in free_transport()
435 spin_lock(&t->reassembly_queue_lock); in free_transport()
436 recvmsg = get_first_reassembly(t); in free_transport()
439 spin_unlock(&t->reassembly_queue_lock); in free_transport()
440 put_recvmsg(t, recvmsg); in free_transport()
442 spin_unlock(&t->reassembly_queue_lock); in free_transport()
445 t->reassembly_data_length = 0; in free_transport()
447 if (t->send_cq) in free_transport()
448 ib_free_cq(t->send_cq); in free_transport()
449 if (t->recv_cq) in free_transport()
450 ib_free_cq(t->recv_cq); in free_transport()
451 if (t->pd) in free_transport()
452 ib_dealloc_pd(t->pd); in free_transport()
453 if (t->cm_id) in free_transport()
454 rdma_destroy_id(t->cm_id); in free_transport()
456 smb_direct_destroy_pools(t); in free_transport()
457 ksmbd_conn_free(KSMBD_TRANS(t)->conn); in free_transport()
458 kfree(t); in free_transport()
462 *smb_direct_alloc_sendmsg(struct smb_direct_transport *t) in smb_direct_alloc_sendmsg() argument
466 msg = mempool_alloc(t->sendmsg_mempool, KSMBD_DEFAULT_GFP); in smb_direct_alloc_sendmsg()
469 msg->transport = t; in smb_direct_alloc_sendmsg()
475 static void smb_direct_free_sendmsg(struct smb_direct_transport *t, in smb_direct_free_sendmsg() argument
481 ib_dma_unmap_single(t->cm_id->device, in smb_direct_free_sendmsg()
485 ib_dma_unmap_page(t->cm_id->device, in smb_direct_free_sendmsg()
489 mempool_free(msg, t->sendmsg_mempool); in smb_direct_free_sendmsg()
539 struct smb_direct_transport *t; in recv_done() local
542 t = recvmsg->transport; in recv_done()
549 smb_direct_disconnect_rdma_connection(t); in recv_done()
551 put_empty_recvmsg(t, recvmsg); in recv_done()
565 put_empty_recvmsg(t, recvmsg); in recv_done()
568 t->negotiation_requested = true; in recv_done()
569 t->full_packet_received = true; in recv_done()
570 t->status = SMB_DIRECT_CS_CONNECTED; in recv_done()
571 enqueue_reassembly(t, recvmsg, 0); in recv_done()
572 wake_up_interruptible(&t->wait_status); in recv_done()
582 put_empty_recvmsg(t, recvmsg); in recv_done()
590 put_empty_recvmsg(t, recvmsg); in recv_done()
594 if (t->full_packet_received) in recv_done()
598 t->full_packet_received = false; in recv_done()
600 t->full_packet_received = true; in recv_done()
602 enqueue_reassembly(t, recvmsg, (int)data_length); in recv_done()
603 wake_up_interruptible(&t->wait_reassembly_queue); in recv_done()
605 spin_lock(&t->receive_credit_lock); in recv_done()
606 receive_credits = --(t->recv_credits); in recv_done()
607 avail_recvmsg_count = t->count_avail_recvmsg; in recv_done()
608 spin_unlock(&t->receive_credit_lock); in recv_done()
610 put_empty_recvmsg(t, recvmsg); in recv_done()
612 spin_lock(&t->receive_credit_lock); in recv_done()
613 receive_credits = --(t->recv_credits); in recv_done()
614 avail_recvmsg_count = ++(t->count_avail_recvmsg); in recv_done()
615 spin_unlock(&t->receive_credit_lock); in recv_done()
618 t->recv_credit_target = in recv_done()
621 &t->send_credits); in recv_done()
625 queue_work(smb_direct_wq, &t->send_immediate_work); in recv_done()
627 if (atomic_read(&t->send_credits) > 0) in recv_done()
628 wake_up_interruptible(&t->wait_send_credits); in recv_done()
632 &t->post_recv_credits_work, 0); in recv_done()
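
recv_done() spends one receive credit per completed receive, returns empty buffers to the available pool immediately, keeps data-bearing buffers queued for reassembly, and kicks the refill work when credits run low. A loose single-threaded sketch of that accounting (the exact refill trigger in the kernel differs; threshold and names here are illustrative):

/* Sketch of per-completion receive-credit accounting: each completed
 * receive consumes one credit; an empty buffer goes straight back to
 * the available pool, a data buffer stays queued until consumed.  The
 * refill trigger here is a simple threshold, not the kernel's exact
 * condition; the receive_credit_lock is omitted (single-threaded).
 */
#include <stdbool.h>
#include <stdio.h>

struct rx_credits {
	int recv_credits;	/* posted receives still outstanding */
	int avail_recvmsg;	/* buffers ready to be re-posted */
	int credit_target;	/* refill when we drop below this */
};

/* Returns true when the post_recv_credits work should be scheduled. */
static bool on_receive_done(struct rx_credits *c, bool has_data)
{
	c->recv_credits--;
	if (!has_data)
		c->avail_recvmsg++;	/* buffer can be re-posted at once */

	return c->recv_credits < c->credit_target;
}

int main(void)
{
	struct rx_credits c = { .recv_credits = 4, .avail_recvmsg = 0,
				.credit_target = 3 };

	printf("refill? %d\n", on_receive_done(&c, true));	/* 4 -> 3: no  */
	printf("refill? %d\n", on_receive_done(&c, false));	/* 3 -> 2: yes */
	return 0;
}
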
640 static int smb_direct_post_recv(struct smb_direct_transport *t, in smb_direct_post_recv() argument
646 recvmsg->sge.addr = ib_dma_map_single(t->cm_id->device, in smb_direct_post_recv()
647 recvmsg->packet, t->max_recv_size, in smb_direct_post_recv()
649 ret = ib_dma_mapping_error(t->cm_id->device, recvmsg->sge.addr); in smb_direct_post_recv()
652 recvmsg->sge.length = t->max_recv_size; in smb_direct_post_recv()
653 recvmsg->sge.lkey = t->pd->local_dma_lkey; in smb_direct_post_recv()
661 ret = ib_post_recv(t->qp, &wr, NULL); in smb_direct_post_recv()
663 pr_err("Can't post recv: %d\n", ret); in smb_direct_post_recv()
664 ib_dma_unmap_single(t->cm_id->device, in smb_direct_post_recv()
667 smb_direct_disconnect_rdma_connection(t); in smb_direct_post_recv()
673 static int smb_direct_read(struct ksmbd_transport *t, char *buf, in smb_direct_read() argument
681 struct smb_direct_transport *st = smb_trans_direct_transfort(t); in smb_direct_read()
803 struct smb_direct_transport *t = container_of(work, in smb_direct_post_recv_credits() local
810 spin_lock(&t->receive_credit_lock); in smb_direct_post_recv_credits()
811 receive_credits = t->recv_credits; in smb_direct_post_recv_credits()
812 spin_unlock(&t->receive_credit_lock); in smb_direct_post_recv_credits()
814 if (receive_credits < t->recv_credit_target) { in smb_direct_post_recv_credits()
817 recvmsg = get_free_recvmsg(t); in smb_direct_post_recv_credits()
819 recvmsg = get_empty_recvmsg(t); in smb_direct_post_recv_credits()
832 ret = smb_direct_post_recv(t, recvmsg); in smb_direct_post_recv_credits()
834 pr_err("Can't post recv: %d\n", ret); in smb_direct_post_recv_credits()
835 put_recvmsg(t, recvmsg); in smb_direct_post_recv_credits()
842 spin_lock(&t->receive_credit_lock); in smb_direct_post_recv_credits()
843 t->recv_credits += credits; in smb_direct_post_recv_credits()
844 t->count_avail_recvmsg -= credits; in smb_direct_post_recv_credits()
845 spin_unlock(&t->receive_credit_lock); in smb_direct_post_recv_credits()
847 spin_lock(&t->lock_new_recv_credits); in smb_direct_post_recv_credits()
848 t->new_recv_credits += credits; in smb_direct_post_recv_credits()
849 spin_unlock(&t->lock_new_recv_credits); in smb_direct_post_recv_credits()
852 queue_work(smb_direct_wq, &t->send_immediate_work); in smb_direct_post_recv_credits()
858 struct smb_direct_transport *t; in send_done() local
862 t = sendmsg->transport; in send_done()
872 smb_direct_disconnect_rdma_connection(t); in send_done()
875 if (atomic_dec_and_test(&t->send_pending)) in send_done()
876 wake_up(&t->wait_send_pending); in send_done()
884 smb_direct_free_sendmsg(t, sibling); in send_done()
888 smb_direct_free_sendmsg(t, sibling); in send_done()
891 static int manage_credits_prior_sending(struct smb_direct_transport *t) in manage_credits_prior_sending() argument
895 spin_lock(&t->lock_new_recv_credits); in manage_credits_prior_sending()
896 new_credits = t->new_recv_credits; in manage_credits_prior_sending()
897 t->new_recv_credits = 0; in manage_credits_prior_sending()
898 spin_unlock(&t->lock_new_recv_credits); in manage_credits_prior_sending()
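
manage_credits_prior_sending() grabs whatever receive credits have accumulated since the last send and zeroes the counter under the lock, so each batch is granted to the peer exactly once. A userspace sketch of that take-and-reset step:

/* Userspace sketch of the take-and-reset credit grant: read the
 * accumulated new receive credits and zero the counter under one lock,
 * so the same credits are never advertised to the peer twice.
 */
#include <pthread.h>
#include <stdio.h>

struct credit_box {
	pthread_mutex_t lock;
	int new_recv_credits;
};

static int take_new_credits(struct credit_box *b)
{
	int granted;

	pthread_mutex_lock(&b->lock);
	granted = b->new_recv_credits;
	b->new_recv_credits = 0;
	pthread_mutex_unlock(&b->lock);
	return granted;
}

int main(void)
{
	struct credit_box b = { PTHREAD_MUTEX_INITIALIZER, 5 };
	int first = take_new_credits(&b);
	int second = take_new_credits(&b);

	printf("granted %d, then %d\n", first, second);	/* 5, then 0 */
	return 0;
}
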
903 static int smb_direct_post_send(struct smb_direct_transport *t, in smb_direct_post_send() argument
908 atomic_inc(&t->send_pending); in smb_direct_post_send()
909 ret = ib_post_send(t->qp, wr, NULL); in smb_direct_post_send()
912 if (atomic_dec_and_test(&t->send_pending)) in smb_direct_post_send()
913 wake_up(&t->wait_send_pending); in smb_direct_post_send()
914 smb_direct_disconnect_rdma_connection(t); in smb_direct_post_send()
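
smb_direct_post_send() increments a pending-send counter before posting and decrements it again if the post fails, while send_done() decrements it on completion; free_transport() then waits for the counter to reach zero before tearing down the queue pair. A userspace sketch of that pending-work gate, with a mutex/condvar pair in place of the atomic counter and wait queue:

/* Sketch of the send_pending gate: every post increments the counter,
 * every completion (or failed post) decrements it and wakes waiters,
 * and teardown blocks until it reaches zero.  A mutex/condvar pair
 * replaces atomic_t + wait queue; names are illustrative.
 */
#include <pthread.h>

struct send_gate {
	pthread_mutex_t lock;
	pthread_cond_t idle;
	int pending;
};

static void gate_post(struct send_gate *g)
{
	pthread_mutex_lock(&g->lock);
	g->pending++;
	pthread_mutex_unlock(&g->lock);
}

static void gate_done(struct send_gate *g)
{
	pthread_mutex_lock(&g->lock);
	if (--g->pending == 0)
		pthread_cond_broadcast(&g->idle);	/* like wake_up(&wait_send_pending) */
	pthread_mutex_unlock(&g->lock);
}

static void gate_wait_idle(struct send_gate *g)
{
	pthread_mutex_lock(&g->lock);
	while (g->pending)
		pthread_cond_wait(&g->idle, &g->lock);
	pthread_mutex_unlock(&g->lock);
}

int main(void)
{
	struct send_gate g = { PTHREAD_MUTEX_INITIALIZER,
			       PTHREAD_COND_INITIALIZER, 0 };

	gate_post(&g);		/* post about to run            */
	gate_done(&g);		/* completion handler ran       */
	gate_wait_idle(&g);	/* teardown may proceed         */
	return 0;
}
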
919 static void smb_direct_send_ctx_init(struct smb_direct_transport *t, in smb_direct_send_ctx_init() argument
930 static int smb_direct_flush_send_list(struct smb_direct_transport *t, in smb_direct_flush_send_list() argument
954 ret = smb_direct_post_send(t, &first->wr); in smb_direct_flush_send_list()
956 smb_direct_send_ctx_init(t, send_ctx, in smb_direct_flush_send_list()
960 atomic_add(send_ctx->wr_cnt, &t->send_credits); in smb_direct_flush_send_list()
961 wake_up(&t->wait_send_credits); in smb_direct_flush_send_list()
964 smb_direct_free_sendmsg(t, first); in smb_direct_flush_send_list()
970 static int wait_for_credits(struct smb_direct_transport *t, in wait_for_credits() argument
983 t->status != SMB_DIRECT_CS_CONNECTED); in wait_for_credits()
985 if (t->status != SMB_DIRECT_CS_CONNECTED) in wait_for_credits()
992 static int wait_for_send_credits(struct smb_direct_transport *t, in wait_for_send_credits() argument
998 (send_ctx->wr_cnt >= 16 || atomic_read(&t->send_credits) <= 1)) { in wait_for_send_credits()
999 ret = smb_direct_flush_send_list(t, send_ctx, false); in wait_for_send_credits()
1004 return wait_for_credits(t, &t->wait_send_credits, &t->send_credits, 1); in wait_for_send_credits()
1007 static int wait_for_rw_credits(struct smb_direct_transport *t, int credits) in wait_for_rw_credits() argument
1009 return wait_for_credits(t, &t->wait_rw_credits, &t->rw_credits, credits); in wait_for_rw_credits()
1012 static int calc_rw_credits(struct smb_direct_transport *t, in calc_rw_credits() argument
1016 t->pages_per_rw_credit); in calc_rw_credits()
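
wait_for_credits() blocks until the requested credits can be debited or the transport leaves the connected state, and calc_rw_credits() rounds a buffer up to whole per-credit page groups. A sketch of both, using a condition variable where the kernel uses a wait queue; the page math is an approximation and all constants are illustrative:

/* Sketch of credit waiting and RW-credit sizing.  A condition variable
 * replaces the kernel wait queue, and the rounding only approximates
 * calc_rw_credits(); constants and names are illustrative.
 */
#include <pthread.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define PAGE_SZ			4096	/* stands in for PAGE_SIZE */

struct credits {
	pthread_mutex_t lock;
	pthread_cond_t avail;
	int count;
	int connected;
};

/* Block until 'needed' credits were debited; -1 if disconnected. */
static int wait_for_credits(struct credits *c, int needed)
{
	int ret = -1;

	pthread_mutex_lock(&c->lock);
	while (c->connected && c->count < needed)
		pthread_cond_wait(&c->avail, &c->lock);
	if (c->connected) {
		c->count -= needed;
		ret = 0;
	}
	pthread_mutex_unlock(&c->lock);
	return ret;
}

static int calc_rw_credits(int buf_len, int pages_per_credit)
{
	/* one credit covers roughly pages_per_credit pages of payload */
	return DIV_ROUND_UP(DIV_ROUND_UP(buf_len, PAGE_SZ), pages_per_credit);
}

int main(void)
{
	struct credits c = { PTHREAD_MUTEX_INITIALIZER,
			     PTHREAD_COND_INITIALIZER, 8, 1 };
	int ret;

	printf("credits for 1 MiB: %d\n", calc_rw_credits(1 << 20, 16));
	ret = wait_for_credits(&c, 3);
	printf("wait_for_credits: %d (left: %d)\n", ret, c.count);
	return 0;
}
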
1019 static int smb_direct_create_header(struct smb_direct_transport *t, in smb_direct_create_header() argument
1028 sendmsg = smb_direct_alloc_sendmsg(t); in smb_direct_create_header()
1034 packet->credits_requested = cpu_to_le16(t->send_credit_target); in smb_direct_create_header()
1035 packet->credits_granted = cpu_to_le16(manage_credits_prior_sending(t)); in smb_direct_create_header()
1057 /* If this is a packet without payload, don't send padding */ in smb_direct_create_header()
1062 sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device, in smb_direct_create_header()
1066 ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr); in smb_direct_create_header()
1068 smb_direct_free_sendmsg(t, sendmsg); in smb_direct_create_header()
1074 sendmsg->sge[0].lkey = t->pd->local_dma_lkey; in smb_direct_create_header()
1124 static int post_sendmsg(struct smb_direct_transport *t, in post_sendmsg() argument
1131 ib_dma_sync_single_for_device(t->cm_id->device, in post_sendmsg()
1159 return smb_direct_post_send(t, &msg->wr); in post_sendmsg()
1162 static int smb_direct_post_send_data(struct smb_direct_transport *t, in smb_direct_post_send_data() argument
1172 ret = wait_for_send_credits(t, send_ctx); in smb_direct_post_send_data()
1180 ret = smb_direct_create_header(t, data_length, remaining_data_length, in smb_direct_post_send_data()
1183 atomic_inc(&t->send_credits); in smb_direct_post_send_data()
1192 sg_cnt = get_mapped_sg_list(t->cm_id->device, in smb_direct_post_send_data()
1203 ib_dma_unmap_sg(t->cm_id->device, sg, sg_cnt, in smb_direct_post_send_data()
1212 sge->lkey = t->pd->local_dma_lkey; in smb_direct_post_send_data()
1217 ret = post_sendmsg(t, send_ctx, msg); in smb_direct_post_send_data()
1222 smb_direct_free_sendmsg(t, msg); in smb_direct_post_send_data()
1223 atomic_inc(&t->send_credits); in smb_direct_post_send_data()
1227 static int smb_direct_writev(struct ksmbd_transport *t, in smb_direct_writev() argument
1231 struct smb_direct_transport *st = smb_trans_direct_transfort(t); in smb_direct_writev()
1306 * As an optimization, we don't wait for individual I/O to finish in smb_direct_writev()
1317 static void smb_direct_free_rdma_rw_msg(struct smb_direct_transport *t, in smb_direct_free_rdma_rw_msg() argument
1321 rdma_rw_ctx_destroy(&msg->rw_ctx, t->qp, t->qp->port, in smb_direct_free_rdma_rw_msg()
1332 struct smb_direct_transport *t = msg->t; in read_write_done() local
1339 smb_direct_disconnect_rdma_connection(t); in read_write_done()
1355 static int smb_direct_rdma_xmit(struct smb_direct_transport *t, in smb_direct_rdma_xmit() argument
1370 if (t->status != SMB_DIRECT_CS_CONNECTED) in smb_direct_rdma_xmit()
1373 if (buf_len > t->max_rdma_rw_size) in smb_direct_rdma_xmit()
1393 credits_needed += calc_rw_credits(t, desc_buf, desc_buf_len); in smb_direct_rdma_xmit()
1402 ret = wait_for_rw_credits(t, credits_needed); in smb_direct_rdma_xmit()
1418 msg->t = t; in smb_direct_rdma_xmit()
1440 ret = rdma_rw_ctx_init(&msg->rw_ctx, t->qp, t->qp->port, in smb_direct_rdma_xmit()
1461 first_wr = rdma_rw_ctx_wrs(&msg->rw_ctx, t->qp, t->qp->port, in smb_direct_rdma_xmit()
1465 ret = ib_post_send(t->qp, first_wr, NULL); in smb_direct_rdma_xmit()
1477 smb_direct_free_rdma_rw_msg(t, msg, in smb_direct_rdma_xmit()
1480 atomic_add(credits_needed, &t->rw_credits); in smb_direct_rdma_xmit()
1481 wake_up(&t->wait_rw_credits); in smb_direct_rdma_xmit()
1485 static int smb_direct_rdma_write(struct ksmbd_transport *t, in smb_direct_rdma_write() argument
1490 return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen, in smb_direct_rdma_write()
1494 static int smb_direct_rdma_read(struct ksmbd_transport *t, in smb_direct_rdma_read() argument
1499 return smb_direct_rdma_xmit(smb_trans_direct_transfort(t), buf, buflen, in smb_direct_rdma_read()
1503 static void smb_direct_disconnect(struct ksmbd_transport *t) in smb_direct_disconnect() argument
1505 struct smb_direct_transport *st = smb_trans_direct_transfort(t); in smb_direct_disconnect()
1515 static void smb_direct_shutdown(struct ksmbd_transport *t) in smb_direct_shutdown() argument
1517 struct smb_direct_transport *st = smb_trans_direct_transfort(t); in smb_direct_shutdown()
1527 struct smb_direct_transport *t = cm_id->context; in smb_direct_cm_handler() local
1534 t->status = SMB_DIRECT_CS_CONNECTED; in smb_direct_cm_handler()
1535 wake_up_interruptible(&t->wait_status); in smb_direct_cm_handler()
1540 ib_drain_qp(t->qp); in smb_direct_cm_handler()
1542 t->status = SMB_DIRECT_CS_DISCONNECTED; in smb_direct_cm_handler()
1543 wake_up_interruptible(&t->wait_status); in smb_direct_cm_handler()
1544 wake_up_interruptible(&t->wait_reassembly_queue); in smb_direct_cm_handler()
1545 wake_up(&t->wait_send_credits); in smb_direct_cm_handler()
1549 t->status = SMB_DIRECT_CS_DISCONNECTED; in smb_direct_cm_handler()
1550 wake_up_interruptible(&t->wait_status); in smb_direct_cm_handler()
1564 struct smb_direct_transport *t = context; in smb_direct_qpair_handler() local
1567 t->cm_id, ib_event_msg(event->event), event->event); in smb_direct_qpair_handler()
1572 smb_direct_disconnect_rdma_connection(t); in smb_direct_qpair_handler()
1579 static int smb_direct_send_negotiate_response(struct smb_direct_transport *t, in smb_direct_send_negotiate_response() argument
1586 sendmsg = smb_direct_alloc_sendmsg(t); in smb_direct_send_negotiate_response()
1603 cpu_to_le16(t->send_credit_target); in smb_direct_send_negotiate_response()
1604 resp->credits_granted = cpu_to_le16(manage_credits_prior_sending(t)); in smb_direct_send_negotiate_response()
1605 resp->max_readwrite_size = cpu_to_le32(t->max_rdma_rw_size); in smb_direct_send_negotiate_response()
1606 resp->preferred_send_size = cpu_to_le32(t->max_send_size); in smb_direct_send_negotiate_response()
1607 resp->max_receive_size = cpu_to_le32(t->max_recv_size); in smb_direct_send_negotiate_response()
1609 cpu_to_le32(t->max_fragmented_recv_size); in smb_direct_send_negotiate_response()
1612 sendmsg->sge[0].addr = ib_dma_map_single(t->cm_id->device, in smb_direct_send_negotiate_response()
1615 ret = ib_dma_mapping_error(t->cm_id->device, sendmsg->sge[0].addr); in smb_direct_send_negotiate_response()
1617 smb_direct_free_sendmsg(t, sendmsg); in smb_direct_send_negotiate_response()
1623 sendmsg->sge[0].lkey = t->pd->local_dma_lkey; in smb_direct_send_negotiate_response()
1625 ret = post_sendmsg(t, NULL, sendmsg); in smb_direct_send_negotiate_response()
1627 smb_direct_free_sendmsg(t, sendmsg); in smb_direct_send_negotiate_response()
1631 wait_event(t->wait_send_pending, in smb_direct_send_negotiate_response()
1632 atomic_read(&t->send_pending) == 0); in smb_direct_send_negotiate_response()
1636 static int smb_direct_accept_client(struct smb_direct_transport *t) in smb_direct_accept_client() argument
1644 conn_param.initiator_depth = min_t(u8, t->cm_id->device->attrs.max_qp_rd_atom, in smb_direct_accept_client()
1648 t->cm_id->device->ops.get_port_immutable(t->cm_id->device, in smb_direct_accept_client()
1649 t->cm_id->port_num, in smb_direct_accept_client()
1664 ret = rdma_accept(t->cm_id, &conn_param); in smb_direct_accept_client()
1672 static int smb_direct_prepare_negotiation(struct smb_direct_transport *t) in smb_direct_prepare_negotiation() argument
1677 recvmsg = get_free_recvmsg(t); in smb_direct_prepare_negotiation()
1682 ret = smb_direct_post_recv(t, recvmsg); in smb_direct_prepare_negotiation()
1684 pr_err("Can't post recv: %d\n", ret); in smb_direct_prepare_negotiation()
1688 t->negotiation_requested = false; in smb_direct_prepare_negotiation()
1689 ret = smb_direct_accept_client(t); in smb_direct_prepare_negotiation()
1691 pr_err("Can't accept client\n"); in smb_direct_prepare_negotiation()
1695 smb_direct_post_recv_credits(&t->post_recv_credits_work.work); in smb_direct_prepare_negotiation()
1698 put_recvmsg(t, recvmsg); in smb_direct_prepare_negotiation()
1702 static unsigned int smb_direct_get_max_fr_pages(struct smb_direct_transport *t) in smb_direct_get_max_fr_pages() argument
1705 t->cm_id->device->attrs.max_fast_reg_page_list_len, in smb_direct_get_max_fr_pages()
1709 static int smb_direct_init_params(struct smb_direct_transport *t, in smb_direct_init_params() argument
1712 struct ib_device *device = t->cm_id->device; in smb_direct_init_params()
1719 t->max_send_size = smb_direct_max_send_size; in smb_direct_init_params()
1720 max_send_sges = DIV_ROUND_UP(t->max_send_size, PAGE_SIZE) + 3; in smb_direct_init_params()
1722 pr_err("max_send_size %d is too large\n", t->max_send_size); in smb_direct_init_params()
1733 t->max_rdma_rw_size = smb_direct_max_read_write_size; in smb_direct_init_params()
1734 t->pages_per_rw_credit = smb_direct_get_max_fr_pages(t); in smb_direct_init_params()
1735 t->max_rw_credits = DIV_ROUND_UP(t->max_rdma_rw_size, in smb_direct_init_params()
1736 (t->pages_per_rw_credit - 1) * in smb_direct_init_params()
1744 DIV_ROUND_UP(t->pages_per_rw_credit, in smb_direct_init_params()
1746 max_rw_wrs = t->max_rw_credits * wrs_per_credit; in smb_direct_init_params()
1773 t->recv_credits = 0; in smb_direct_init_params()
1774 t->count_avail_recvmsg = 0; in smb_direct_init_params()
1776 t->recv_credit_max = smb_direct_receive_credit_max; in smb_direct_init_params()
1777 t->recv_credit_target = 10; in smb_direct_init_params()
1778 t->new_recv_credits = 0; in smb_direct_init_params()
1780 t->send_credit_target = smb_direct_send_credit_target; in smb_direct_init_params()
1781 atomic_set(&t->send_credits, 0); in smb_direct_init_params()
1782 atomic_set(&t->rw_credits, t->max_rw_credits); in smb_direct_init_params()
1784 t->max_send_size = smb_direct_max_send_size; in smb_direct_init_params()
1785 t->max_recv_size = smb_direct_max_receive_size; in smb_direct_init_params()
1786 t->max_fragmented_recv_size = smb_direct_max_fragmented_recv_size; in smb_direct_init_params()
1789 cap->max_recv_wr = t->recv_credit_max; in smb_direct_init_params()
1793 cap->max_rdma_ctxs = t->max_rw_credits; in smb_direct_init_params()
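
smb_direct_init_params() derives queue sizing from the configured transfer limits: send SGEs from the maximum send size, and RDMA read/write credits from the maximum read/write size divided by the pages one credit (one MR) can cover. A small arithmetic sketch of those derivations; the constants are illustrative stand-ins, not the module-parameter defaults:

/* Sketch of the queue sizing arithmetic in smb_direct_init_params():
 * derive send-SGE and RW-credit counts from the transfer limits.  The
 * constants are illustrative, not the real module-parameter defaults.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define PAGE_SZ			4096	/* stands in for PAGE_SIZE */

int main(void)
{
	int max_send_size = 8192;		/* per-send payload limit */
	int max_rdma_rw_size = 1 << 20;		/* per RDMA read/write limit */
	int pages_per_rw_credit = 256;		/* pages one MR can map */

	/* payload pages plus a few extra entries (header, alignment) */
	int max_send_sges = DIV_ROUND_UP(max_send_size, PAGE_SZ) + 3;

	/* an unaligned buffer can straddle one extra page, hence the
	 * (pages_per_rw_credit - 1) payload pages counted per credit */
	int max_rw_credits = DIV_ROUND_UP(max_rdma_rw_size,
					  (pages_per_rw_credit - 1) * PAGE_SZ);

	printf("max_send_sges=%d max_rw_credits=%d\n",
	       max_send_sges, max_rw_credits);
	return 0;
}
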
1797 static void smb_direct_destroy_pools(struct smb_direct_transport *t) in smb_direct_destroy_pools() argument
1801 while ((recvmsg = get_free_recvmsg(t))) in smb_direct_destroy_pools()
1802 mempool_free(recvmsg, t->recvmsg_mempool); in smb_direct_destroy_pools()
1803 while ((recvmsg = get_empty_recvmsg(t))) in smb_direct_destroy_pools()
1804 mempool_free(recvmsg, t->recvmsg_mempool); in smb_direct_destroy_pools()
1806 mempool_destroy(t->recvmsg_mempool); in smb_direct_destroy_pools()
1807 t->recvmsg_mempool = NULL; in smb_direct_destroy_pools()
1809 kmem_cache_destroy(t->recvmsg_cache); in smb_direct_destroy_pools()
1810 t->recvmsg_cache = NULL; in smb_direct_destroy_pools()
1812 mempool_destroy(t->sendmsg_mempool); in smb_direct_destroy_pools()
1813 t->sendmsg_mempool = NULL; in smb_direct_destroy_pools()
1815 kmem_cache_destroy(t->sendmsg_cache); in smb_direct_destroy_pools()
1816 t->sendmsg_cache = NULL; in smb_direct_destroy_pools()
1819 static int smb_direct_create_pools(struct smb_direct_transport *t) in smb_direct_create_pools() argument
1825 snprintf(name, sizeof(name), "smb_direct_rqst_pool_%p", t); in smb_direct_create_pools()
1826 t->sendmsg_cache = kmem_cache_create(name, in smb_direct_create_pools()
1830 if (!t->sendmsg_cache) in smb_direct_create_pools()
1833 t->sendmsg_mempool = mempool_create(t->send_credit_target, in smb_direct_create_pools()
1835 t->sendmsg_cache); in smb_direct_create_pools()
1836 if (!t->sendmsg_mempool) in smb_direct_create_pools()
1839 snprintf(name, sizeof(name), "smb_direct_resp_%p", t); in smb_direct_create_pools()
1840 t->recvmsg_cache = kmem_cache_create(name, in smb_direct_create_pools()
1842 t->max_recv_size, in smb_direct_create_pools()
1844 if (!t->recvmsg_cache) in smb_direct_create_pools()
1847 t->recvmsg_mempool = in smb_direct_create_pools()
1848 mempool_create(t->recv_credit_max, mempool_alloc_slab, in smb_direct_create_pools()
1849 mempool_free_slab, t->recvmsg_cache); in smb_direct_create_pools()
1850 if (!t->recvmsg_mempool) in smb_direct_create_pools()
1853 INIT_LIST_HEAD(&t->recvmsg_queue); in smb_direct_create_pools()
1855 for (i = 0; i < t->recv_credit_max; i++) { in smb_direct_create_pools()
1856 recvmsg = mempool_alloc(t->recvmsg_mempool, KSMBD_DEFAULT_GFP); in smb_direct_create_pools()
1859 recvmsg->transport = t; in smb_direct_create_pools()
1860 list_add(&recvmsg->list, &t->recvmsg_queue); in smb_direct_create_pools()
1862 t->count_avail_recvmsg = t->recv_credit_max; in smb_direct_create_pools()
1866 smb_direct_destroy_pools(t); in smb_direct_create_pools()
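
smb_direct_create_pools() preallocates one receive descriptor per maximum receive credit and parks them all on the free list, so the receive completion path never allocates. A hedged sketch of that preallocate-or-unwind step, with plain malloc in place of the kmem_cache/mempool pair:

/* Sketch of receive-descriptor preallocation: carve out one descriptor
 * per maximum receive credit up front and thread them onto a free
 * list, unwinding everything if an allocation fails.  malloc stands in
 * for the kmem_cache + mempool pair; names are illustrative.
 */
#include <stdlib.h>

struct recvmsg_stub {
	struct recvmsg_stub *next;
	char packet[4096];
};

struct recv_pool {
	struct recvmsg_stub *free_list;
	int count_avail;
};

static void destroy_recv_pool(struct recv_pool *p)
{
	while (p->free_list) {
		struct recvmsg_stub *msg = p->free_list;

		p->free_list = msg->next;
		free(msg);
	}
	p->count_avail = 0;
}

static int create_recv_pool(struct recv_pool *p, int recv_credit_max)
{
	int i;

	p->free_list = NULL;
	p->count_avail = 0;

	for (i = 0; i < recv_credit_max; i++) {
		struct recvmsg_stub *msg = malloc(sizeof(*msg));

		if (!msg) {
			destroy_recv_pool(p);	/* like the goto err path */
			return -1;
		}
		msg->next = p->free_list;
		p->free_list = msg;
		p->count_avail++;
	}
	return 0;
}

int main(void)
{
	struct recv_pool p;
	int ret = create_recv_pool(&p, 16);

	destroy_recv_pool(&p);
	return ret ? 1 : 0;
}
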
1870 static int smb_direct_create_qpair(struct smb_direct_transport *t, in smb_direct_create_qpair() argument
1877 t->pd = ib_alloc_pd(t->cm_id->device, 0); in smb_direct_create_qpair()
1878 if (IS_ERR(t->pd)) { in smb_direct_create_qpair()
1879 pr_err("Can't create RDMA PD\n"); in smb_direct_create_qpair()
1880 ret = PTR_ERR(t->pd); in smb_direct_create_qpair()
1881 t->pd = NULL; in smb_direct_create_qpair()
1885 t->send_cq = ib_alloc_cq(t->cm_id->device, t, in smb_direct_create_qpair()
1888 if (IS_ERR(t->send_cq)) { in smb_direct_create_qpair()
1889 pr_err("Can't create RDMA send CQ\n"); in smb_direct_create_qpair()
1890 ret = PTR_ERR(t->send_cq); in smb_direct_create_qpair()
1891 t->send_cq = NULL; in smb_direct_create_qpair()
1895 t->recv_cq = ib_alloc_cq(t->cm_id->device, t, in smb_direct_create_qpair()
1896 t->recv_credit_max, 0, IB_POLL_WORKQUEUE); in smb_direct_create_qpair()
1897 if (IS_ERR(t->recv_cq)) { in smb_direct_create_qpair()
1898 pr_err("Can't create RDMA recv CQ\n"); in smb_direct_create_qpair()
1899 ret = PTR_ERR(t->recv_cq); in smb_direct_create_qpair()
1900 t->recv_cq = NULL; in smb_direct_create_qpair()
1906 qp_attr.qp_context = t; in smb_direct_create_qpair()
1910 qp_attr.send_cq = t->send_cq; in smb_direct_create_qpair()
1911 qp_attr.recv_cq = t->recv_cq; in smb_direct_create_qpair()
1914 ret = rdma_create_qp(t->cm_id, t->pd, &qp_attr); in smb_direct_create_qpair()
1916 pr_err("Can't create RDMA QP: %d\n", ret); in smb_direct_create_qpair()
1920 t->qp = t->cm_id->qp; in smb_direct_create_qpair()
1921 t->cm_id->event_handler = smb_direct_cm_handler; in smb_direct_create_qpair()
1923 pages_per_rw = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1; in smb_direct_create_qpair()
1924 if (pages_per_rw > t->cm_id->device->attrs.max_sgl_rd) { in smb_direct_create_qpair()
1925 ret = ib_mr_pool_init(t->qp, &t->qp->rdma_mrs, in smb_direct_create_qpair()
1926 t->max_rw_credits, IB_MR_TYPE_MEM_REG, in smb_direct_create_qpair()
1927 t->pages_per_rw_credit, 0); in smb_direct_create_qpair()
1930 t->max_rw_credits, t->pages_per_rw_credit); in smb_direct_create_qpair()
1937 if (t->qp) { in smb_direct_create_qpair()
1938 ib_destroy_qp(t->qp); in smb_direct_create_qpair()
1939 t->qp = NULL; in smb_direct_create_qpair()
1941 if (t->recv_cq) { in smb_direct_create_qpair()
1942 ib_destroy_cq(t->recv_cq); in smb_direct_create_qpair()
1943 t->recv_cq = NULL; in smb_direct_create_qpair()
1945 if (t->send_cq) { in smb_direct_create_qpair()
1946 ib_destroy_cq(t->send_cq); in smb_direct_create_qpair()
1947 t->send_cq = NULL; in smb_direct_create_qpair()
1949 if (t->pd) { in smb_direct_create_qpair()
1950 ib_dealloc_pd(t->pd); in smb_direct_create_qpair()
1951 t->pd = NULL; in smb_direct_create_qpair()
1956 static int smb_direct_prepare(struct ksmbd_transport *t) in smb_direct_prepare() argument
1958 struct smb_direct_transport *st = smb_trans_direct_transfort(t); in smb_direct_prepare()
2007 pr_err("Can't configure RDMA parameters\n"); in smb_direct_connect()
2013 pr_err("Can't init RDMA pool: %d\n", ret); in smb_direct_connect()
2019 pr_err("Can't accept RDMA client: %d\n", ret); in smb_direct_connect()
2025 pr_err("Can't negotiate: %d\n", ret); in smb_direct_connect()
2042 struct smb_direct_transport *t; in smb_direct_handle_connect_request() local
2053 t = alloc_transport(new_cm_id); in smb_direct_handle_connect_request()
2054 if (!t) in smb_direct_handle_connect_request()
2057 ret = smb_direct_connect(t); in smb_direct_handle_connect_request()
2062 KSMBD_TRANS(t)->conn, "ksmbd:r%u", in smb_direct_handle_connect_request()
2066 pr_err("Can't start thread\n"); in smb_direct_handle_connect_request()
2072 free_transport(t); in smb_direct_handle_connect_request()
2084 pr_err("Can't create transport: %d\n", ret); in smb_direct_listen_handler()
2113 pr_err("Can't create cm id: %ld\n", PTR_ERR(cm_id)); in smb_direct_listen()
2119 pr_err("Can't bind: %d\n", ret); in smb_direct_listen()
2127 pr_err("Can't listen: %d\n", ret); in smb_direct_listen()
2209 pr_err("Can't listen: %d\n", ret); in ksmbd_rdma_init()