Lines Matching "no-memory-wc"
1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
14 #include <linux/blk-mq.h>
15 #include <linux/blk-mq-rdma.h>
25 #include <linux/nvme-rdma.h>
143 * allows read and write access to all physical memory.
148 "Use memory registration even for contiguous memory regions");
152 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
160 return queue - queue->ctrl->queues; in nvme_rdma_queue_idx()
166 queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_rdma_poll_queue()
167 queue->ctrl->io_queues[HCTX_TYPE_READ]; in nvme_rdma_poll_queue()
172 return queue->cmnd_capsule_len - sizeof(struct nvme_command); in nvme_rdma_inline_data_size()
178 ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir); in nvme_rdma_free_qe()
179 kfree(qe->data); in nvme_rdma_free_qe()
185 qe->data = kzalloc(capsule_size, GFP_KERNEL); in nvme_rdma_alloc_qe()
186 if (!qe->data) in nvme_rdma_alloc_qe()
187 return -ENOMEM; in nvme_rdma_alloc_qe()
189 qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir); in nvme_rdma_alloc_qe()
190 if (ib_dma_mapping_error(ibdev, qe->dma)) { in nvme_rdma_alloc_qe()
191 kfree(qe->data); in nvme_rdma_alloc_qe()
192 qe->data = NULL; in nvme_rdma_alloc_qe()
193 return -ENOMEM; in nvme_rdma_alloc_qe()
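The allocation path above pairs kzalloc() with ib_dma_map_single(), and the mapping-error branch unwinds the allocation so teardown can key off qe->data. Reassembled from the fragments shown, the whole helper reads roughly as follows (a sketch; the nvme_rdma_qe member names are taken from the lines above):

	static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
			size_t capsule_size, enum dma_data_direction dir)
	{
		qe->data = kzalloc(capsule_size, GFP_KERNEL);
		if (!qe->data)
			return -ENOMEM;

		qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
		if (ib_dma_mapping_error(ibdev, qe->dma)) {
			/* unwind and leave qe->data NULL so the free path can
			 * tell this entry was never mapped */
			kfree(qe->data);
			qe->data = NULL;
			return -ENOMEM;
		}

		return 0;
	}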
224 * will issue error recovery and queue re-creation. in nvme_rdma_alloc_ring()
241 ib_event_msg(event->event), event->event); in nvme_rdma_qp_event()
249 ret = wait_for_completion_interruptible_timeout(&queue->cm_done, in nvme_rdma_wait_for_cm()
254 return -ETIMEDOUT; in nvme_rdma_wait_for_cm()
255 WARN_ON_ONCE(queue->cm_error > 0); in nvme_rdma_wait_for_cm()
256 return queue->cm_error; in nvme_rdma_wait_for_cm()
261 struct nvme_rdma_device *dev = queue->device; in nvme_rdma_create_qp()
268 init_attr.cap.max_send_wr = factor * queue->queue_size + 1; in nvme_rdma_create_qp()
270 init_attr.cap.max_recv_wr = queue->queue_size + 1; in nvme_rdma_create_qp()
272 init_attr.cap.max_send_sge = 1 + dev->num_inline_segments; in nvme_rdma_create_qp()
275 init_attr.send_cq = queue->ib_cq; in nvme_rdma_create_qp()
276 init_attr.recv_cq = queue->ib_cq; in nvme_rdma_create_qp()
277 if (queue->pi_support) in nvme_rdma_create_qp()
281 ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr); in nvme_rdma_create_qp()
283 queue->qp = queue->cm_id->qp; in nvme_rdma_create_qp()
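Both work-request caps above carry a +1; the extra slot is reserved for the drain work request that ib_drain_qp() posts during queue teardown (see __nvme_rdma_stop_queue() at line 637). With the fields elided by the search filled back in, the attribute setup reads roughly as follows (a sketch; sq_sig_type, qp_type and qp_context are assumptions based on the usual RC QP setup):

	struct ib_qp_init_attr init_attr = { };
	int ret;

	init_attr.event_handler = nvme_rdma_qp_event;
	init_attr.cap.max_send_wr = factor * queue->queue_size + 1;	/* +1 for drain */
	init_attr.cap.max_recv_wr = queue->queue_size + 1;		/* +1 for drain */
	init_attr.cap.max_recv_sge = 1;
	init_attr.cap.max_send_sge = 1 + dev->num_inline_segments;
	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr.qp_type = IB_QPT_RC;
	init_attr.send_cq = queue->ib_cq;
	init_attr.recv_cq = queue->ib_cq;
	init_attr.qp_context = queue;
	if (queue->pi_support)
		init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;

	ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);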
292 kfree(req->sqe.data); in nvme_rdma_exit_request()
299 struct nvme_rdma_ctrl *ctrl = set->driver_data; in nvme_rdma_init_request()
301 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; in nvme_rdma_init_request()
302 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; in nvme_rdma_init_request()
304 nvme_req(rq)->ctrl = &ctrl->ctrl; in nvme_rdma_init_request()
305 req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL); in nvme_rdma_init_request()
306 if (!req->sqe.data) in nvme_rdma_init_request()
307 return -ENOMEM; in nvme_rdma_init_request()
310 if (queue->pi_support) in nvme_rdma_init_request()
311 req->metadata_sgl = (void *)nvme_req(rq) + in nvme_rdma_init_request()
315 req->queue = queue; in nvme_rdma_init_request()
324 struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_rdma_init_hctx()
326 BUG_ON(hctx_idx >= ctrl->ctrl.queue_count); in nvme_rdma_init_hctx()
328 hctx->driver_data = queue; in nvme_rdma_init_hctx()
336 struct nvme_rdma_queue *queue = &ctrl->queues[0]; in nvme_rdma_init_admin_hctx()
340 hctx->driver_data = queue; in nvme_rdma_init_admin_hctx()
350 list_del(&ndev->entry); in nvme_rdma_free_dev()
353 ib_dealloc_pd(ndev->pd); in nvme_rdma_free_dev()
359 kref_put(&dev->ref, nvme_rdma_free_dev); in nvme_rdma_dev_put()
364 return kref_get_unless_zero(&dev->ref); in nvme_rdma_dev_get()
374 if (ndev->dev->node_guid == cm_id->device->node_guid && in nvme_rdma_find_get_device()
383 ndev->dev = cm_id->device; in nvme_rdma_find_get_device()
384 kref_init(&ndev->ref); in nvme_rdma_find_get_device()
386 ndev->pd = ib_alloc_pd(ndev->dev, in nvme_rdma_find_get_device()
388 if (IS_ERR(ndev->pd)) in nvme_rdma_find_get_device()
391 if (!(ndev->dev->attrs.device_cap_flags & in nvme_rdma_find_get_device()
393 dev_err(&ndev->dev->dev, in nvme_rdma_find_get_device()
394 "Memory registrations not supported.\n"); in nvme_rdma_find_get_device()
398 ndev->num_inline_segments = min(NVME_RDMA_MAX_INLINE_SEGMENTS, in nvme_rdma_find_get_device()
399 ndev->dev->attrs.max_send_sge - 1); in nvme_rdma_find_get_device()
400 list_add(&ndev->entry, &device_list); in nvme_rdma_find_get_device()
406 ib_dealloc_pd(ndev->pd); in nvme_rdma_find_get_device()
417 ib_free_cq(queue->ib_cq); in nvme_rdma_free_cq()
419 ib_cq_pool_put(queue->ib_cq, queue->cq_size); in nvme_rdma_free_cq()
427 if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags)) in nvme_rdma_destroy_queue_ib()
430 dev = queue->device; in nvme_rdma_destroy_queue_ib()
431 ibdev = dev->dev; in nvme_rdma_destroy_queue_ib()
433 if (queue->pi_support) in nvme_rdma_destroy_queue_ib()
434 ib_mr_pool_destroy(queue->qp, &queue->qp->sig_mrs); in nvme_rdma_destroy_queue_ib()
435 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs); in nvme_rdma_destroy_queue_ib()
442 ib_destroy_qp(queue->qp); in nvme_rdma_destroy_queue_ib()
445 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size, in nvme_rdma_destroy_queue_ib()
456 max_page_list_len = ibdev->attrs.max_pi_fast_reg_page_list_len; in nvme_rdma_get_max_fr_pages()
458 max_page_list_len = ibdev->attrs.max_fast_reg_page_list_len; in nvme_rdma_get_max_fr_pages()
460 return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1); in nvme_rdma_get_max_fr_pages()
473 comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors; in nvme_rdma_create_cq()
478 queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size, in nvme_rdma_create_cq()
482 queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size, in nvme_rdma_create_cq()
486 if (IS_ERR(queue->ib_cq)) { in nvme_rdma_create_cq()
487 ret = PTR_ERR(queue->ib_cq); in nvme_rdma_create_cq()
501 queue->device = nvme_rdma_find_get_device(queue->cm_id); in nvme_rdma_create_queue_ib()
502 if (!queue->device) { in nvme_rdma_create_queue_ib()
503 dev_err(queue->cm_id->device->dev.parent, in nvme_rdma_create_queue_ib()
504 "no client data found!\n"); in nvme_rdma_create_queue_ib()
505 return -ECONNREFUSED; in nvme_rdma_create_queue_ib()
507 ibdev = queue->device->dev; in nvme_rdma_create_queue_ib()
510 queue->cq_size = cq_factor * queue->queue_size + 1; in nvme_rdma_create_queue_ib()
520 queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size, in nvme_rdma_create_queue_ib()
522 if (!queue->rsp_ring) { in nvme_rdma_create_queue_ib()
523 ret = -ENOMEM; in nvme_rdma_create_queue_ib()
532 pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1; in nvme_rdma_create_queue_ib()
533 ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs, in nvme_rdma_create_queue_ib()
534 queue->queue_size, in nvme_rdma_create_queue_ib()
538 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_create_queue_ib()
540 queue->queue_size, nvme_rdma_queue_idx(queue)); in nvme_rdma_create_queue_ib()
544 if (queue->pi_support) { in nvme_rdma_create_queue_ib()
545 ret = ib_mr_pool_init(queue->qp, &queue->qp->sig_mrs, in nvme_rdma_create_queue_ib()
546 queue->queue_size, IB_MR_TYPE_INTEGRITY, in nvme_rdma_create_queue_ib()
549 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_create_queue_ib()
551 queue->queue_size, nvme_rdma_queue_idx(queue)); in nvme_rdma_create_queue_ib()
556 set_bit(NVME_RDMA_Q_TR_READY, &queue->flags); in nvme_rdma_create_queue_ib()
561 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs); in nvme_rdma_create_queue_ib()
563 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size, in nvme_rdma_create_queue_ib()
566 rdma_destroy_qp(queue->cm_id); in nvme_rdma_create_queue_ib()
570 nvme_rdma_dev_put(queue->device); in nvme_rdma_create_queue_ib()
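Two off-by-one-looking constants in this function deserve a note. The +1 on pages_per_mr (line 532) matches the in-tree rationale that the driver does not use SG_GAPS MRs, so a misaligned first entry can consume two page-list entries for a single data page. The -1 in nvme_rdma_get_max_fr_pages() (line 460) keeps the advertised segment count one below the device limit for the same reason: a device reporting max_fast_reg_page_list_len = 256 advertises min_t(u32, NVME_RDMA_MAX_SEGMENTS, 255) segments, while each pooled MR is still initialized with the full 256 page-list entries.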
581 queue = &ctrl->queues[idx]; in nvme_rdma_alloc_queue()
582 queue->ctrl = ctrl; in nvme_rdma_alloc_queue()
583 if (idx && ctrl->ctrl.max_integrity_segments) in nvme_rdma_alloc_queue()
584 queue->pi_support = true; in nvme_rdma_alloc_queue()
586 queue->pi_support = false; in nvme_rdma_alloc_queue()
587 init_completion(&queue->cm_done); in nvme_rdma_alloc_queue()
590 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; in nvme_rdma_alloc_queue()
592 queue->cmnd_capsule_len = sizeof(struct nvme_command); in nvme_rdma_alloc_queue()
594 queue->queue_size = queue_size; in nvme_rdma_alloc_queue()
596 queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue, in nvme_rdma_alloc_queue()
598 if (IS_ERR(queue->cm_id)) { in nvme_rdma_alloc_queue()
599 dev_info(ctrl->ctrl.device, in nvme_rdma_alloc_queue()
600 "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id)); in nvme_rdma_alloc_queue()
601 return PTR_ERR(queue->cm_id); in nvme_rdma_alloc_queue()
604 if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) in nvme_rdma_alloc_queue()
605 src_addr = (struct sockaddr *)&ctrl->src_addr; in nvme_rdma_alloc_queue()
607 queue->cm_error = -ETIMEDOUT; in nvme_rdma_alloc_queue()
608 ret = rdma_resolve_addr(queue->cm_id, src_addr, in nvme_rdma_alloc_queue()
609 (struct sockaddr *)&ctrl->addr, in nvme_rdma_alloc_queue()
612 dev_info(ctrl->ctrl.device, in nvme_rdma_alloc_queue()
619 dev_info(ctrl->ctrl.device, in nvme_rdma_alloc_queue()
624 set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags); in nvme_rdma_alloc_queue()
629 rdma_destroy_id(queue->cm_id); in nvme_rdma_alloc_queue()
636 rdma_disconnect(queue->cm_id); in __nvme_rdma_stop_queue()
637 ib_drain_qp(queue->qp); in __nvme_rdma_stop_queue()
642 if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) in nvme_rdma_stop_queue()
649 if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) in nvme_rdma_free_queue()
653 rdma_destroy_id(queue->cm_id); in nvme_rdma_free_queue()
660 for (i = 1; i < ctrl->ctrl.queue_count; i++) in nvme_rdma_free_io_queues()
661 nvme_rdma_free_queue(&ctrl->queues[i]); in nvme_rdma_free_io_queues()
668 for (i = 1; i < ctrl->ctrl.queue_count; i++) in nvme_rdma_stop_io_queues()
669 nvme_rdma_stop_queue(&ctrl->queues[i]); in nvme_rdma_stop_io_queues()
674 struct nvme_rdma_queue *queue = &ctrl->queues[idx]; in nvme_rdma_start_queue()
679 ret = nvmf_connect_io_queue(&ctrl->ctrl, idx, poll); in nvme_rdma_start_queue()
681 ret = nvmf_connect_admin_queue(&ctrl->ctrl); in nvme_rdma_start_queue()
684 set_bit(NVME_RDMA_Q_LIVE, &queue->flags); in nvme_rdma_start_queue()
686 if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) in nvme_rdma_start_queue()
688 dev_info(ctrl->ctrl.device, in nvme_rdma_start_queue()
698 for (i = 1; i < ctrl->ctrl.queue_count; i++) { in nvme_rdma_start_io_queues()
707 for (i--; i >= 1; i--) in nvme_rdma_start_io_queues()
708 nvme_rdma_stop_queue(&ctrl->queues[i]); in nvme_rdma_start_io_queues()
714 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_rdma_alloc_io_queues()
715 struct ib_device *ibdev = ctrl->device->dev; in nvme_rdma_alloc_io_queues()
720 nr_read_queues = min_t(unsigned int, ibdev->num_comp_vectors, in nvme_rdma_alloc_io_queues()
721 min(opts->nr_io_queues, num_online_cpus())); in nvme_rdma_alloc_io_queues()
722 nr_default_queues = min_t(unsigned int, ibdev->num_comp_vectors, in nvme_rdma_alloc_io_queues()
723 min(opts->nr_write_queues, num_online_cpus())); in nvme_rdma_alloc_io_queues()
724 nr_poll_queues = min(opts->nr_poll_queues, num_online_cpus()); in nvme_rdma_alloc_io_queues()
727 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); in nvme_rdma_alloc_io_queues()
731 ctrl->ctrl.queue_count = nr_io_queues + 1; in nvme_rdma_alloc_io_queues()
732 if (ctrl->ctrl.queue_count < 2) in nvme_rdma_alloc_io_queues()
735 dev_info(ctrl->ctrl.device, in nvme_rdma_alloc_io_queues()
738 if (opts->nr_write_queues && nr_read_queues < nr_io_queues) { in nvme_rdma_alloc_io_queues()
744 ctrl->io_queues[HCTX_TYPE_READ] = nr_read_queues; in nvme_rdma_alloc_io_queues()
745 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ]; in nvme_rdma_alloc_io_queues()
746 ctrl->io_queues[HCTX_TYPE_DEFAULT] = in nvme_rdma_alloc_io_queues()
748 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_rdma_alloc_io_queues()
752 * either no write queues were requested, or we don't have in nvme_rdma_alloc_io_queues()
755 ctrl->io_queues[HCTX_TYPE_DEFAULT] = in nvme_rdma_alloc_io_queues()
757 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_rdma_alloc_io_queues()
760 if (opts->nr_poll_queues && nr_io_queues) { in nvme_rdma_alloc_io_queues()
762 ctrl->io_queues[HCTX_TYPE_POLL] = in nvme_rdma_alloc_io_queues()
766 for (i = 1; i < ctrl->ctrl.queue_count; i++) { in nvme_rdma_alloc_io_queues()
768 ctrl->ctrl.sqsize + 1); in nvme_rdma_alloc_io_queues()
776 for (i--; i >= 1; i--) in nvme_rdma_alloc_io_queues()
777 nvme_rdma_free_queue(&ctrl->queues[i]); in nvme_rdma_alloc_io_queues()
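To make the split above concrete with hypothetical numbers: say the target grants nr_io_queues = 8 and the user requested nr_io_queues = 4 (reads), nr_write_queues = 2 and nr_poll_queues = 2, with enough CPUs and completion vectors for all of them. nr_read_queues = 4 is less than 8, so the dedicated-queue branch is taken: HCTX_TYPE_READ takes 4 queues (4 remain), HCTX_TYPE_DEFAULT takes min(2, 4) = 2 (2 remain), and HCTX_TYPE_POLL takes min(2, 2) = 2. With fewer granted queues, the else branch instead shares one set of queues between reads and writes.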
790 set = &ctrl->admin_tag_set; in nvme_rdma_alloc_tagset()
792 set->ops = &nvme_rdma_admin_mq_ops; in nvme_rdma_alloc_tagset()
793 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; in nvme_rdma_alloc_tagset()
794 set->reserved_tags = 2; /* connect + keep-alive */ in nvme_rdma_alloc_tagset()
795 set->numa_node = nctrl->numa_node; in nvme_rdma_alloc_tagset()
796 set->cmd_size = sizeof(struct nvme_rdma_request) + in nvme_rdma_alloc_tagset()
798 set->driver_data = ctrl; in nvme_rdma_alloc_tagset()
799 set->nr_hw_queues = 1; in nvme_rdma_alloc_tagset()
800 set->timeout = ADMIN_TIMEOUT; in nvme_rdma_alloc_tagset()
801 set->flags = BLK_MQ_F_NO_SCHED; in nvme_rdma_alloc_tagset()
803 set = &ctrl->tag_set; in nvme_rdma_alloc_tagset()
805 set->ops = &nvme_rdma_mq_ops; in nvme_rdma_alloc_tagset()
806 set->queue_depth = nctrl->sqsize + 1; in nvme_rdma_alloc_tagset()
807 set->reserved_tags = 1; /* fabric connect */ in nvme_rdma_alloc_tagset()
808 set->numa_node = nctrl->numa_node; in nvme_rdma_alloc_tagset()
809 set->flags = BLK_MQ_F_SHOULD_MERGE; in nvme_rdma_alloc_tagset()
810 set->cmd_size = sizeof(struct nvme_rdma_request) + in nvme_rdma_alloc_tagset()
812 if (nctrl->max_integrity_segments) in nvme_rdma_alloc_tagset()
813 set->cmd_size += sizeof(struct nvme_rdma_sgl) + in nvme_rdma_alloc_tagset()
815 set->driver_data = ctrl; in nvme_rdma_alloc_tagset()
816 set->nr_hw_queues = nctrl->queue_count - 1; in nvme_rdma_alloc_tagset()
817 set->timeout = NVME_IO_TIMEOUT; in nvme_rdma_alloc_tagset()
818 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2; in nvme_rdma_alloc_tagset()
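Both tag sets reserve per-request driver context via set->cmd_size: the nvme_rdma_request itself plus an inline data SGL, and, on controllers with integrity support, a second SGL for metadata. nvme_rdma_init_request() (lines 310-311 above) depends on this exact layout when it derives req->metadata_sgl by offsetting past the request and the data SGL. Schematically (a sketch using the size names from the fragments):

	/*
	 * per-request PDU behind struct request:
	 *
	 *   struct nvme_rdma_request
	 *   data SGL entries         (NVME_RDMA_DATA_SGL_SIZE)
	 *   [metadata SGL + entries  (sizeof(struct nvme_rdma_sgl) +
	 *                             NVME_RDMA_METADATA_SGL_SIZE), PI only]
	 */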
832 blk_cleanup_queue(ctrl->ctrl.admin_q); in nvme_rdma_destroy_admin_queue()
833 blk_cleanup_queue(ctrl->ctrl.fabrics_q); in nvme_rdma_destroy_admin_queue()
834 blk_mq_free_tag_set(ctrl->ctrl.admin_tagset); in nvme_rdma_destroy_admin_queue()
836 if (ctrl->async_event_sqe.data) { in nvme_rdma_destroy_admin_queue()
837 cancel_work_sync(&ctrl->ctrl.async_event_work); in nvme_rdma_destroy_admin_queue()
838 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, in nvme_rdma_destroy_admin_queue()
840 ctrl->async_event_sqe.data = NULL; in nvme_rdma_destroy_admin_queue()
842 nvme_rdma_free_queue(&ctrl->queues[0]); in nvme_rdma_destroy_admin_queue()
855 ctrl->device = ctrl->queues[0].device; in nvme_rdma_configure_admin_queue()
856 ctrl->ctrl.numa_node = dev_to_node(ctrl->device->dev->dma_device); in nvme_rdma_configure_admin_queue()
858 /* T10-PI support */ in nvme_rdma_configure_admin_queue()
859 if (ctrl->device->dev->attrs.device_cap_flags & in nvme_rdma_configure_admin_queue()
863 ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev, in nvme_rdma_configure_admin_queue()
869 * error recovery and queue re-creation. in nvme_rdma_configure_admin_queue()
871 error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe, in nvme_rdma_configure_admin_queue()
877 ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true); in nvme_rdma_configure_admin_queue()
878 if (IS_ERR(ctrl->ctrl.admin_tagset)) { in nvme_rdma_configure_admin_queue()
879 error = PTR_ERR(ctrl->ctrl.admin_tagset); in nvme_rdma_configure_admin_queue()
883 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set); in nvme_rdma_configure_admin_queue()
884 if (IS_ERR(ctrl->ctrl.fabrics_q)) { in nvme_rdma_configure_admin_queue()
885 error = PTR_ERR(ctrl->ctrl.fabrics_q); in nvme_rdma_configure_admin_queue()
889 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); in nvme_rdma_configure_admin_queue()
890 if (IS_ERR(ctrl->ctrl.admin_q)) { in nvme_rdma_configure_admin_queue()
891 error = PTR_ERR(ctrl->ctrl.admin_q); in nvme_rdma_configure_admin_queue()
900 error = nvme_enable_ctrl(&ctrl->ctrl); in nvme_rdma_configure_admin_queue()
904 ctrl->ctrl.max_segments = ctrl->max_fr_pages; in nvme_rdma_configure_admin_queue()
905 ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9); in nvme_rdma_configure_admin_queue()
907 ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages; in nvme_rdma_configure_admin_queue()
909 ctrl->ctrl.max_integrity_segments = 0; in nvme_rdma_configure_admin_queue()
911 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); in nvme_rdma_configure_admin_queue()
913 error = nvme_init_identify(&ctrl->ctrl); in nvme_rdma_configure_admin_queue()
920 nvme_rdma_stop_queue(&ctrl->queues[0]); in nvme_rdma_configure_admin_queue()
923 blk_cleanup_queue(ctrl->ctrl.admin_q); in nvme_rdma_configure_admin_queue()
926 blk_cleanup_queue(ctrl->ctrl.fabrics_q); in nvme_rdma_configure_admin_queue()
929 blk_mq_free_tag_set(ctrl->ctrl.admin_tagset); in nvme_rdma_configure_admin_queue()
931 if (ctrl->async_event_sqe.data) { in nvme_rdma_configure_admin_queue()
932 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, in nvme_rdma_configure_admin_queue()
934 ctrl->async_event_sqe.data = NULL; in nvme_rdma_configure_admin_queue()
937 nvme_rdma_free_queue(&ctrl->queues[0]); in nvme_rdma_configure_admin_queue()
945 blk_cleanup_queue(ctrl->ctrl.connect_q); in nvme_rdma_destroy_io_queues()
946 blk_mq_free_tag_set(ctrl->ctrl.tagset); in nvme_rdma_destroy_io_queues()
960 ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false); in nvme_rdma_configure_io_queues()
961 if (IS_ERR(ctrl->ctrl.tagset)) { in nvme_rdma_configure_io_queues()
962 ret = PTR_ERR(ctrl->ctrl.tagset); in nvme_rdma_configure_io_queues()
966 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); in nvme_rdma_configure_io_queues()
967 if (IS_ERR(ctrl->ctrl.connect_q)) { in nvme_rdma_configure_io_queues()
968 ret = PTR_ERR(ctrl->ctrl.connect_q); in nvme_rdma_configure_io_queues()
978 nvme_start_queues(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
979 if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) { in nvme_rdma_configure_io_queues()
985 ret = -ENODEV; in nvme_rdma_configure_io_queues()
988 blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset, in nvme_rdma_configure_io_queues()
989 ctrl->ctrl.queue_count - 1); in nvme_rdma_configure_io_queues()
990 nvme_unfreeze(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
996 nvme_stop_queues(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
1000 blk_cleanup_queue(ctrl->ctrl.connect_q); in nvme_rdma_configure_io_queues()
1003 blk_mq_free_tag_set(ctrl->ctrl.tagset); in nvme_rdma_configure_io_queues()
1012 blk_mq_quiesce_queue(ctrl->ctrl.admin_q); in nvme_rdma_teardown_admin_queue()
1013 blk_sync_queue(ctrl->ctrl.admin_q); in nvme_rdma_teardown_admin_queue()
1014 nvme_rdma_stop_queue(&ctrl->queues[0]); in nvme_rdma_teardown_admin_queue()
1015 if (ctrl->ctrl.admin_tagset) { in nvme_rdma_teardown_admin_queue()
1016 blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset, in nvme_rdma_teardown_admin_queue()
1017 nvme_cancel_request, &ctrl->ctrl); in nvme_rdma_teardown_admin_queue()
1018 blk_mq_tagset_wait_completed_request(ctrl->ctrl.admin_tagset); in nvme_rdma_teardown_admin_queue()
1021 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); in nvme_rdma_teardown_admin_queue()
1028 if (ctrl->ctrl.queue_count > 1) { in nvme_rdma_teardown_io_queues()
1029 nvme_start_freeze(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
1030 nvme_stop_queues(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
1031 nvme_sync_io_queues(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
1033 if (ctrl->ctrl.tagset) { in nvme_rdma_teardown_io_queues()
1034 blk_mq_tagset_busy_iter(ctrl->ctrl.tagset, in nvme_rdma_teardown_io_queues()
1035 nvme_cancel_request, &ctrl->ctrl); in nvme_rdma_teardown_io_queues()
1036 blk_mq_tagset_wait_completed_request(ctrl->ctrl.tagset); in nvme_rdma_teardown_io_queues()
1039 nvme_start_queues(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
1048 if (list_empty(&ctrl->list)) in nvme_rdma_free_ctrl()
1052 list_del(&ctrl->list); in nvme_rdma_free_ctrl()
1055 nvmf_free_options(nctrl->opts); in nvme_rdma_free_ctrl()
1057 kfree(ctrl->queues); in nvme_rdma_free_ctrl()
1064 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) { in nvme_rdma_reconnect_or_remove()
1065 WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW || in nvme_rdma_reconnect_or_remove()
1066 ctrl->ctrl.state == NVME_CTRL_LIVE); in nvme_rdma_reconnect_or_remove()
1070 if (nvmf_should_reconnect(&ctrl->ctrl)) { in nvme_rdma_reconnect_or_remove()
1071 dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n", in nvme_rdma_reconnect_or_remove()
1072 ctrl->ctrl.opts->reconnect_delay); in nvme_rdma_reconnect_or_remove()
1073 queue_delayed_work(nvme_wq, &ctrl->reconnect_work, in nvme_rdma_reconnect_or_remove()
1074 ctrl->ctrl.opts->reconnect_delay * HZ); in nvme_rdma_reconnect_or_remove()
1076 nvme_delete_ctrl(&ctrl->ctrl); in nvme_rdma_reconnect_or_remove()
1082 int ret = -EINVAL; in nvme_rdma_setup_ctrl()
1089 if (ctrl->ctrl.icdoff) { in nvme_rdma_setup_ctrl()
1090 dev_err(ctrl->ctrl.device, "icdoff is not supported!\n"); in nvme_rdma_setup_ctrl()
1094 if (!(ctrl->ctrl.sgls & (1 << 2))) { in nvme_rdma_setup_ctrl()
1095 dev_err(ctrl->ctrl.device, in nvme_rdma_setup_ctrl()
1100 if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) { in nvme_rdma_setup_ctrl()
1101 dev_warn(ctrl->ctrl.device, in nvme_rdma_setup_ctrl()
1103 ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1); in nvme_rdma_setup_ctrl()
1106 if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) { in nvme_rdma_setup_ctrl()
1107 dev_warn(ctrl->ctrl.device, in nvme_rdma_setup_ctrl()
1109 ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd); in nvme_rdma_setup_ctrl()
1110 ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1; in nvme_rdma_setup_ctrl()
1113 if (ctrl->ctrl.sgls & (1 << 20)) in nvme_rdma_setup_ctrl()
1114 ctrl->use_inline_data = true; in nvme_rdma_setup_ctrl()
1116 if (ctrl->ctrl.queue_count > 1) { in nvme_rdma_setup_ctrl()
1122 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); in nvme_rdma_setup_ctrl()
1129 WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING && in nvme_rdma_setup_ctrl()
1130 ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO); in nvme_rdma_setup_ctrl()
1132 ret = -EINVAL; in nvme_rdma_setup_ctrl()
1136 nvme_start_ctrl(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1140 if (ctrl->ctrl.queue_count > 1) in nvme_rdma_setup_ctrl()
1143 nvme_rdma_stop_queue(&ctrl->queues[0]); in nvme_rdma_setup_ctrl()
1153 ++ctrl->ctrl.nr_reconnects; in nvme_rdma_reconnect_ctrl_work()
1158 dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n", in nvme_rdma_reconnect_ctrl_work()
1159 ctrl->ctrl.nr_reconnects); in nvme_rdma_reconnect_ctrl_work()
1161 ctrl->ctrl.nr_reconnects = 0; in nvme_rdma_reconnect_ctrl_work()
1166 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", in nvme_rdma_reconnect_ctrl_work()
1167 ctrl->ctrl.nr_reconnects); in nvme_rdma_reconnect_ctrl_work()
1176 nvme_stop_keep_alive(&ctrl->ctrl); in nvme_rdma_error_recovery_work()
1178 nvme_start_queues(&ctrl->ctrl); in nvme_rdma_error_recovery_work()
1180 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); in nvme_rdma_error_recovery_work()
1182 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { in nvme_rdma_error_recovery_work()
1184 WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING && in nvme_rdma_error_recovery_work()
1185 ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO); in nvme_rdma_error_recovery_work()
1194 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) in nvme_rdma_error_recovery()
1197 dev_warn(ctrl->ctrl.device, "starting error recovery\n"); in nvme_rdma_error_recovery()
1198 queue_work(nvme_reset_wq, &ctrl->err_work); in nvme_rdma_error_recovery()
1205 if (!refcount_dec_and_test(&req->ref)) in nvme_rdma_end_request()
1207 if (!nvme_try_complete_req(rq, req->status, req->result)) in nvme_rdma_end_request()
1211 static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc, in nvme_rdma_wr_error() argument
1214 struct nvme_rdma_queue *queue = wc->qp->qp_context; in nvme_rdma_wr_error()
1215 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_wr_error()
1217 if (ctrl->ctrl.state == NVME_CTRL_LIVE) in nvme_rdma_wr_error()
1218 dev_info(ctrl->ctrl.device, in nvme_rdma_wr_error()
1220 op, wc->wr_cqe, in nvme_rdma_wr_error()
1221 ib_wc_status_msg(wc->status), wc->status); in nvme_rdma_wr_error()
1225 static void nvme_rdma_memreg_done(struct ib_cq *cq, struct ib_wc *wc) in nvme_rdma_memreg_done() argument
1227 if (unlikely(wc->status != IB_WC_SUCCESS)) in nvme_rdma_memreg_done()
1228 nvme_rdma_wr_error(cq, wc, "MEMREG"); in nvme_rdma_memreg_done()
1231 static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) in nvme_rdma_inv_rkey_done() argument
1234 container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe); in nvme_rdma_inv_rkey_done()
1236 if (unlikely(wc->status != IB_WC_SUCCESS)) in nvme_rdma_inv_rkey_done()
1237 nvme_rdma_wr_error(cq, wc, "LOCAL_INV"); in nvme_rdma_inv_rkey_done()
1250 .ex.invalidate_rkey = req->mr->rkey, in nvme_rdma_inv_rkey()
1253 req->reg_cqe.done = nvme_rdma_inv_rkey_done; in nvme_rdma_inv_rkey()
1254 wr.wr_cqe = &req->reg_cqe; in nvme_rdma_inv_rkey()
1256 return ib_post_send(queue->qp, &wr, NULL); in nvme_rdma_inv_rkey()
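When a response arrives without IB_WC_WITH_INVALIDATE (see nvme_rdma_process_nvme_rsp() further down), the host must invalidate its own rkey with a LOCAL_INV work request before the command may complete. Reassembled from the fragments above, with the remaining initializer fields filled in as an assumption about the usual signaled-send setup:

	static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
			struct nvme_rdma_request *req)
	{
		struct ib_send_wr wr = {
			.opcode		    = IB_WR_LOCAL_INV,
			.next		    = NULL,
			.num_sge	    = 0,
			.send_flags	    = IB_SEND_SIGNALED,
			.ex.invalidate_rkey = req->mr->rkey,
		};

		req->reg_cqe.done = nvme_rdma_inv_rkey_done;
		wr.wr_cqe = &req->reg_cqe;

		return ib_post_send(queue->qp, &wr, NULL);
	}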
1263 struct nvme_rdma_device *dev = queue->device; in nvme_rdma_unmap_data()
1264 struct ib_device *ibdev = dev->dev; in nvme_rdma_unmap_data()
1265 struct list_head *pool = &queue->qp->rdma_mrs; in nvme_rdma_unmap_data()
1271 ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl, in nvme_rdma_unmap_data()
1272 req->metadata_sgl->nents, rq_dma_dir(rq)); in nvme_rdma_unmap_data()
1273 sg_free_table_chained(&req->metadata_sgl->sg_table, in nvme_rdma_unmap_data()
1277 if (req->use_sig_mr) in nvme_rdma_unmap_data()
1278 pool = &queue->qp->sig_mrs; in nvme_rdma_unmap_data()
1280 if (req->mr) { in nvme_rdma_unmap_data()
1281 ib_mr_pool_put(queue->qp, pool, req->mr); in nvme_rdma_unmap_data()
1282 req->mr = NULL; in nvme_rdma_unmap_data()
1285 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents, in nvme_rdma_unmap_data()
1287 sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT); in nvme_rdma_unmap_data()
1292 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; in nvme_rdma_set_sg_null()
1294 sg->addr = 0; in nvme_rdma_set_sg_null()
1295 put_unaligned_le24(0, sg->length); in nvme_rdma_set_sg_null()
1296 put_unaligned_le32(0, sg->key); in nvme_rdma_set_sg_null()
1297 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; in nvme_rdma_set_sg_null()
1305 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_rdma_map_sg_inline()
1306 struct scatterlist *sgl = req->data_sgl.sg_table.sgl; in nvme_rdma_map_sg_inline()
1307 struct ib_sge *sge = &req->sge[1]; in nvme_rdma_map_sg_inline()
1312 sge->addr = sg_dma_address(sgl); in nvme_rdma_map_sg_inline()
1313 sge->length = sg_dma_len(sgl); in nvme_rdma_map_sg_inline()
1314 sge->lkey = queue->device->pd->local_dma_lkey; in nvme_rdma_map_sg_inline()
1315 len += sge->length; in nvme_rdma_map_sg_inline()
1318 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_rdma_map_sg_inline()
1319 sg->length = cpu_to_le32(len); in nvme_rdma_map_sg_inline()
1320 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; in nvme_rdma_map_sg_inline()
1322 req->num_sge += count; in nvme_rdma_map_sg_inline()
1329 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; in nvme_rdma_map_sg_single()
1331 sg->addr = cpu_to_le64(sg_dma_address(req->data_sgl.sg_table.sgl)); in nvme_rdma_map_sg_single()
1332 put_unaligned_le24(sg_dma_len(req->data_sgl.sg_table.sgl), sg->length); in nvme_rdma_map_sg_single()
1333 put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key); in nvme_rdma_map_sg_single()
1334 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; in nvme_rdma_map_sg_single()
1342 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; in nvme_rdma_map_sg_fr()
1345 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs); in nvme_rdma_map_sg_fr()
1346 if (WARN_ON_ONCE(!req->mr)) in nvme_rdma_map_sg_fr()
1347 return -EAGAIN; in nvme_rdma_map_sg_fr()
1353 nr = ib_map_mr_sg(req->mr, req->data_sgl.sg_table.sgl, count, NULL, in nvme_rdma_map_sg_fr()
1356 ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr); in nvme_rdma_map_sg_fr()
1357 req->mr = NULL; in nvme_rdma_map_sg_fr()
1360 return -EINVAL; in nvme_rdma_map_sg_fr()
1363 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); in nvme_rdma_map_sg_fr()
1365 req->reg_cqe.done = nvme_rdma_memreg_done; in nvme_rdma_map_sg_fr()
1366 memset(&req->reg_wr, 0, sizeof(req->reg_wr)); in nvme_rdma_map_sg_fr()
1367 req->reg_wr.wr.opcode = IB_WR_REG_MR; in nvme_rdma_map_sg_fr()
1368 req->reg_wr.wr.wr_cqe = &req->reg_cqe; in nvme_rdma_map_sg_fr()
1369 req->reg_wr.wr.num_sge = 0; in nvme_rdma_map_sg_fr()
1370 req->reg_wr.mr = req->mr; in nvme_rdma_map_sg_fr()
1371 req->reg_wr.key = req->mr->rkey; in nvme_rdma_map_sg_fr()
1372 req->reg_wr.access = IB_ACCESS_LOCAL_WRITE | in nvme_rdma_map_sg_fr()
1376 sg->addr = cpu_to_le64(req->mr->iova); in nvme_rdma_map_sg_fr()
1377 put_unaligned_le24(req->mr->length, sg->length); in nvme_rdma_map_sg_fr()
1378 put_unaligned_le32(req->mr->rkey, sg->key); in nvme_rdma_map_sg_fr()
1379 sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) | in nvme_rdma_map_sg_fr()
1389 domain->sig_type = IB_SIG_TYPE_T10_DIF; in nvme_rdma_set_sig_domain()
1390 domain->sig.dif.bg_type = IB_T10DIF_CRC; in nvme_rdma_set_sig_domain()
1391 domain->sig.dif.pi_interval = 1 << bi->interval_exp; in nvme_rdma_set_sig_domain()
1392 domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag); in nvme_rdma_set_sig_domain()
1394 domain->sig.dif.ref_remap = true; in nvme_rdma_set_sig_domain()
1396 domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag); in nvme_rdma_set_sig_domain()
1397 domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask); in nvme_rdma_set_sig_domain()
1398 domain->sig.dif.app_escape = true; in nvme_rdma_set_sig_domain()
1400 domain->sig.dif.ref_escape = true; in nvme_rdma_set_sig_domain()
1407 u16 control = le16_to_cpu(cmd->rw.control); in nvme_rdma_set_sig_attrs()
1411 /* for WRITE_INSERT/READ_STRIP no memory domain */ in nvme_rdma_set_sig_attrs()
1412 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE; in nvme_rdma_set_sig_attrs()
1413 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control, in nvme_rdma_set_sig_attrs()
1417 cmd->rw.control = cpu_to_le16(control); in nvme_rdma_set_sig_attrs()
1419 /* for WRITE_PASS/READ_PASS both wire/memory domains exist */ in nvme_rdma_set_sig_attrs()
1420 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control, in nvme_rdma_set_sig_attrs()
1422 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control, in nvme_rdma_set_sig_attrs()
1430 if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_REF) in nvme_rdma_set_prot_checks()
1432 if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_GUARD) in nvme_rdma_set_prot_checks()
1436 static void nvme_rdma_sig_done(struct ib_cq *cq, struct ib_wc *wc) in nvme_rdma_sig_done() argument
1438 if (unlikely(wc->status != IB_WC_SUCCESS)) in nvme_rdma_sig_done()
1439 nvme_rdma_wr_error(cq, wc, "SIG"); in nvme_rdma_sig_done()
1446 struct nvme_rdma_sgl *sgl = &req->data_sgl; in nvme_rdma_map_sg_pi()
1447 struct ib_reg_wr *wr = &req->reg_wr; in nvme_rdma_map_sg_pi()
1449 struct nvme_ns *ns = rq->q->queuedata; in nvme_rdma_map_sg_pi()
1450 struct bio *bio = rq->bio; in nvme_rdma_map_sg_pi()
1451 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; in nvme_rdma_map_sg_pi()
1454 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs); in nvme_rdma_map_sg_pi()
1455 if (WARN_ON_ONCE(!req->mr)) in nvme_rdma_map_sg_pi()
1456 return -EAGAIN; in nvme_rdma_map_sg_pi()
1458 nr = ib_map_mr_sg_pi(req->mr, sgl->sg_table.sgl, count, NULL, in nvme_rdma_map_sg_pi()
1459 req->metadata_sgl->sg_table.sgl, pi_count, NULL, in nvme_rdma_map_sg_pi()
1464 nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_disk), c, in nvme_rdma_map_sg_pi()
1465 req->mr->sig_attrs, ns->pi_type); in nvme_rdma_map_sg_pi()
1466 nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask); in nvme_rdma_map_sg_pi()
1468 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); in nvme_rdma_map_sg_pi()
1470 req->reg_cqe.done = nvme_rdma_sig_done; in nvme_rdma_map_sg_pi()
1472 wr->wr.opcode = IB_WR_REG_MR_INTEGRITY; in nvme_rdma_map_sg_pi()
1473 wr->wr.wr_cqe = &req->reg_cqe; in nvme_rdma_map_sg_pi()
1474 wr->wr.num_sge = 0; in nvme_rdma_map_sg_pi()
1475 wr->wr.send_flags = 0; in nvme_rdma_map_sg_pi()
1476 wr->mr = req->mr; in nvme_rdma_map_sg_pi()
1477 wr->key = req->mr->rkey; in nvme_rdma_map_sg_pi()
1478 wr->access = IB_ACCESS_LOCAL_WRITE | in nvme_rdma_map_sg_pi()
1482 sg->addr = cpu_to_le64(req->mr->iova); in nvme_rdma_map_sg_pi()
1483 put_unaligned_le24(req->mr->length, sg->length); in nvme_rdma_map_sg_pi()
1484 put_unaligned_le32(req->mr->rkey, sg->key); in nvme_rdma_map_sg_pi()
1485 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; in nvme_rdma_map_sg_pi()
1490 ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr); in nvme_rdma_map_sg_pi()
1491 req->mr = NULL; in nvme_rdma_map_sg_pi()
1494 return -EINVAL; in nvme_rdma_map_sg_pi()
1501 struct nvme_rdma_device *dev = queue->device; in nvme_rdma_map_data()
1502 struct ib_device *ibdev = dev->dev; in nvme_rdma_map_data()
1506 req->num_sge = 1; in nvme_rdma_map_data()
1507 refcount_set(&req->ref, 2); /* send and recv completions */ in nvme_rdma_map_data()
1509 c->common.flags |= NVME_CMD_SGL_METABUF; in nvme_rdma_map_data()
1514 req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1); in nvme_rdma_map_data()
1515 ret = sg_alloc_table_chained(&req->data_sgl.sg_table, in nvme_rdma_map_data()
1516 blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl, in nvme_rdma_map_data()
1519 return -ENOMEM; in nvme_rdma_map_data()
1521 req->data_sgl.nents = blk_rq_map_sg(rq->q, rq, in nvme_rdma_map_data()
1522 req->data_sgl.sg_table.sgl); in nvme_rdma_map_data()
1524 count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl, in nvme_rdma_map_data()
1525 req->data_sgl.nents, rq_dma_dir(rq)); in nvme_rdma_map_data()
1527 ret = -EIO; in nvme_rdma_map_data()
1532 req->metadata_sgl->sg_table.sgl = in nvme_rdma_map_data()
1533 (struct scatterlist *)(req->metadata_sgl + 1); in nvme_rdma_map_data()
1534 ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table, in nvme_rdma_map_data()
1535 blk_rq_count_integrity_sg(rq->q, rq->bio), in nvme_rdma_map_data()
1536 req->metadata_sgl->sg_table.sgl, in nvme_rdma_map_data()
1539 ret = -ENOMEM; in nvme_rdma_map_data()
1543 req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q, in nvme_rdma_map_data()
1544 rq->bio, req->metadata_sgl->sg_table.sgl); in nvme_rdma_map_data()
1546 req->metadata_sgl->sg_table.sgl, in nvme_rdma_map_data()
1547 req->metadata_sgl->nents, in nvme_rdma_map_data()
1550 ret = -EIO; in nvme_rdma_map_data()
1555 if (req->use_sig_mr) { in nvme_rdma_map_data()
1560 if (count <= dev->num_inline_segments) { in nvme_rdma_map_data()
1562 queue->ctrl->use_inline_data && in nvme_rdma_map_data()
1569 if (count == 1 && dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) { in nvme_rdma_map_data()
1584 ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl, in nvme_rdma_map_data()
1585 req->metadata_sgl->nents, rq_dma_dir(rq)); in nvme_rdma_map_data()
1588 sg_free_table_chained(&req->metadata_sgl->sg_table, in nvme_rdma_map_data()
1591 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents, in nvme_rdma_map_data()
1594 sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT); in nvme_rdma_map_data()
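nvme_rdma_map_data() picks the cheapest transport layout that fits the request. Paraphrasing the branches visible above (pseudocode, not verbatim kernel code; helper arguments are elided):

	if (req->use_sig_mr)				/* T10-PI request */
		return nvme_rdma_map_sg_pi(...);

	if (count <= dev->num_inline_segments) {
		/* small writes on I/O queues travel inside the capsule */
		if (write && queue->ctrl->use_inline_data &&
		    payload <= nvme_rdma_inline_data_size(queue))
			return nvme_rdma_map_sg_inline(...);
		/* one contiguous segment can use the PD's global rkey,
		 * available only when register_always is off */
		if (count == 1 && (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY))
			return nvme_rdma_map_sg_single(...);
	}

	/* everything else gets a fast-registration MR */
	return nvme_rdma_map_sg_fr(...);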
1598 static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) in nvme_rdma_send_done() argument
1601 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); in nvme_rdma_send_done()
1605 if (unlikely(wc->status != IB_WC_SUCCESS)) in nvme_rdma_send_done()
1606 nvme_rdma_wr_error(cq, wc, "SEND"); in nvme_rdma_send_done()
1618 sge->addr = qe->dma; in nvme_rdma_post_send()
1619 sge->length = sizeof(struct nvme_command); in nvme_rdma_post_send()
1620 sge->lkey = queue->device->pd->local_dma_lkey; in nvme_rdma_post_send()
1623 wr.wr_cqe = &qe->cqe; in nvme_rdma_post_send()
1630 first->next = &wr; in nvme_rdma_post_send()
1634 ret = ib_post_send(queue->qp, first, NULL); in nvme_rdma_post_send()
1636 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_post_send()
1649 list.addr = qe->dma; in nvme_rdma_post_recv()
1651 list.lkey = queue->device->pd->local_dma_lkey; in nvme_rdma_post_recv()
1653 qe->cqe.done = nvme_rdma_recv_done; in nvme_rdma_post_recv()
1656 wr.wr_cqe = &qe->cqe; in nvme_rdma_post_recv()
1660 ret = ib_post_recv(queue->qp, &wr, NULL); in nvme_rdma_post_recv()
1662 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_post_recv()
1673 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_rdma_tagset()
1674 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_rdma_tagset()
1677 static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc) in nvme_rdma_async_done() argument
1679 if (unlikely(wc->status != IB_WC_SUCCESS)) in nvme_rdma_async_done()
1680 nvme_rdma_wr_error(cq, wc, "ASYNC"); in nvme_rdma_async_done()
1686 struct nvme_rdma_queue *queue = &ctrl->queues[0]; in nvme_rdma_submit_async_event()
1687 struct ib_device *dev = queue->device->dev; in nvme_rdma_submit_async_event()
1688 struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe; in nvme_rdma_submit_async_event()
1689 struct nvme_command *cmd = sqe->data; in nvme_rdma_submit_async_event()
1693 ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE); in nvme_rdma_submit_async_event()
1696 cmd->common.opcode = nvme_admin_async_event; in nvme_rdma_submit_async_event()
1697 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH; in nvme_rdma_submit_async_event()
1698 cmd->common.flags |= NVME_CMD_SGL_METABUF; in nvme_rdma_submit_async_event()
1701 sqe->cqe.done = nvme_rdma_async_done; in nvme_rdma_submit_async_event()
1703 ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd), in nvme_rdma_submit_async_event()
1711 struct nvme_completion *cqe, struct ib_wc *wc) in nvme_rdma_process_nvme_rsp() argument
1716 rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id); in nvme_rdma_process_nvme_rsp()
1718 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1720 cqe->command_id, queue->qp->qp_num); in nvme_rdma_process_nvme_rsp()
1721 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1726 req->status = cqe->status; in nvme_rdma_process_nvme_rsp()
1727 req->result = cqe->result; in nvme_rdma_process_nvme_rsp()
1729 if (wc->wc_flags & IB_WC_WITH_INVALIDATE) { in nvme_rdma_process_nvme_rsp()
1730 if (unlikely(!req->mr || in nvme_rdma_process_nvme_rsp()
1731 wc->ex.invalidate_rkey != req->mr->rkey)) { in nvme_rdma_process_nvme_rsp()
1732 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1734 req->mr ? req->mr->rkey : 0); in nvme_rdma_process_nvme_rsp()
1735 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1737 } else if (req->mr) { in nvme_rdma_process_nvme_rsp()
1742 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1744 req->mr->rkey, ret); in nvme_rdma_process_nvme_rsp()
1745 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1754 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) in nvme_rdma_recv_done() argument
1757 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); in nvme_rdma_recv_done()
1758 struct nvme_rdma_queue *queue = wc->qp->qp_context; in nvme_rdma_recv_done()
1759 struct ib_device *ibdev = queue->device->dev; in nvme_rdma_recv_done()
1760 struct nvme_completion *cqe = qe->data; in nvme_rdma_recv_done()
1763 if (unlikely(wc->status != IB_WC_SUCCESS)) { in nvme_rdma_recv_done()
1764 nvme_rdma_wr_error(cq, wc, "RECV"); in nvme_rdma_recv_done()
1769 if (unlikely(wc->byte_len < len)) { in nvme_rdma_recv_done()
1770 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_recv_done()
1771 "Unexpected nvme completion length(%d)\n", wc->byte_len); in nvme_rdma_recv_done()
1772 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_recv_done()
1776 ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE); in nvme_rdma_recv_done()
1784 cqe->command_id))) in nvme_rdma_recv_done()
1785 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_rdma_recv_done()
1786 &cqe->result); in nvme_rdma_recv_done()
1788 nvme_rdma_process_nvme_rsp(queue, cqe, wc); in nvme_rdma_recv_done()
1789 ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE); in nvme_rdma_recv_done()
1798 for (i = 0; i < queue->queue_size; i++) { in nvme_rdma_conn_established()
1799 ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]); in nvme_rdma_conn_established()
1814 struct rdma_cm_id *cm_id = queue->cm_id; in nvme_rdma_conn_rejected()
1815 int status = ev->status; in nvme_rdma_conn_rejected()
1824 u16 sts = le16_to_cpu(rej_data->sts); in nvme_rdma_conn_rejected()
1826 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_conn_rejected()
1830 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_conn_rejected()
1834 return -ECONNRESET; in nvme_rdma_conn_rejected()
1839 struct nvme_ctrl *ctrl = &queue->ctrl->ctrl; in nvme_rdma_addr_resolved()
1846 if (ctrl->opts->tos >= 0) in nvme_rdma_addr_resolved()
1847 rdma_set_service_type(queue->cm_id, ctrl->opts->tos); in nvme_rdma_addr_resolved()
1848 ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CONNECT_TIMEOUT_MS); in nvme_rdma_addr_resolved()
1850 dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n", in nvme_rdma_addr_resolved()
1851 queue->cm_error); in nvme_rdma_addr_resolved()
1864 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_route_resolved()
1869 param.qp_num = queue->qp->qp_num; in nvme_rdma_route_resolved()
1872 param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom; in nvme_rdma_route_resolved()
1887 priv.hsqsize = cpu_to_le16(NVME_AQ_DEPTH - 1); in nvme_rdma_route_resolved()
1894 priv.hrqsize = cpu_to_le16(queue->queue_size); in nvme_rdma_route_resolved()
1895 priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize); in nvme_rdma_route_resolved()
1898 ret = rdma_connect_locked(queue->cm_id, &param); in nvme_rdma_route_resolved()
1900 dev_err(ctrl->ctrl.device, in nvme_rdma_route_resolved()
1915 struct nvme_rdma_queue *queue = cm_id->context; in nvme_rdma_cm_handler()
1918 dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n", in nvme_rdma_cm_handler()
1919 rdma_event_msg(ev->event), ev->event, in nvme_rdma_cm_handler()
1920 ev->status, cm_id); in nvme_rdma_cm_handler()
1922 switch (ev->event) { in nvme_rdma_cm_handler()
1930 queue->cm_error = nvme_rdma_conn_established(queue); in nvme_rdma_cm_handler()
1932 complete(&queue->cm_done); in nvme_rdma_cm_handler()
1943 dev_dbg(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1944 "CM error event %d\n", ev->event); in nvme_rdma_cm_handler()
1945 cm_error = -ECONNRESET; in nvme_rdma_cm_handler()
1950 dev_dbg(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1951 "disconnect received - connection closed\n"); in nvme_rdma_cm_handler()
1952 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_cm_handler()
1958 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1959 "Unexpected RDMA CM event (%d)\n", ev->event); in nvme_rdma_cm_handler()
1960 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_cm_handler()
1965 queue->cm_error = cm_error; in nvme_rdma_cm_handler()
1966 complete(&queue->cm_done); in nvme_rdma_cm_handler()
1975 struct nvme_rdma_queue *queue = req->queue; in nvme_rdma_complete_timed_out()
1979 nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; in nvme_rdma_complete_timed_out()
1988 struct nvme_rdma_queue *queue = req->queue; in nvme_rdma_timeout()
1989 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_timeout()
1991 dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n", in nvme_rdma_timeout()
1992 rq->tag, nvme_rdma_queue_idx(queue)); in nvme_rdma_timeout()
1994 if (ctrl->ctrl.state != NVME_CTRL_LIVE) { in nvme_rdma_timeout()
1999 * - ctrl disable/shutdown fabrics requests in nvme_rdma_timeout()
2000 * - connect requests in nvme_rdma_timeout()
2001 * - initialization admin requests in nvme_rdma_timeout()
2002 * - I/O requests that entered after unquiescing and in nvme_rdma_timeout()
2023 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_rdma_queue_rq()
2024 struct nvme_rdma_queue *queue = hctx->driver_data; in nvme_rdma_queue_rq()
2025 struct request *rq = bd->rq; in nvme_rdma_queue_rq()
2027 struct nvme_rdma_qe *sqe = &req->sqe; in nvme_rdma_queue_rq()
2028 struct nvme_command *c = sqe->data; in nvme_rdma_queue_rq()
2030 bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags); in nvme_rdma_queue_rq()
2034 WARN_ON_ONCE(rq->tag < 0); in nvme_rdma_queue_rq()
2036 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_rdma_queue_rq()
2037 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_rdma_queue_rq()
2039 dev = queue->device->dev; in nvme_rdma_queue_rq()
2041 req->sqe.dma = ib_dma_map_single(dev, req->sqe.data, in nvme_rdma_queue_rq()
2044 err = ib_dma_mapping_error(dev, req->sqe.dma); in nvme_rdma_queue_rq()
2048 ib_dma_sync_single_for_cpu(dev, sqe->dma, in nvme_rdma_queue_rq()
2058 queue->pi_support && in nvme_rdma_queue_rq()
2059 (c->common.opcode == nvme_cmd_write || in nvme_rdma_queue_rq()
2060 c->common.opcode == nvme_cmd_read) && in nvme_rdma_queue_rq()
2062 req->use_sig_mr = true; in nvme_rdma_queue_rq()
2064 req->use_sig_mr = false; in nvme_rdma_queue_rq()
2068 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_queue_rq()
2073 sqe->cqe.done = nvme_rdma_send_done; in nvme_rdma_queue_rq()
2075 ib_dma_sync_single_for_device(dev, sqe->dma, in nvme_rdma_queue_rq()
2078 err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, in nvme_rdma_queue_rq()
2079 req->mr ? &req->reg_wr.wr : NULL); in nvme_rdma_queue_rq()
2088 if (err == -ENOMEM || err == -EAGAIN) in nvme_rdma_queue_rq()
2094 ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command), in nvme_rdma_queue_rq()
2101 struct nvme_rdma_queue *queue = hctx->driver_data; in nvme_rdma_poll()
2103 return ib_process_cq_direct(queue->ib_cq, -1); in nvme_rdma_poll()
2112 ret = ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status); in nvme_rdma_check_pi_status()
2115 nvme_req(rq)->status = NVME_SC_INVALID_PI; in nvme_rdma_check_pi_status()
2122 nvme_req(rq)->status = NVME_SC_GUARD_CHECK; in nvme_rdma_check_pi_status()
2125 nvme_req(rq)->status = NVME_SC_REFTAG_CHECK; in nvme_rdma_check_pi_status()
2128 nvme_req(rq)->status = NVME_SC_APPTAG_CHECK; in nvme_rdma_check_pi_status()
2140 struct nvme_rdma_queue *queue = req->queue; in nvme_rdma_complete_rq()
2141 struct ib_device *ibdev = queue->device->dev; in nvme_rdma_complete_rq()
2143 if (req->use_sig_mr) in nvme_rdma_complete_rq()
2147 ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command), in nvme_rdma_complete_rq()
2154 struct nvme_rdma_ctrl *ctrl = set->driver_data; in nvme_rdma_map_queues()
2155 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_rdma_map_queues()
2157 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) { in nvme_rdma_map_queues()
2159 set->map[HCTX_TYPE_DEFAULT].nr_queues = in nvme_rdma_map_queues()
2160 ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_rdma_map_queues()
2161 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; in nvme_rdma_map_queues()
2162 set->map[HCTX_TYPE_READ].nr_queues = in nvme_rdma_map_queues()
2163 ctrl->io_queues[HCTX_TYPE_READ]; in nvme_rdma_map_queues()
2164 set->map[HCTX_TYPE_READ].queue_offset = in nvme_rdma_map_queues()
2165 ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_rdma_map_queues()
2168 set->map[HCTX_TYPE_DEFAULT].nr_queues = in nvme_rdma_map_queues()
2169 ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_rdma_map_queues()
2170 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; in nvme_rdma_map_queues()
2171 set->map[HCTX_TYPE_READ].nr_queues = in nvme_rdma_map_queues()
2172 ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_rdma_map_queues()
2173 set->map[HCTX_TYPE_READ].queue_offset = 0; in nvme_rdma_map_queues()
2175 blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT], in nvme_rdma_map_queues()
2176 ctrl->device->dev, 0); in nvme_rdma_map_queues()
2177 blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ], in nvme_rdma_map_queues()
2178 ctrl->device->dev, 0); in nvme_rdma_map_queues()
2180 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) { in nvme_rdma_map_queues()
2182 set->map[HCTX_TYPE_POLL].nr_queues = in nvme_rdma_map_queues()
2183 ctrl->io_queues[HCTX_TYPE_POLL]; in nvme_rdma_map_queues()
2184 set->map[HCTX_TYPE_POLL].queue_offset = in nvme_rdma_map_queues()
2185 ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_rdma_map_queues()
2186 ctrl->io_queues[HCTX_TYPE_READ]; in nvme_rdma_map_queues()
2187 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); in nvme_rdma_map_queues()
2190 dev_info(ctrl->ctrl.device, in nvme_rdma_map_queues()
2192 ctrl->io_queues[HCTX_TYPE_DEFAULT], in nvme_rdma_map_queues()
2193 ctrl->io_queues[HCTX_TYPE_READ], in nvme_rdma_map_queues()
2194 ctrl->io_queues[HCTX_TYPE_POLL]); in nvme_rdma_map_queues()
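The queue_offset assignments lay the hctx groups out back to back: DEFAULT queues start at offset 0, READ queues follow them when the groups are separate (and alias offset 0 when shared), and POLL queues come after both. Continuing the hypothetical 2 default / 4 read / 2 poll split from the allocation example above, DEFAULT covers hardware queues 0-1, READ covers 2-5, and POLL covers 6-7.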
2221 cancel_work_sync(&ctrl->err_work); in nvme_rdma_shutdown_ctrl()
2222 cancel_delayed_work_sync(&ctrl->reconnect_work); in nvme_rdma_shutdown_ctrl()
2225 blk_mq_quiesce_queue(ctrl->ctrl.admin_q); in nvme_rdma_shutdown_ctrl()
2227 nvme_shutdown_ctrl(&ctrl->ctrl); in nvme_rdma_shutdown_ctrl()
2229 nvme_disable_ctrl(&ctrl->ctrl); in nvme_rdma_shutdown_ctrl()
2243 nvme_stop_ctrl(&ctrl->ctrl); in nvme_rdma_reset_ctrl_work()
2246 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { in nvme_rdma_reset_ctrl_work()
2258 ++ctrl->ctrl.nr_reconnects; in nvme_rdma_reset_ctrl_work()
2281 * existing controller with all the other parameters the same and no
2295 found = nvmf_ip_options_match(&ctrl->ctrl, opts); in nvme_rdma_existing_controller()
2313 return ERR_PTR(-ENOMEM); in nvme_rdma_create_ctrl()
2314 ctrl->ctrl.opts = opts; in nvme_rdma_create_ctrl()
2315 INIT_LIST_HEAD(&ctrl->list); in nvme_rdma_create_ctrl()
2317 if (!(opts->mask & NVMF_OPT_TRSVCID)) { in nvme_rdma_create_ctrl()
2318 opts->trsvcid = in nvme_rdma_create_ctrl()
2320 if (!opts->trsvcid) { in nvme_rdma_create_ctrl()
2321 ret = -ENOMEM; in nvme_rdma_create_ctrl()
2324 opts->mask |= NVMF_OPT_TRSVCID; in nvme_rdma_create_ctrl()
2328 opts->traddr, opts->trsvcid, &ctrl->addr); in nvme_rdma_create_ctrl()
2331 opts->traddr, opts->trsvcid); in nvme_rdma_create_ctrl()
2335 if (opts->mask & NVMF_OPT_HOST_TRADDR) { in nvme_rdma_create_ctrl()
2337 opts->host_traddr, NULL, &ctrl->src_addr); in nvme_rdma_create_ctrl()
2340 opts->host_traddr); in nvme_rdma_create_ctrl()
2345 if (!opts->duplicate_connect && nvme_rdma_existing_controller(opts)) { in nvme_rdma_create_ctrl()
2346 ret = -EALREADY; in nvme_rdma_create_ctrl()
2350 INIT_DELAYED_WORK(&ctrl->reconnect_work, in nvme_rdma_create_ctrl()
2352 INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work); in nvme_rdma_create_ctrl()
2353 INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work); in nvme_rdma_create_ctrl()
2355 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + in nvme_rdma_create_ctrl()
2356 opts->nr_poll_queues + 1; in nvme_rdma_create_ctrl()
2357 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_rdma_create_ctrl()
2358 ctrl->ctrl.kato = opts->kato; in nvme_rdma_create_ctrl()
2360 ret = -ENOMEM; in nvme_rdma_create_ctrl()
2361 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), in nvme_rdma_create_ctrl()
2363 if (!ctrl->queues) in nvme_rdma_create_ctrl()
2366 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops, in nvme_rdma_create_ctrl()
2367 0 /* no quirks, we're perfect! */); in nvme_rdma_create_ctrl()
2371 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING); in nvme_rdma_create_ctrl()
2378 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n", in nvme_rdma_create_ctrl()
2379 ctrl->ctrl.opts->subsysnqn, &ctrl->addr); in nvme_rdma_create_ctrl()
2382 list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list); in nvme_rdma_create_ctrl()
2385 return &ctrl->ctrl; in nvme_rdma_create_ctrl()
2388 nvme_uninit_ctrl(&ctrl->ctrl); in nvme_rdma_create_ctrl()
2389 nvme_put_ctrl(&ctrl->ctrl); in nvme_rdma_create_ctrl()
2391 ret = -EIO; in nvme_rdma_create_ctrl()
2394 kfree(ctrl->queues); in nvme_rdma_create_ctrl()
2419 if (ndev->dev == ib_device) { in nvme_rdma_remove_one()
2432 if (ctrl->device->dev != ib_device) in nvme_rdma_remove_one()
2434 nvme_delete_ctrl(&ctrl->ctrl); in nvme_rdma_remove_one()
2474 nvme_delete_ctrl(&ctrl->ctrl); in nvme_rdma_cleanup_module()