Lines Matching +full:ctrl +full:- +full:len (drivers/nvme/host/tcp.c)

1 // SPDX-License-Identifier: GPL-2.0
11 #include <linux/nvme-tcp.h>
12 #include <linux/nvme-keyring.h>
18 #include <linux/blk-mq.h>
30 * A non-zero value being sufficient to indicate general consideration of any
44 MODULE_PARM_DESC(wq_unbound, "Use unbound workqueue for nvme-tcp IO context (default false)");
60 * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
61 * because dependencies are tracked for both nvme-tcp and user contexts. Using
62 * a separate class prevents lockdep from conflating nvme-tcp socket use with
63 * user-space socket API use.
70 struct sock *sk = sock->sk; in nvme_tcp_reclassify_socket()
75 switch (sk->sk_family) { in nvme_tcp_reclassify_socket()
77 sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME", in nvme_tcp_reclassify_socket()
79 "sk_lock-AF_INET-NVME", in nvme_tcp_reclassify_socket()
83 sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME", in nvme_tcp_reclassify_socket()
85 "sk_lock-AF_INET6-NVME", in nvme_tcp_reclassify_socket()
164 struct nvme_tcp_ctrl *ctrl; member
194 struct nvme_ctrl ctrl; member
209 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl) in to_tcp_ctrl() argument
211 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl); in to_tcp_ctrl()
216 return queue - queue->ctrl->queues; in nvme_tcp_queue_id()
240 return queue->tls_enabled; in nvme_tcp_queue_tls()
246 static inline bool nvme_tcp_tls_configured(struct nvme_ctrl *ctrl) in nvme_tcp_tls_configured() argument
251 return ctrl->opts->tls || ctrl->opts->concat; in nvme_tcp_tls_configured()
259 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_tcp_tagset()
260 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_tcp_tagset()
265 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_hdgst_len()
270 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_ddgst_len()
275 return req->pdu; in nvme_tcp_req_cmd_pdu()
281 return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) - in nvme_tcp_req_data_pdu()
287 if (nvme_is_fabrics(req->req.cmd)) in nvme_tcp_inline_data_size()
289 return req->queue->cmnd_capsule_len - sizeof(struct nvme_command); in nvme_tcp_inline_data_size()
294 return req == &req->queue->ctrl->async_req; in nvme_tcp_async_req()
306 return rq_data_dir(rq) == WRITE && req->data_len && in nvme_tcp_has_inline_data()
307 req->data_len <= nvme_tcp_inline_data_size(req); in nvme_tcp_has_inline_data()
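nvme_tcp_inline_data_size() derives the in-capsule write budget from the controller-reported IOCCSZ (in 16-byte units) minus the 64-byte SQE, and nvme_tcp_has_inline_data() then admits a write only if its whole payload fits. A self-contained sketch of that arithmetic (numbers illustrative):

#include <stdio.h>

#define NVME_CMD_SIZE 64 /* an NVMe submission queue entry is 64 bytes */

/* Command capsule bytes left over for inline (in-capsule) write data. */
static unsigned int inline_data_size(unsigned int ioccsz)
{
        return ioccsz * 16 - NVME_CMD_SIZE;
}

int main(void)
{
        /* e.g. IOCCSZ = 516 -> 8256-byte capsule -> 8192 inline bytes */
        printf("%u\n", inline_data_size(516));
        return 0;
}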
312 return req->iter.bvec->bv_page; in nvme_tcp_req_cur_page()
317 return req->iter.bvec->bv_offset + req->iter.iov_offset; in nvme_tcp_req_cur_offset()
322 return min_t(size_t, iov_iter_single_seg_count(&req->iter), in nvme_tcp_req_cur_length()
323 req->pdu_len - req->pdu_sent); in nvme_tcp_req_cur_length()
329 req->pdu_len - req->pdu_sent : 0; in nvme_tcp_pdu_data_left()
333 int len) in nvme_tcp_pdu_last_send() argument
335 return nvme_tcp_pdu_data_left(req) <= len; in nvme_tcp_pdu_last_send()
347 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) { in nvme_tcp_init_iter()
348 vec = &rq->special_vec; in nvme_tcp_init_iter()
353 struct bio *bio = req->curr_bio; in nvme_tcp_init_iter()
357 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); in nvme_tcp_init_iter()
362 size = bio->bi_iter.bi_size; in nvme_tcp_init_iter()
363 offset = bio->bi_iter.bi_bvec_done; in nvme_tcp_init_iter()
366 iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size); in nvme_tcp_init_iter()
367 req->iter.iov_offset = offset; in nvme_tcp_init_iter()
371 int len) in nvme_tcp_advance_req() argument
373 req->data_sent += len; in nvme_tcp_advance_req()
374 req->pdu_sent += len; in nvme_tcp_advance_req()
375 iov_iter_advance(&req->iter, len); in nvme_tcp_advance_req()
376 if (!iov_iter_count(&req->iter) && in nvme_tcp_advance_req()
377 req->data_sent < req->data_len) { in nvme_tcp_advance_req()
378 req->curr_bio = req->curr_bio->bi_next; in nvme_tcp_advance_req()
395 return !list_empty(&queue->send_list) || in nvme_tcp_queue_has_pending()
396 !llist_empty(&queue->req_list); in nvme_tcp_queue_has_pending()
408 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_queue_request()
411 empty = llist_add(&req->lentry, &queue->req_list) && in nvme_tcp_queue_request()
412 list_empty(&queue->send_list) && !queue->request; in nvme_tcp_queue_request()
419 if (queue->io_cpu == raw_smp_processor_id() && in nvme_tcp_queue_request()
420 sync && empty && mutex_trylock(&queue->send_mutex)) { in nvme_tcp_queue_request()
422 mutex_unlock(&queue->send_mutex); in nvme_tcp_queue_request()
426 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_queue_request()
434 for (node = llist_del_all(&queue->req_list); node; node = node->next) { in nvme_tcp_process_req_list()
436 list_add(&req->entry, &queue->send_list); in nvme_tcp_process_req_list()
445 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
449 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
455 list_del(&req->entry); in nvme_tcp_fetch_request()
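nvme_tcp_queue_request() pushes onto a lock-free llist from any context, and the single io_work consumer splices those entries into the ordered send_list (llist_del_all() returns newest-first, so the per-node list_add() restores FIFO order). A user-space C11 sketch of the same two-stage queue (all names invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct node { struct node *next; };

static _Atomic(struct node *) req_list; /* lock-free producer side */
static struct node *send_list;          /* consumer-private FIFO   */

/* Push a node; returns true if the list was empty, like llist_add(). */
static bool push(struct node *n)
{
        struct node *old = atomic_load(&req_list);

        do {
                n->next = old;
        } while (!atomic_compare_exchange_weak(&req_list, &old, n));
        return old == NULL;
}

/* Consumer only: take everything pushed so far and prepend each node,
 * turning the newest-first stack back into oldest-first FIFO order.
 */
static void splice(void)
{
        struct node *n = atomic_exchange(&req_list, NULL);

        while (n) {
                struct node *next = n->next;

                n->next = send_list;
                send_list = n;
                n = next;
        }
}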
467 struct page *page, off_t off, size_t len) in nvme_tcp_ddgst_update() argument
472 sg_set_page(&sg, page, len, off); in nvme_tcp_ddgst_update()
473 ahash_request_set_crypt(hash, &sg, NULL, len); in nvme_tcp_ddgst_update()
478 void *pdu, size_t len) in nvme_tcp_hdgst() argument
482 sg_init_one(&sg, pdu, len); in nvme_tcp_hdgst()
483 ahash_request_set_crypt(hash, &sg, pdu + len, len); in nvme_tcp_hdgst()
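Both digests are CRC32C (Castagnoli): nvme_tcp_hdgst() writes the 4-byte header digest immediately after the hlen-byte header (the pdu + len destination above), and nvme_tcp_ddgst_update() accumulates the data digest one segment at a time. A portable bitwise sketch without the kernel crypto API (assumes a little-endian host for the wire encoding):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Reflected CRC32C, polynomial 0x1EDC6F41 (reversed form 0x82F63B78). */
static uint32_t crc32c(const void *buf, size_t len)
{
        const uint8_t *p = buf;
        uint32_t crc = ~0u;

        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1));
        }
        return ~crc;
}

/* Append the header digest after an hlen-byte PDU header. */
static void set_hdgst(uint8_t *pdu, size_t hlen)
{
        uint32_t d = crc32c(pdu, hlen);

        memcpy(pdu + hlen, &d, sizeof(d)); /* little-endian on the wire */
}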
494 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) { in nvme_tcp_verify_hdgst()
495 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
498 return -EPROTO; in nvme_tcp_verify_hdgst()
501 recv_digest = *(__le32 *)(pdu + hdr->hlen); in nvme_tcp_verify_hdgst()
502 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len); in nvme_tcp_verify_hdgst()
503 exp_digest = *(__le32 *)(pdu + hdr->hlen); in nvme_tcp_verify_hdgst()
505 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
508 return -EIO; in nvme_tcp_verify_hdgst()
518 u32 len; in nvme_tcp_check_ddgst() local
520 len = le32_to_cpu(hdr->plen) - hdr->hlen - in nvme_tcp_check_ddgst()
521 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0); in nvme_tcp_check_ddgst()
523 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) { in nvme_tcp_check_ddgst()
524 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_check_ddgst()
527 return -EPROTO; in nvme_tcp_check_ddgst()
529 crypto_ahash_init(queue->rcv_hash); in nvme_tcp_check_ddgst()
539 page_frag_free(req->pdu); in nvme_tcp_exit_request()
546 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data); in nvme_tcp_init_request() local
549 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; in nvme_tcp_init_request()
550 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx]; in nvme_tcp_init_request()
553 req->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_init_request()
556 if (!req->pdu) in nvme_tcp_init_request()
557 return -ENOMEM; in nvme_tcp_init_request()
559 pdu = req->pdu; in nvme_tcp_init_request()
560 req->queue = queue; in nvme_tcp_init_request()
561 nvme_req(rq)->ctrl = &ctrl->ctrl; in nvme_tcp_init_request()
562 nvme_req(rq)->cmd = &pdu->cmd; in nvme_tcp_init_request()
570 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data); in nvme_tcp_init_hctx() local
571 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_tcp_init_hctx()
573 hctx->driver_data = queue; in nvme_tcp_init_hctx()
580 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data); in nvme_tcp_init_admin_hctx() local
581 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_init_admin_hctx()
583 hctx->driver_data = queue; in nvme_tcp_init_admin_hctx()
590 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU : in nvme_tcp_recv_state()
591 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST : in nvme_tcp_recv_state()
597 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) + in nvme_tcp_init_recv_ctx()
599 queue->pdu_offset = 0; in nvme_tcp_init_recv_ctx()
600 queue->data_remaining = -1; in nvme_tcp_init_recv_ctx()
601 queue->ddgst_remaining = 0; in nvme_tcp_init_recv_ctx()
604 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl) in nvme_tcp_error_recovery() argument
606 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) in nvme_tcp_error_recovery()
609 dev_warn(ctrl->device, "starting error recovery\n"); in nvme_tcp_error_recovery()
610 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work); in nvme_tcp_error_recovery()
619 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id); in nvme_tcp_process_nvme_cqe()
621 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_process_nvme_cqe()
623 cqe->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_process_nvme_cqe()
624 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_process_nvme_cqe()
625 return -EINVAL; in nvme_tcp_process_nvme_cqe()
629 if (req->status == cpu_to_le16(NVME_SC_SUCCESS)) in nvme_tcp_process_nvme_cqe()
630 req->status = cqe->status; in nvme_tcp_process_nvme_cqe()
632 if (!nvme_try_complete_req(rq, req->status, cqe->result)) in nvme_tcp_process_nvme_cqe()
634 queue->nr_cqe++; in nvme_tcp_process_nvme_cqe()
644 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_c2h_data()
646 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
648 pdu->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_handle_c2h_data()
649 return -ENOENT; in nvme_tcp_handle_c2h_data()
653 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
655 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
656 return -EIO; in nvme_tcp_handle_c2h_data()
659 queue->data_remaining = le32_to_cpu(pdu->data_length); in nvme_tcp_handle_c2h_data()
661 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS && in nvme_tcp_handle_c2h_data()
662 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) { in nvme_tcp_handle_c2h_data()
663 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
665 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
666 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_handle_c2h_data()
667 return -EPROTO; in nvme_tcp_handle_c2h_data()
676 struct nvme_completion *cqe = &pdu->cqe; in nvme_tcp_handle_comp()
686 cqe->command_id))) in nvme_tcp_handle_comp()
687 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_tcp_handle_comp()
688 &cqe->result); in nvme_tcp_handle_comp()
698 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_h2c_data_pdu()
700 u32 h2cdata_sent = req->pdu_len; in nvme_tcp_setup_h2c_data_pdu()
704 req->state = NVME_TCP_SEND_H2C_PDU; in nvme_tcp_setup_h2c_data_pdu()
705 req->offset = 0; in nvme_tcp_setup_h2c_data_pdu()
706 req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata); in nvme_tcp_setup_h2c_data_pdu()
707 req->pdu_sent = 0; in nvme_tcp_setup_h2c_data_pdu()
708 req->h2cdata_left -= req->pdu_len; in nvme_tcp_setup_h2c_data_pdu()
709 req->h2cdata_offset += h2cdata_sent; in nvme_tcp_setup_h2c_data_pdu()
712 data->hdr.type = nvme_tcp_h2c_data; in nvme_tcp_setup_h2c_data_pdu()
713 if (!req->h2cdata_left) in nvme_tcp_setup_h2c_data_pdu()
714 data->hdr.flags = NVME_TCP_F_DATA_LAST; in nvme_tcp_setup_h2c_data_pdu()
715 if (queue->hdr_digest) in nvme_tcp_setup_h2c_data_pdu()
716 data->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_setup_h2c_data_pdu()
717 if (queue->data_digest) in nvme_tcp_setup_h2c_data_pdu()
718 data->hdr.flags |= NVME_TCP_F_DDGST; in nvme_tcp_setup_h2c_data_pdu()
719 data->hdr.hlen = sizeof(*data); in nvme_tcp_setup_h2c_data_pdu()
720 data->hdr.pdo = data->hdr.hlen + hdgst; in nvme_tcp_setup_h2c_data_pdu()
721 data->hdr.plen = in nvme_tcp_setup_h2c_data_pdu()
722 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst); in nvme_tcp_setup_h2c_data_pdu()
723 data->ttag = req->ttag; in nvme_tcp_setup_h2c_data_pdu()
724 data->command_id = nvme_cid(rq); in nvme_tcp_setup_h2c_data_pdu()
725 data->data_offset = cpu_to_le32(req->h2cdata_offset); in nvme_tcp_setup_h2c_data_pdu()
726 data->data_length = cpu_to_le32(req->pdu_len); in nvme_tcp_setup_h2c_data_pdu()
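The plen computed above is the full wire length of one H2CData PDU: header, optional header digest, this chunk's payload, optional data digest. A tiny worked sketch (the 24-byte header and 4-byte digests match the NVMe/TCP wire format):

#include <stdint.h>
#include <stdbool.h>

/* e.g. a 4 KiB chunk with both digests: 24 + 4 + 4096 + 4 = 4128 bytes */
static uint32_t h2c_plen(uint32_t hlen, bool hdgst, uint32_t pdu_len, bool ddgst)
{
        return hlen + (hdgst ? 4 : 0) + pdu_len + (ddgst ? 4 : 0);
}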
734 u32 r2t_length = le32_to_cpu(pdu->r2t_length); in nvme_tcp_handle_r2t()
735 u32 r2t_offset = le32_to_cpu(pdu->r2t_offset); in nvme_tcp_handle_r2t()
737 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_r2t()
739 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
741 pdu->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_handle_r2t()
742 return -ENOENT; in nvme_tcp_handle_r2t()
747 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
748 "req %d r2t len is %u, probably a bug...\n", in nvme_tcp_handle_r2t()
749 rq->tag, r2t_length); in nvme_tcp_handle_r2t()
750 return -EPROTO; in nvme_tcp_handle_r2t()
753 if (unlikely(req->data_sent + r2t_length > req->data_len)) { in nvme_tcp_handle_r2t()
754 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
755 "req %d r2t len %u exceeded data len %u (%zu sent)\n", in nvme_tcp_handle_r2t()
756 rq->tag, r2t_length, req->data_len, req->data_sent); in nvme_tcp_handle_r2t()
757 return -EPROTO; in nvme_tcp_handle_r2t()
760 if (unlikely(r2t_offset < req->data_sent)) { in nvme_tcp_handle_r2t()
761 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
763 rq->tag, r2t_offset, req->data_sent); in nvme_tcp_handle_r2t()
764 return -EPROTO; in nvme_tcp_handle_r2t()
767 req->pdu_len = 0; in nvme_tcp_handle_r2t()
768 req->h2cdata_left = r2t_length; in nvme_tcp_handle_r2t()
769 req->h2cdata_offset = r2t_offset; in nvme_tcp_handle_r2t()
770 req->ttag = pdu->ttag; in nvme_tcp_handle_r2t()
783 u32 plen = le32_to_cpu(pdu->hdr.plen); in nvme_tcp_handle_c2h_term()
796 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_term()
802 fes = le16_to_cpu(pdu->fes); in nvme_tcp_handle_c2h_term()
808 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_term()
813 unsigned int *offset, size_t *len) in nvme_tcp_recv_pdu() argument
816 char *pdu = queue->pdu; in nvme_tcp_recv_pdu()
817 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining); in nvme_tcp_recv_pdu()
821 &pdu[queue->pdu_offset], rcv_len); in nvme_tcp_recv_pdu()
825 queue->pdu_remaining -= rcv_len; in nvme_tcp_recv_pdu()
826 queue->pdu_offset += rcv_len; in nvme_tcp_recv_pdu()
828 *len -= rcv_len; in nvme_tcp_recv_pdu()
829 if (queue->pdu_remaining) in nvme_tcp_recv_pdu()
832 hdr = queue->pdu; in nvme_tcp_recv_pdu()
833 if (unlikely(hdr->hlen != sizeof(struct nvme_tcp_rsp_pdu))) { in nvme_tcp_recv_pdu()
834 if (!nvme_tcp_recv_pdu_supported(hdr->type)) in nvme_tcp_recv_pdu()
837 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_pdu()
839 hdr->type, hdr->hlen); in nvme_tcp_recv_pdu()
840 return -EPROTO; in nvme_tcp_recv_pdu()
843 if (unlikely(hdr->type == nvme_tcp_c2h_term)) { in nvme_tcp_recv_pdu()
848 nvme_tcp_handle_c2h_term(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
849 return -EINVAL; in nvme_tcp_recv_pdu()
852 if (queue->hdr_digest) { in nvme_tcp_recv_pdu()
853 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen); in nvme_tcp_recv_pdu()
859 if (queue->data_digest) { in nvme_tcp_recv_pdu()
860 ret = nvme_tcp_check_ddgst(queue, queue->pdu); in nvme_tcp_recv_pdu()
865 switch (hdr->type) { in nvme_tcp_recv_pdu()
867 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
870 return nvme_tcp_handle_comp(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
873 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
879 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_pdu()
880 "unsupported pdu type (%d)\n", hdr->type); in nvme_tcp_recv_pdu()
881 return -EINVAL; in nvme_tcp_recv_pdu()
893 unsigned int *offset, size_t *len) in nvme_tcp_recv_data() argument
895 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_data()
897 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_recv_data()
903 recv_len = min_t(size_t, *len, queue->data_remaining); in nvme_tcp_recv_data()
907 if (!iov_iter_count(&req->iter)) { in nvme_tcp_recv_data()
908 req->curr_bio = req->curr_bio->bi_next; in nvme_tcp_recv_data()
914 if (!req->curr_bio) { in nvme_tcp_recv_data()
915 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
917 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
919 return -EIO; in nvme_tcp_recv_data()
926 iov_iter_count(&req->iter)); in nvme_tcp_recv_data()
928 if (queue->data_digest) in nvme_tcp_recv_data()
930 &req->iter, recv_len, queue->rcv_hash); in nvme_tcp_recv_data()
933 &req->iter, recv_len); in nvme_tcp_recv_data()
935 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
937 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
941 *len -= recv_len; in nvme_tcp_recv_data()
943 queue->data_remaining -= recv_len; in nvme_tcp_recv_data()
946 if (!queue->data_remaining) { in nvme_tcp_recv_data()
947 if (queue->data_digest) { in nvme_tcp_recv_data()
948 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst); in nvme_tcp_recv_data()
949 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH; in nvme_tcp_recv_data()
951 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { in nvme_tcp_recv_data()
953 le16_to_cpu(req->status)); in nvme_tcp_recv_data()
954 queue->nr_cqe++; in nvme_tcp_recv_data()
964 struct sk_buff *skb, unsigned int *offset, size_t *len) in nvme_tcp_recv_ddgst() argument
966 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_ddgst()
967 char *ddgst = (char *)&queue->recv_ddgst; in nvme_tcp_recv_ddgst()
968 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining); in nvme_tcp_recv_ddgst()
969 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining; in nvme_tcp_recv_ddgst()
976 queue->ddgst_remaining -= recv_len; in nvme_tcp_recv_ddgst()
978 *len -= recv_len; in nvme_tcp_recv_ddgst()
979 if (queue->ddgst_remaining) in nvme_tcp_recv_ddgst()
982 if (queue->recv_ddgst != queue->exp_ddgst) { in nvme_tcp_recv_ddgst()
984 pdu->command_id); in nvme_tcp_recv_ddgst()
987 req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR); in nvme_tcp_recv_ddgst()
989 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_ddgst()
991 le32_to_cpu(queue->recv_ddgst), in nvme_tcp_recv_ddgst()
992 le32_to_cpu(queue->exp_ddgst)); in nvme_tcp_recv_ddgst()
995 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { in nvme_tcp_recv_ddgst()
997 pdu->command_id); in nvme_tcp_recv_ddgst()
1000 nvme_tcp_end_request(rq, le16_to_cpu(req->status)); in nvme_tcp_recv_ddgst()
1001 queue->nr_cqe++; in nvme_tcp_recv_ddgst()
1009 unsigned int offset, size_t len) in nvme_tcp_recv_skb() argument
1011 struct nvme_tcp_queue *queue = desc->arg.data; in nvme_tcp_recv_skb()
1012 size_t consumed = len; in nvme_tcp_recv_skb()
1015 if (unlikely(!queue->rd_enabled)) in nvme_tcp_recv_skb()
1016 return -EFAULT; in nvme_tcp_recv_skb()
1018 while (len) { in nvme_tcp_recv_skb()
1021 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
1024 result = nvme_tcp_recv_data(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
1027 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
1030 result = -EFAULT; in nvme_tcp_recv_skb()
1033 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_skb()
1035 queue->rd_enabled = false; in nvme_tcp_recv_skb()
1036 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_recv_skb()
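nvme_tcp_recv_skb() is invoked from ->read_sock() and drains each skb chunk through the receive state machine: a pending header always wins, then a pending data digest, otherwise payload. A self-contained user-space sketch of that dispatch (it only counts bytes down; the real handlers parse and verify):

#include <stdint.h>
#include <stddef.h>

struct conn {
        size_t pdu_remaining;   /* header bytes still expected  */
        size_t data_remaining;  /* payload bytes still expected */
        size_t ddgst_remaining; /* digest bytes still expected  */
};

/* Mirror of nvme_tcp_recv_state()'s precedence. */
static size_t *rx_bucket(struct conn *c)
{
        if (c->pdu_remaining)
                return &c->pdu_remaining;
        if (c->ddgst_remaining)
                return &c->ddgst_remaining;
        return &c->data_remaining;
}

/* Consume one received chunk, possibly crossing several states. */
static void rx_consume(struct conn *c, const uint8_t *buf, size_t len)
{
        while (len) {
                size_t *rem = rx_bucket(c);
                size_t n = len < *rem ? len : *rem;

                if (!n)
                        break; /* nothing expected: a new PDU starts here */
                *rem -= n;     /* real code would parse/verify these bytes */
                buf += n;
                len -= n;
        }
}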
1050 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_data_ready()
1051 queue = sk->sk_user_data; in nvme_tcp_data_ready()
1052 if (likely(queue && queue->rd_enabled) && in nvme_tcp_data_ready()
1053 !test_bit(NVME_TCP_Q_POLLING, &queue->flags)) in nvme_tcp_data_ready()
1054 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_data_ready()
1055 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_data_ready()
1062 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_write_space()
1063 queue = sk->sk_user_data; in nvme_tcp_write_space()
1065 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in nvme_tcp_write_space()
1066 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_write_space()
1068 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_write_space()
1075 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_state_change()
1076 queue = sk->sk_user_data; in nvme_tcp_state_change()
1080 switch (sk->sk_state) { in nvme_tcp_state_change()
1086 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_state_change()
1089 dev_info(queue->ctrl->ctrl.device, in nvme_tcp_state_change()
1091 nvme_tcp_queue_id(queue), sk->sk_state); in nvme_tcp_state_change()
1094 queue->state_change(sk); in nvme_tcp_state_change()
1096 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_state_change()
1101 queue->request = NULL; in nvme_tcp_done_send_req()
1109 nvme_complete_async_event(&req->queue->ctrl->ctrl, in nvme_tcp_fail_request()
1119 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data()
1120 int req_data_len = req->data_len; in nvme_tcp_try_send_data()
1121 u32 h2cdata_left = req->h2cdata_left; in nvme_tcp_try_send_data()
1130 size_t len = nvme_tcp_req_cur_length(req); in nvme_tcp_try_send_data() local
1131 bool last = nvme_tcp_pdu_last_send(req, len); in nvme_tcp_try_send_data()
1132 int req_data_sent = req->data_sent; in nvme_tcp_try_send_data()
1135 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_data()
1140 if (!sendpages_ok(page, len, offset)) in nvme_tcp_try_send_data()
1143 bvec_set_page(&bvec, page, len, offset); in nvme_tcp_try_send_data()
1144 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); in nvme_tcp_try_send_data()
1145 ret = sock_sendmsg(queue->sock, &msg); in nvme_tcp_try_send_data()
1149 if (queue->data_digest) in nvme_tcp_try_send_data()
1150 nvme_tcp_ddgst_update(queue->snd_hash, page, in nvme_tcp_try_send_data()
1162 if (last && ret == len) { in nvme_tcp_try_send_data()
1163 if (queue->data_digest) { in nvme_tcp_try_send_data()
1164 nvme_tcp_ddgst_final(queue->snd_hash, in nvme_tcp_try_send_data()
1165 &req->ddgst); in nvme_tcp_try_send_data()
1166 req->state = NVME_TCP_SEND_DDGST; in nvme_tcp_try_send_data()
1167 req->offset = 0; in nvme_tcp_try_send_data()
1177 return -EAGAIN; in nvme_tcp_try_send_data()
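The send loop above sets MSG_MORE while more of the request (or its digest) will follow and only ends the record on the last fragment, so header, payload and digest coalesce into fewer TCP segments. A user-space illustration of the flag (MSG_SPLICE_PAGES and the sendpages_ok() check are kernel-internal and omitted; short-send retries skipped for brevity):

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Send hdr then payload as one logical unit; MSG_MORE on the first part
 * hints the stack to hold it back until the final sendmsg().
 */
static ssize_t send_two_part(int fd, const void *hdr, size_t hlen,
                             const void *data, size_t dlen)
{
        struct iovec iov = { .iov_base = (void *)hdr, .iov_len = hlen };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };

        if (sendmsg(fd, &msg, MSG_MORE) < 0)
                return -1;

        iov = (struct iovec){ .iov_base = (void *)data, .iov_len = dlen };
        if (sendmsg(fd, &msg, 0) < 0) /* last part: no MSG_MORE */
                return -1;
        return (ssize_t)(hlen + dlen);
}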
1182 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_cmd_pdu()
1188 int len = sizeof(*pdu) + hdgst - req->offset; in nvme_tcp_try_send_cmd_pdu() local
1196 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_cmd_pdu()
1197 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_cmd_pdu()
1199 bvec_set_virt(&bvec, (void *)pdu + req->offset, len); in nvme_tcp_try_send_cmd_pdu()
1200 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); in nvme_tcp_try_send_cmd_pdu()
1201 ret = sock_sendmsg(queue->sock, &msg); in nvme_tcp_try_send_cmd_pdu()
1205 len -= ret; in nvme_tcp_try_send_cmd_pdu()
1206 if (!len) { in nvme_tcp_try_send_cmd_pdu()
1208 req->state = NVME_TCP_SEND_DATA; in nvme_tcp_try_send_cmd_pdu()
1209 if (queue->data_digest) in nvme_tcp_try_send_cmd_pdu()
1210 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_cmd_pdu()
1216 req->offset += ret; in nvme_tcp_try_send_cmd_pdu()
1218 return -EAGAIN; in nvme_tcp_try_send_cmd_pdu()
1223 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data_pdu()
1228 int len = sizeof(*pdu) - req->offset + hdgst; in nvme_tcp_try_send_data_pdu() local
1231 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_data_pdu()
1232 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_data_pdu()
1234 if (!req->h2cdata_left) in nvme_tcp_try_send_data_pdu()
1237 bvec_set_virt(&bvec, (void *)pdu + req->offset, len); in nvme_tcp_try_send_data_pdu()
1238 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); in nvme_tcp_try_send_data_pdu()
1239 ret = sock_sendmsg(queue->sock, &msg); in nvme_tcp_try_send_data_pdu()
1243 len -= ret; in nvme_tcp_try_send_data_pdu()
1244 if (!len) { in nvme_tcp_try_send_data_pdu()
1245 req->state = NVME_TCP_SEND_DATA; in nvme_tcp_try_send_data_pdu()
1246 if (queue->data_digest) in nvme_tcp_try_send_data_pdu()
1247 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_data_pdu()
1250 req->offset += ret; in nvme_tcp_try_send_data_pdu()
1252 return -EAGAIN; in nvme_tcp_try_send_data_pdu()
1257 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_ddgst()
1258 size_t offset = req->offset; in nvme_tcp_try_send_ddgst()
1259 u32 h2cdata_left = req->h2cdata_left; in nvme_tcp_try_send_ddgst()
1263 .iov_base = (u8 *)&req->ddgst + req->offset, in nvme_tcp_try_send_ddgst()
1264 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset in nvme_tcp_try_send_ddgst()
1272 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_try_send_ddgst()
1284 req->offset += ret; in nvme_tcp_try_send_ddgst()
1285 return -EAGAIN; in nvme_tcp_try_send_ddgst()
1294 if (!queue->request) { in nvme_tcp_try_send()
1295 queue->request = nvme_tcp_fetch_request(queue); in nvme_tcp_try_send()
1296 if (!queue->request) in nvme_tcp_try_send()
1299 req = queue->request; in nvme_tcp_try_send()
1302 if (req->state == NVME_TCP_SEND_CMD_PDU) { in nvme_tcp_try_send()
1310 if (req->state == NVME_TCP_SEND_H2C_PDU) { in nvme_tcp_try_send()
1316 if (req->state == NVME_TCP_SEND_DATA) { in nvme_tcp_try_send()
1322 if (req->state == NVME_TCP_SEND_DDGST) in nvme_tcp_try_send()
1325 if (ret == -EAGAIN) { in nvme_tcp_try_send()
1328 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_try_send()
1330 nvme_tcp_fail_request(queue->request); in nvme_tcp_try_send()
1340 struct socket *sock = queue->sock; in nvme_tcp_try_recv()
1341 struct sock *sk = sock->sk; in nvme_tcp_try_recv()
1348 queue->nr_cqe = 0; in nvme_tcp_try_recv()
1349 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb); in nvme_tcp_try_recv()
1364 if (mutex_trylock(&queue->send_mutex)) { in nvme_tcp_io_work()
1366 mutex_unlock(&queue->send_mutex); in nvme_tcp_io_work()
1379 if (!pending || !queue->rd_enabled) in nvme_tcp_io_work()
1384 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_io_work()
1389 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); in nvme_tcp_free_crypto()
1391 ahash_request_free(queue->rcv_hash); in nvme_tcp_free_crypto()
1392 ahash_request_free(queue->snd_hash); in nvme_tcp_free_crypto()
1404 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1405 if (!queue->snd_hash) in nvme_tcp_alloc_crypto()
1407 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1409 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1410 if (!queue->rcv_hash) in nvme_tcp_alloc_crypto()
1412 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1416 ahash_request_free(queue->snd_hash); in nvme_tcp_alloc_crypto()
1419 return -ENOMEM; in nvme_tcp_alloc_crypto()
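The transform behind snd_hash and rcv_hash is the kernel's "crc32c" ahash, allocated just above the matched lines. A condensed sketch of the pairing (illustrative; the driver's allocation flags and unwind labels differ slightly):

#include <crypto/hash.h>
#include <linux/err.h>

static int demo_alloc_crypto(struct ahash_request **snd,
                             struct ahash_request **rcv)
{
        struct crypto_ahash *tfm = crypto_alloc_ahash("crc32c", 0, 0);

        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        *snd = ahash_request_alloc(tfm, GFP_KERNEL);
        *rcv = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!*snd || !*rcv) {
                ahash_request_free(*snd); /* NULL-safe */
                ahash_request_free(*rcv);
                crypto_free_ahash(tfm);
                return -ENOMEM;
        }
        ahash_request_set_callback(*snd, 0, NULL, NULL);
        ahash_request_set_callback(*rcv, 0, NULL, NULL);
        return 0;
}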
1422 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl) in nvme_tcp_free_async_req() argument
1424 struct nvme_tcp_request *async = &ctrl->async_req; in nvme_tcp_free_async_req()
1426 page_frag_free(async->pdu); in nvme_tcp_free_async_req()
1429 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl) in nvme_tcp_alloc_async_req() argument
1431 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1432 struct nvme_tcp_request *async = &ctrl->async_req; in nvme_tcp_alloc_async_req()
1435 async->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_alloc_async_req()
1438 if (!async->pdu) in nvme_tcp_alloc_async_req()
1439 return -ENOMEM; in nvme_tcp_alloc_async_req()
1441 async->queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1447 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_free_queue() local
1448 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_free_queue()
1451 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_free_queue()
1454 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_free_queue()
1457 page_frag_cache_drain(&queue->pf_cache); in nvme_tcp_free_queue()
1460 /* ->sock will be released by fput() */ in nvme_tcp_free_queue()
1461 fput(queue->sock->file); in nvme_tcp_free_queue()
1462 queue->sock = NULL; in nvme_tcp_free_queue()
1465 kfree(queue->pdu); in nvme_tcp_free_queue()
1466 mutex_destroy(&queue->send_mutex); in nvme_tcp_free_queue()
1467 mutex_destroy(&queue->queue_lock); in nvme_tcp_free_queue()
1484 return -ENOMEM; in nvme_tcp_init_connection()
1488 ret = -ENOMEM; in nvme_tcp_init_connection()
1492 icreq->hdr.type = nvme_tcp_icreq; in nvme_tcp_init_connection()
1493 icreq->hdr.hlen = sizeof(*icreq); in nvme_tcp_init_connection()
1494 icreq->hdr.pdo = 0; in nvme_tcp_init_connection()
1495 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen); in nvme_tcp_init_connection()
1496 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0); in nvme_tcp_init_connection()
1497 icreq->maxr2t = 0; /* single inflight r2t supported */ in nvme_tcp_init_connection()
1498 icreq->hpda = 0; /* no alignment constraint */ in nvme_tcp_init_connection()
1499 if (queue->hdr_digest) in nvme_tcp_init_connection()
1500 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE; in nvme_tcp_init_connection()
1501 if (queue->data_digest) in nvme_tcp_init_connection()
1502 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE; in nvme_tcp_init_connection()
1506 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_init_connection()
1521 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, in nvme_tcp_init_connection()
1524 ret = -ECONNRESET; in nvme_tcp_init_connection()
1530 ret = -ENOTCONN; in nvme_tcp_init_connection()
1532 ctype = tls_get_record_type(queue->sock->sk, in nvme_tcp_init_connection()
1540 ret = -EINVAL; in nvme_tcp_init_connection()
1541 if (icresp->hdr.type != nvme_tcp_icresp) { in nvme_tcp_init_connection()
1543 nvme_tcp_queue_id(queue), icresp->hdr.type); in nvme_tcp_init_connection()
1547 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) { in nvme_tcp_init_connection()
1549 nvme_tcp_queue_id(queue), icresp->hdr.plen); in nvme_tcp_init_connection()
1553 if (icresp->pfv != NVME_TCP_PFV_1_0) { in nvme_tcp_init_connection()
1555 nvme_tcp_queue_id(queue), icresp->pfv); in nvme_tcp_init_connection()
1559 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE); in nvme_tcp_init_connection()
1560 if ((queue->data_digest && !ctrl_ddgst) || in nvme_tcp_init_connection()
1561 (!queue->data_digest && ctrl_ddgst)) { in nvme_tcp_init_connection()
1562 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1564 queue->data_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1569 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE); in nvme_tcp_init_connection()
1570 if ((queue->hdr_digest && !ctrl_hdgst) || in nvme_tcp_init_connection()
1571 (!queue->hdr_digest && ctrl_hdgst)) { in nvme_tcp_init_connection()
1572 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1574 queue->hdr_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1579 if (icresp->cpda != 0) { in nvme_tcp_init_connection()
1581 nvme_tcp_queue_id(queue), icresp->cpda); in nvme_tcp_init_connection()
1585 maxh2cdata = le32_to_cpu(icresp->maxdata); in nvme_tcp_init_connection()
1591 queue->maxh2cdata = maxh2cdata; in nvme_tcp_init_connection()
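Between reading icresp->maxdata and the assignment above, the full source rejects out-of-range values; per the NVMe/TCP spec MAXH2CDATA must be a dword multiple and at least 4096 (NVME_TCP_MIN_MAXH2CDATA). A sketch of that check, reconstructed under those assumptions:

#include <stdint.h>
#include <stdbool.h>

#define MIN_MAXH2CDATA 4096 /* NVME_TCP_MIN_MAXH2CDATA in <linux/nvme-tcp.h> */

/* Accept the controller's advertised MAXH2CDATA only if dword aligned
 * and at least the spec minimum.
 */
static bool maxh2cdata_valid(uint32_t v)
{
        return !(v % 4) && v >= MIN_MAXH2CDATA;
}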
1608 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_default_queue() local
1612 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_default_queue()
1617 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_read_queue() local
1622 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_tcp_read_queue()
1623 ctrl->io_queues[HCTX_TYPE_READ]; in nvme_tcp_read_queue()
1628 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_poll_queue() local
1634 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_tcp_poll_queue()
1635 ctrl->io_queues[HCTX_TYPE_READ] + in nvme_tcp_poll_queue()
1636 ctrl->io_queues[HCTX_TYPE_POLL]; in nvme_tcp_poll_queue()
1640 * Track the number of queues assigned to each cpu using a global per-cpu
1650 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_set_queue_io_cpu() local
1651 struct blk_mq_tag_set *set = &ctrl->tag_set; in nvme_tcp_set_queue_io_cpu()
1652 int qid = nvme_tcp_queue_id(queue) - 1; in nvme_tcp_set_queue_io_cpu()
1660 mq_map = set->map[HCTX_TYPE_DEFAULT].mq_map; in nvme_tcp_set_queue_io_cpu()
1662 mq_map = set->map[HCTX_TYPE_READ].mq_map; in nvme_tcp_set_queue_io_cpu()
1664 mq_map = set->map[HCTX_TYPE_POLL].mq_map; in nvme_tcp_set_queue_io_cpu()
1682 queue->io_cpu = io_cpu; in nvme_tcp_set_queue_io_cpu()
1684 set_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags); in nvme_tcp_set_queue_io_cpu()
1687 dev_dbg(ctrl->ctrl.device, "queue %d: using cpu %d\n", in nvme_tcp_set_queue_io_cpu()
1688 qid, queue->io_cpu); in nvme_tcp_set_queue_io_cpu()
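The elided middle of nvme_tcp_set_queue_io_cpu() scans the chosen mq_map for the CPU with the fewest nvme-tcp queues already assigned, using the global per-cpu counters mentioned in the comment above. A simplified user-space sketch of that least-loaded pick (all names invented):

#include <limits.h>

/* Pick the CPU mapped to hctx 'qid' that currently hosts the fewest
 * queues; 'load' stands in for the driver's per-cpu atomic counters.
 * Returns -1 if no CPU maps to qid (fall back to an unbound worker).
 */
static int pick_io_cpu(const unsigned int *mq_map, const int *load,
                       int ncpus, unsigned int qid)
{
        int best = -1, best_load = INT_MAX;

        for (int cpu = 0; cpu < ncpus; cpu++) {
                if (mq_map[cpu] != qid)
                        continue;
                if (load[cpu] < best_load) {
                        best = cpu;
                        best_load = load[cpu];
                }
        }
        return best;
}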
1694 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_tls_done() local
1698 dev_dbg(ctrl->ctrl.device, "queue %d: TLS handshake done, key %x, status %d\n", in nvme_tcp_tls_done()
1702 queue->tls_err = -status; in nvme_tcp_tls_done()
1708 dev_warn(ctrl->ctrl.device, "queue %d: Invalid key %x\n", in nvme_tcp_tls_done()
1710 queue->tls_err = -ENOKEY; in nvme_tcp_tls_done()
1712 queue->tls_enabled = true; in nvme_tcp_tls_done()
1714 ctrl->ctrl.tls_pskid = key_serial(tls_key); in nvme_tcp_tls_done()
1716 queue->tls_err = 0; in nvme_tcp_tls_done()
1720 complete(&queue->tls_complete); in nvme_tcp_tls_done()
1733 dev_dbg(nctrl->device, "queue %d: start TLS with key %x\n", in nvme_tcp_start_tls()
1736 args.ta_sock = queue->sock; in nvme_tcp_start_tls()
1741 if (nctrl->opts->keyring) in nvme_tcp_start_tls()
1742 keyring = key_serial(nctrl->opts->keyring); in nvme_tcp_start_tls()
1745 queue->tls_err = -EOPNOTSUPP; in nvme_tcp_start_tls()
1746 init_completion(&queue->tls_complete); in nvme_tcp_start_tls()
1749 dev_err(nctrl->device, "queue %d: failed to start TLS: %d\n", in nvme_tcp_start_tls()
1753 ret = wait_for_completion_interruptible_timeout(&queue->tls_complete, tmo); in nvme_tcp_start_tls()
1756 ret = -ETIMEDOUT; in nvme_tcp_start_tls()
1758 dev_err(nctrl->device, in nvme_tcp_start_tls()
1761 tls_handshake_cancel(queue->sock->sk); in nvme_tcp_start_tls()
1763 dev_dbg(nctrl->device, in nvme_tcp_start_tls()
1765 qid, queue->tls_err); in nvme_tcp_start_tls()
1766 ret = queue->tls_err; in nvme_tcp_start_tls()
1774 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_alloc_queue() local
1775 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_alloc_queue()
1779 mutex_init(&queue->queue_lock); in nvme_tcp_alloc_queue()
1780 queue->ctrl = ctrl; in nvme_tcp_alloc_queue()
1781 init_llist_head(&queue->req_list); in nvme_tcp_alloc_queue()
1782 INIT_LIST_HEAD(&queue->send_list); in nvme_tcp_alloc_queue()
1783 mutex_init(&queue->send_mutex); in nvme_tcp_alloc_queue()
1784 INIT_WORK(&queue->io_work, nvme_tcp_io_work); in nvme_tcp_alloc_queue()
1787 queue->cmnd_capsule_len = nctrl->ioccsz * 16; in nvme_tcp_alloc_queue()
1789 queue->cmnd_capsule_len = sizeof(struct nvme_command) + in nvme_tcp_alloc_queue()
1792 ret = sock_create_kern(current->nsproxy->net_ns, in nvme_tcp_alloc_queue()
1793 ctrl->addr.ss_family, SOCK_STREAM, in nvme_tcp_alloc_queue()
1794 IPPROTO_TCP, &queue->sock); in nvme_tcp_alloc_queue()
1796 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1801 sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL); in nvme_tcp_alloc_queue()
1807 sk_net_refcnt_upgrade(queue->sock->sk); in nvme_tcp_alloc_queue()
1808 nvme_tcp_reclassify_socket(queue->sock); in nvme_tcp_alloc_queue()
1811 tcp_sock_set_syncnt(queue->sock->sk, 1); in nvme_tcp_alloc_queue()
1814 tcp_sock_set_nodelay(queue->sock->sk); in nvme_tcp_alloc_queue()
1821 sock_no_linger(queue->sock->sk); in nvme_tcp_alloc_queue()
1824 sock_set_priority(queue->sock->sk, so_priority); in nvme_tcp_alloc_queue()
1827 if (nctrl->opts->tos >= 0) in nvme_tcp_alloc_queue()
1828 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos); in nvme_tcp_alloc_queue()
1831 queue->sock->sk->sk_rcvtimeo = 10 * HZ; in nvme_tcp_alloc_queue()
1833 queue->sock->sk->sk_allocation = GFP_ATOMIC; in nvme_tcp_alloc_queue()
1834 queue->sock->sk->sk_use_task_frag = false; in nvme_tcp_alloc_queue()
1835 queue->io_cpu = WORK_CPU_UNBOUND; in nvme_tcp_alloc_queue()
1836 queue->request = NULL; in nvme_tcp_alloc_queue()
1837 queue->data_remaining = 0; in nvme_tcp_alloc_queue()
1838 queue->ddgst_remaining = 0; in nvme_tcp_alloc_queue()
1839 queue->pdu_remaining = 0; in nvme_tcp_alloc_queue()
1840 queue->pdu_offset = 0; in nvme_tcp_alloc_queue()
1841 sk_set_memalloc(queue->sock->sk); in nvme_tcp_alloc_queue()
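The run of socket options above amounts to: a single SYN retry, Nagle disabled, abortive close (linger-on with zero timeout), optional SO_PRIORITY and TOS, and a 10-second receive timeout. A user-space equivalent (values illustrative; sk_allocation, sk_use_task_frag and sk_set_memalloc() have no user-space counterpart):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <sys/time.h>

static void tune_socket(int fd, int prio, int tos)
{
        int one = 1;
        struct linger lin = { .l_onoff = 1, .l_linger = 0 }; /* RST on close */
        struct timeval rcvto = { .tv_sec = 10 };

        setsockopt(fd, IPPROTO_TCP, TCP_SYNCNT, &one, sizeof(one));
        setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
        setsockopt(fd, SOL_SOCKET, SO_LINGER, &lin, sizeof(lin));
        if (prio >= 0)
                setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));
        if (tos >= 0)
                setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));
        setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &rcvto, sizeof(rcvto));
}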
1843 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) { in nvme_tcp_alloc_queue()
1844 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr, in nvme_tcp_alloc_queue()
1845 sizeof(ctrl->src_addr)); in nvme_tcp_alloc_queue()
1847 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1854 if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) { in nvme_tcp_alloc_queue()
1855 char *iface = nctrl->opts->host_iface; in nvme_tcp_alloc_queue()
1858 ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE, in nvme_tcp_alloc_queue()
1861 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1868 queue->hdr_digest = nctrl->opts->hdr_digest; in nvme_tcp_alloc_queue()
1869 queue->data_digest = nctrl->opts->data_digest; in nvme_tcp_alloc_queue()
1870 if (queue->hdr_digest || queue->data_digest) { in nvme_tcp_alloc_queue()
1873 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1881 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL); in nvme_tcp_alloc_queue()
1882 if (!queue->pdu) { in nvme_tcp_alloc_queue()
1883 ret = -ENOMEM; in nvme_tcp_alloc_queue()
1887 dev_dbg(nctrl->device, "connecting queue %d\n", in nvme_tcp_alloc_queue()
1890 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr, in nvme_tcp_alloc_queue()
1891 sizeof(ctrl->addr), 0); in nvme_tcp_alloc_queue()
1893 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1909 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags); in nvme_tcp_alloc_queue()
1914 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvme_tcp_alloc_queue()
1916 kfree(queue->pdu); in nvme_tcp_alloc_queue()
1918 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_alloc_queue()
1921 /* ->sock will be released by fput() */ in nvme_tcp_alloc_queue()
1922 fput(queue->sock->file); in nvme_tcp_alloc_queue()
1923 queue->sock = NULL; in nvme_tcp_alloc_queue()
1925 mutex_destroy(&queue->send_mutex); in nvme_tcp_alloc_queue()
1926 mutex_destroy(&queue->queue_lock); in nvme_tcp_alloc_queue()
1932 struct socket *sock = queue->sock; in nvme_tcp_restore_sock_ops()
1934 write_lock_bh(&sock->sk->sk_callback_lock); in nvme_tcp_restore_sock_ops()
1935 sock->sk->sk_user_data = NULL; in nvme_tcp_restore_sock_ops()
1936 sock->sk->sk_data_ready = queue->data_ready; in nvme_tcp_restore_sock_ops()
1937 sock->sk->sk_state_change = queue->state_change; in nvme_tcp_restore_sock_ops()
1938 sock->sk->sk_write_space = queue->write_space; in nvme_tcp_restore_sock_ops()
1939 write_unlock_bh(&sock->sk->sk_callback_lock); in nvme_tcp_restore_sock_ops()
1944 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in __nvme_tcp_stop_queue()
1946 cancel_work_sync(&queue->io_work); in __nvme_tcp_stop_queue()
1951 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_stop_queue_nowait() local
1952 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_stop_queue_nowait()
1954 if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_stop_queue_nowait()
1957 if (test_and_clear_bit(NVME_TCP_Q_IO_CPU_SET, &queue->flags)) in nvme_tcp_stop_queue_nowait()
1958 atomic_dec(&nvme_tcp_cpu_queues[queue->io_cpu]); in nvme_tcp_stop_queue_nowait()
1960 mutex_lock(&queue->queue_lock); in nvme_tcp_stop_queue_nowait()
1961 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_stop_queue_nowait()
1964 queue->tls_enabled = false; in nvme_tcp_stop_queue_nowait()
1965 mutex_unlock(&queue->queue_lock); in nvme_tcp_stop_queue_nowait()
1970 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_wait_queue() local
1971 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_wait_queue()
1975 if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags) || in nvme_tcp_wait_queue()
1976 !sk_wmem_alloc_get(queue->sock->sk)) in nvme_tcp_wait_queue()
1979 timeout -= 2; in nvme_tcp_wait_queue()
1981 dev_warn(nctrl->device, in nvme_tcp_wait_queue()
1995 write_lock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_setup_sock_ops()
1996 queue->sock->sk->sk_user_data = queue; in nvme_tcp_setup_sock_ops()
1997 queue->state_change = queue->sock->sk->sk_state_change; in nvme_tcp_setup_sock_ops()
1998 queue->data_ready = queue->sock->sk->sk_data_ready; in nvme_tcp_setup_sock_ops()
1999 queue->write_space = queue->sock->sk->sk_write_space; in nvme_tcp_setup_sock_ops()
2000 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready; in nvme_tcp_setup_sock_ops()
2001 queue->sock->sk->sk_state_change = nvme_tcp_state_change; in nvme_tcp_setup_sock_ops()
2002 queue->sock->sk->sk_write_space = nvme_tcp_write_space; in nvme_tcp_setup_sock_ops()
2004 queue->sock->sk->sk_ll_usec = 1; in nvme_tcp_setup_sock_ops()
2006 write_unlock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_setup_sock_ops()
2011 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_start_queue() local
2012 struct nvme_tcp_queue *queue = &ctrl->queues[idx]; in nvme_tcp_start_queue()
2015 queue->rd_enabled = true; in nvme_tcp_start_queue()
2026 set_bit(NVME_TCP_Q_LIVE, &queue->flags); in nvme_tcp_start_queue()
2028 if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_start_queue()
2030 dev_err(nctrl->device, in nvme_tcp_start_queue()
2036 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl) in nvme_tcp_free_admin_queue() argument
2038 if (to_tcp_ctrl(ctrl)->async_req.pdu) { in nvme_tcp_free_admin_queue()
2039 cancel_work_sync(&ctrl->async_event_work); in nvme_tcp_free_admin_queue()
2040 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl)); in nvme_tcp_free_admin_queue()
2041 to_tcp_ctrl(ctrl)->async_req.pdu = NULL; in nvme_tcp_free_admin_queue()
2044 nvme_tcp_free_queue(ctrl, 0); in nvme_tcp_free_admin_queue()
2047 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_free_io_queues() argument
2051 for (i = 1; i < ctrl->queue_count; i++) in nvme_tcp_free_io_queues()
2052 nvme_tcp_free_queue(ctrl, i); in nvme_tcp_free_io_queues()
2055 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_stop_io_queues() argument
2059 for (i = 1; i < ctrl->queue_count; i++) in nvme_tcp_stop_io_queues()
2060 nvme_tcp_stop_queue_nowait(ctrl, i); in nvme_tcp_stop_io_queues()
2061 for (i = 1; i < ctrl->queue_count; i++) in nvme_tcp_stop_io_queues()
2062 nvme_tcp_wait_queue(ctrl, i); in nvme_tcp_stop_io_queues()
2065 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl, in nvme_tcp_start_io_queues() argument
2071 ret = nvme_tcp_start_queue(ctrl, i); in nvme_tcp_start_io_queues()
2079 for (i--; i >= first; i--) in nvme_tcp_start_io_queues()
2080 nvme_tcp_stop_queue(ctrl, i); in nvme_tcp_start_io_queues()
2084 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl) in nvme_tcp_alloc_admin_queue() argument
2089 if (nvme_tcp_tls_configured(ctrl)) { in nvme_tcp_alloc_admin_queue()
2090 if (ctrl->opts->tls_key) in nvme_tcp_alloc_admin_queue()
2091 pskid = key_serial(ctrl->opts->tls_key); in nvme_tcp_alloc_admin_queue()
2092 else if (ctrl->opts->tls) { in nvme_tcp_alloc_admin_queue()
2093 pskid = nvme_tls_psk_default(ctrl->opts->keyring, in nvme_tcp_alloc_admin_queue()
2094 ctrl->opts->host->nqn, in nvme_tcp_alloc_admin_queue()
2095 ctrl->opts->subsysnqn); in nvme_tcp_alloc_admin_queue()
2097 dev_err(ctrl->device, "no valid PSK found\n"); in nvme_tcp_alloc_admin_queue()
2098 return -ENOKEY; in nvme_tcp_alloc_admin_queue()
2103 ret = nvme_tcp_alloc_queue(ctrl, 0, pskid); in nvme_tcp_alloc_admin_queue()
2107 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl)); in nvme_tcp_alloc_admin_queue()
2114 nvme_tcp_free_queue(ctrl, 0); in nvme_tcp_alloc_admin_queue()
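nvme_tls_psk_default() looks up the retained PSK for this host/subsystem pair in the given keyring, addressed by a PSK identity string. A hedged sketch of that identity format (the helper is hypothetical and the exact layout should be checked against the NVMe TLS PSK scheme):

#include <stdio.h>

/* "NVMe0R01 <hostnqn> <subsysnqn>": 'R' marks a retained PSK and "01"
 * selects SHA-256 ("02" would be SHA-384).
 */
static int psk_identity(char *buf, size_t len,
                        const char *hostnqn, const char *subsysnqn)
{
        return snprintf(buf, len, "NVMe0R01 %s %s", hostnqn, subsysnqn);
}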
2118 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) in __nvme_tcp_alloc_io_queues() argument
2122 if (nvme_tcp_tls_configured(ctrl)) { in __nvme_tcp_alloc_io_queues()
2123 if (ctrl->opts->concat) { in __nvme_tcp_alloc_io_queues()
2128 if (!ctrl->opts->tls_key) { in __nvme_tcp_alloc_io_queues()
2129 dev_err(ctrl->device, "no PSK generated\n"); in __nvme_tcp_alloc_io_queues()
2130 return -ENOKEY; in __nvme_tcp_alloc_io_queues()
2132 if (ctrl->tls_pskid && in __nvme_tcp_alloc_io_queues()
2133 ctrl->tls_pskid != key_serial(ctrl->opts->tls_key)) { in __nvme_tcp_alloc_io_queues()
2134 dev_err(ctrl->device, "Stale PSK id %08x\n", ctrl->tls_pskid); in __nvme_tcp_alloc_io_queues()
2135 ctrl->tls_pskid = 0; in __nvme_tcp_alloc_io_queues()
2137 } else if (!ctrl->tls_pskid) { in __nvme_tcp_alloc_io_queues()
2138 dev_err(ctrl->device, "no PSK negotiated\n"); in __nvme_tcp_alloc_io_queues()
2139 return -ENOKEY; in __nvme_tcp_alloc_io_queues()
2143 for (i = 1; i < ctrl->queue_count; i++) { in __nvme_tcp_alloc_io_queues()
2144 ret = nvme_tcp_alloc_queue(ctrl, i, in __nvme_tcp_alloc_io_queues()
2145 ctrl->tls_pskid); in __nvme_tcp_alloc_io_queues()
2153 for (i--; i >= 1; i--) in __nvme_tcp_alloc_io_queues()
2154 nvme_tcp_free_queue(ctrl, i); in __nvme_tcp_alloc_io_queues()
2159 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_alloc_io_queues() argument
2164 nr_io_queues = nvmf_nr_io_queues(ctrl->opts); in nvme_tcp_alloc_io_queues()
2165 ret = nvme_set_queue_count(ctrl, &nr_io_queues); in nvme_tcp_alloc_io_queues()
2170 dev_err(ctrl->device, in nvme_tcp_alloc_io_queues()
2172 return -ENOMEM; in nvme_tcp_alloc_io_queues()
2175 ctrl->queue_count = nr_io_queues + 1; in nvme_tcp_alloc_io_queues()
2176 dev_info(ctrl->device, in nvme_tcp_alloc_io_queues()
2179 nvmf_set_io_queues(ctrl->opts, nr_io_queues, in nvme_tcp_alloc_io_queues()
2180 to_tcp_ctrl(ctrl)->io_queues); in nvme_tcp_alloc_io_queues()
2181 return __nvme_tcp_alloc_io_queues(ctrl); in nvme_tcp_alloc_io_queues()
2184 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_configure_io_queues() argument
2188 ret = nvme_tcp_alloc_io_queues(ctrl); in nvme_tcp_configure_io_queues()
2193 ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set, in nvme_tcp_configure_io_queues()
2195 ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2, in nvme_tcp_configure_io_queues()
2206 nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count); in nvme_tcp_configure_io_queues()
2207 ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues); in nvme_tcp_configure_io_queues()
2212 nvme_start_freeze(ctrl); in nvme_tcp_configure_io_queues()
2213 nvme_unquiesce_io_queues(ctrl); in nvme_tcp_configure_io_queues()
2214 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) { in nvme_tcp_configure_io_queues()
2220 ret = -ENODEV; in nvme_tcp_configure_io_queues()
2221 nvme_unfreeze(ctrl); in nvme_tcp_configure_io_queues()
2224 blk_mq_update_nr_hw_queues(ctrl->tagset, in nvme_tcp_configure_io_queues()
2225 ctrl->queue_count - 1); in nvme_tcp_configure_io_queues()
2226 nvme_unfreeze(ctrl); in nvme_tcp_configure_io_queues()
2233 ret = nvme_tcp_start_io_queues(ctrl, nr_queues, in nvme_tcp_configure_io_queues()
2234 ctrl->tagset->nr_hw_queues + 1); in nvme_tcp_configure_io_queues()
2241 nvme_quiesce_io_queues(ctrl); in nvme_tcp_configure_io_queues()
2242 nvme_sync_io_queues(ctrl); in nvme_tcp_configure_io_queues()
2243 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_configure_io_queues()
2245 nvme_cancel_tagset(ctrl); in nvme_tcp_configure_io_queues()
2247 nvme_remove_io_tag_set(ctrl); in nvme_tcp_configure_io_queues()
2249 nvme_tcp_free_io_queues(ctrl); in nvme_tcp_configure_io_queues()
2253 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_configure_admin_queue() argument
2257 error = nvme_tcp_alloc_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
2262 error = nvme_alloc_admin_tag_set(ctrl, in nvme_tcp_configure_admin_queue()
2263 &to_tcp_ctrl(ctrl)->admin_tag_set, in nvme_tcp_configure_admin_queue()
2270 error = nvme_tcp_start_queue(ctrl, 0); in nvme_tcp_configure_admin_queue()
2274 error = nvme_enable_ctrl(ctrl); in nvme_tcp_configure_admin_queue()
2278 nvme_unquiesce_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
2280 error = nvme_init_ctrl_finish(ctrl, false); in nvme_tcp_configure_admin_queue()
2287 nvme_quiesce_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
2288 blk_sync_queue(ctrl->admin_q); in nvme_tcp_configure_admin_queue()
2290 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_configure_admin_queue()
2291 nvme_cancel_admin_tagset(ctrl); in nvme_tcp_configure_admin_queue()
2294 nvme_remove_admin_tag_set(ctrl); in nvme_tcp_configure_admin_queue()
2296 nvme_tcp_free_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
2300 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, in nvme_tcp_teardown_admin_queue() argument
2303 nvme_quiesce_admin_queue(ctrl); in nvme_tcp_teardown_admin_queue()
2304 blk_sync_queue(ctrl->admin_q); in nvme_tcp_teardown_admin_queue()
2305 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_teardown_admin_queue()
2306 nvme_cancel_admin_tagset(ctrl); in nvme_tcp_teardown_admin_queue()
2308 nvme_unquiesce_admin_queue(ctrl); in nvme_tcp_teardown_admin_queue()
2309 nvme_remove_admin_tag_set(ctrl); in nvme_tcp_teardown_admin_queue()
2311 nvme_tcp_free_admin_queue(ctrl); in nvme_tcp_teardown_admin_queue()
2312 if (ctrl->tls_pskid) { in nvme_tcp_teardown_admin_queue()
2313 dev_dbg(ctrl->device, "Wipe negotiated TLS_PSK %08x\n", in nvme_tcp_teardown_admin_queue()
2314 ctrl->tls_pskid); in nvme_tcp_teardown_admin_queue()
2315 ctrl->tls_pskid = 0; in nvme_tcp_teardown_admin_queue()
2319 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, in nvme_tcp_teardown_io_queues() argument
2322 if (ctrl->queue_count <= 1) in nvme_tcp_teardown_io_queues()
2324 nvme_quiesce_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
2325 nvme_sync_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
2326 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
2327 nvme_cancel_tagset(ctrl); in nvme_tcp_teardown_io_queues()
2329 nvme_unquiesce_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
2330 nvme_remove_io_tag_set(ctrl); in nvme_tcp_teardown_io_queues()
2332 nvme_tcp_free_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
2335 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl, in nvme_tcp_reconnect_or_remove() argument
2338 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); in nvme_tcp_reconnect_or_remove()
2346 if (nvmf_should_reconnect(ctrl, status)) { in nvme_tcp_reconnect_or_remove()
2347 dev_info(ctrl->device, "Reconnecting in %d seconds...\n", in nvme_tcp_reconnect_or_remove()
2348 ctrl->opts->reconnect_delay); in nvme_tcp_reconnect_or_remove()
2349 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work, in nvme_tcp_reconnect_or_remove()
2350 ctrl->opts->reconnect_delay * HZ); in nvme_tcp_reconnect_or_remove()
2352 dev_info(ctrl->device, "Removing controller (%d)...\n", in nvme_tcp_reconnect_or_remove()
2354 nvme_delete_ctrl(ctrl); in nvme_tcp_reconnect_or_remove()
2361 * - concatenation is enabled (otherwise it's a static key set by the user)
2363 * - the generated key is present in ctrl->tls_key (otherwise there's nothing
2366 * - a valid PSK key ID has been set in ctrl->tls_pskid (otherwise TLS
2371 * DH-HMAC-CHAP negotiation (which generates the key, so it _must not_ be set),
2374 static bool nvme_tcp_key_revoke_needed(struct nvme_ctrl *ctrl) in nvme_tcp_key_revoke_needed() argument
2376 return ctrl->opts->concat && ctrl->opts->tls_key && ctrl->tls_pskid; in nvme_tcp_key_revoke_needed()
2379 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_setup_ctrl() argument
2381 struct nvmf_ctrl_options *opts = ctrl->opts; in nvme_tcp_setup_ctrl()
2384 ret = nvme_tcp_configure_admin_queue(ctrl, new); in nvme_tcp_setup_ctrl()
2388 if (ctrl->opts && ctrl->opts->concat && !ctrl->tls_pskid) { in nvme_tcp_setup_ctrl()
2390 dev_dbg(ctrl->device, "restart admin queue for secure concatenation\n"); in nvme_tcp_setup_ctrl()
2391 nvme_stop_keep_alive(ctrl); in nvme_tcp_setup_ctrl()
2392 nvme_tcp_teardown_admin_queue(ctrl, false); in nvme_tcp_setup_ctrl()
2393 ret = nvme_tcp_configure_admin_queue(ctrl, false); in nvme_tcp_setup_ctrl()
2398 if (ctrl->icdoff) { in nvme_tcp_setup_ctrl()
2399 ret = -EOPNOTSUPP; in nvme_tcp_setup_ctrl()
2400 dev_err(ctrl->device, "icdoff is not supported!\n"); in nvme_tcp_setup_ctrl()
2404 if (!nvme_ctrl_sgl_supported(ctrl)) { in nvme_tcp_setup_ctrl()
2405 ret = -EOPNOTSUPP; in nvme_tcp_setup_ctrl()
2406 dev_err(ctrl->device, "Mandatory sgls are not supported!\n"); in nvme_tcp_setup_ctrl()
2410 if (opts->queue_size > ctrl->sqsize + 1) in nvme_tcp_setup_ctrl()
2411 dev_warn(ctrl->device, in nvme_tcp_setup_ctrl()
2412 "queue_size %zu > ctrl sqsize %u, clamping down\n", in nvme_tcp_setup_ctrl()
2413 opts->queue_size, ctrl->sqsize + 1); in nvme_tcp_setup_ctrl()
2415 if (ctrl->sqsize + 1 > ctrl->maxcmd) { in nvme_tcp_setup_ctrl()
2416 dev_warn(ctrl->device, in nvme_tcp_setup_ctrl()
2417 "sqsize %u > ctrl maxcmd %u, clamping down\n", in nvme_tcp_setup_ctrl()
2418 ctrl->sqsize + 1, ctrl->maxcmd); in nvme_tcp_setup_ctrl()
2419 ctrl->sqsize = ctrl->maxcmd - 1; in nvme_tcp_setup_ctrl()
2422 if (ctrl->queue_count > 1) { in nvme_tcp_setup_ctrl()
2423 ret = nvme_tcp_configure_io_queues(ctrl, new); in nvme_tcp_setup_ctrl()
2428 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) { in nvme_tcp_setup_ctrl()
2430 * state change failure is ok if we started ctrl delete, in nvme_tcp_setup_ctrl()
2434 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); in nvme_tcp_setup_ctrl()
2439 ret = -EINVAL; in nvme_tcp_setup_ctrl()
2443 nvme_start_ctrl(ctrl); in nvme_tcp_setup_ctrl()
2447 if (ctrl->queue_count > 1) { in nvme_tcp_setup_ctrl()
2448 nvme_quiesce_io_queues(ctrl); in nvme_tcp_setup_ctrl()
2449 nvme_sync_io_queues(ctrl); in nvme_tcp_setup_ctrl()
2450 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_setup_ctrl()
2451 nvme_cancel_tagset(ctrl); in nvme_tcp_setup_ctrl()
2453 nvme_remove_io_tag_set(ctrl); in nvme_tcp_setup_ctrl()
2454 nvme_tcp_free_io_queues(ctrl); in nvme_tcp_setup_ctrl()
2457 nvme_stop_keep_alive(ctrl); in nvme_tcp_setup_ctrl()
2458 nvme_tcp_teardown_admin_queue(ctrl, new); in nvme_tcp_setup_ctrl()
2466 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; in nvme_tcp_reconnect_ctrl_work() local
2469 ++ctrl->nr_reconnects; in nvme_tcp_reconnect_ctrl_work()
2471 ret = nvme_tcp_setup_ctrl(ctrl, false); in nvme_tcp_reconnect_ctrl_work()
2475 dev_info(ctrl->device, "Successfully reconnected (attempt %d/%d)\n", in nvme_tcp_reconnect_ctrl_work()
2476 ctrl->nr_reconnects, ctrl->opts->max_reconnects); in nvme_tcp_reconnect_ctrl_work()
2478 ctrl->nr_reconnects = 0; in nvme_tcp_reconnect_ctrl_work()
2483 dev_info(ctrl->device, "Failed reconnect attempt %d/%d\n", in nvme_tcp_reconnect_ctrl_work()
2484 ctrl->nr_reconnects, ctrl->opts->max_reconnects); in nvme_tcp_reconnect_ctrl_work()
2485 nvme_tcp_reconnect_or_remove(ctrl, ret); in nvme_tcp_reconnect_ctrl_work()
2492 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; in nvme_tcp_error_recovery_work() local
2494 if (nvme_tcp_key_revoke_needed(ctrl)) in nvme_tcp_error_recovery_work()
2495 nvme_auth_revoke_tls_key(ctrl); in nvme_tcp_error_recovery_work()
2496 nvme_stop_keep_alive(ctrl); in nvme_tcp_error_recovery_work()
2497 flush_work(&ctrl->async_event_work); in nvme_tcp_error_recovery_work()
2498 nvme_tcp_teardown_io_queues(ctrl, false); in nvme_tcp_error_recovery_work()
2500 nvme_unquiesce_io_queues(ctrl); in nvme_tcp_error_recovery_work()
2501 nvme_tcp_teardown_admin_queue(ctrl, false); in nvme_tcp_error_recovery_work()
2502 nvme_unquiesce_admin_queue(ctrl); in nvme_tcp_error_recovery_work()
2503 nvme_auth_stop(ctrl); in nvme_tcp_error_recovery_work()
2505 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { in nvme_tcp_error_recovery_work()
2506 /* state change failure is ok if we started ctrl delete */ in nvme_tcp_error_recovery_work()
2507 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); in nvme_tcp_error_recovery_work()
2514 nvme_tcp_reconnect_or_remove(ctrl, 0); in nvme_tcp_error_recovery_work()
2517 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown) in nvme_tcp_teardown_ctrl() argument
2519 nvme_tcp_teardown_io_queues(ctrl, shutdown); in nvme_tcp_teardown_ctrl()
2520 nvme_quiesce_admin_queue(ctrl); in nvme_tcp_teardown_ctrl()
2521 nvme_disable_ctrl(ctrl, shutdown); in nvme_tcp_teardown_ctrl()
2522 nvme_tcp_teardown_admin_queue(ctrl, shutdown); in nvme_tcp_teardown_ctrl()
2525 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl) in nvme_tcp_delete_ctrl() argument
2527 nvme_tcp_teardown_ctrl(ctrl, true); in nvme_tcp_delete_ctrl()
2532 struct nvme_ctrl *ctrl = in nvme_reset_ctrl_work() local
2536 if (nvme_tcp_key_revoke_needed(ctrl)) in nvme_reset_ctrl_work()
2537 nvme_auth_revoke_tls_key(ctrl); in nvme_reset_ctrl_work()
2538 nvme_stop_ctrl(ctrl); in nvme_reset_ctrl_work()
2539 nvme_tcp_teardown_ctrl(ctrl, false); in nvme_reset_ctrl_work()
2541 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { in nvme_reset_ctrl_work()
2542 /* state change failure is ok if we started ctrl delete */ in nvme_reset_ctrl_work()
2543 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); in nvme_reset_ctrl_work()
2550 ret = nvme_tcp_setup_ctrl(ctrl, false); in nvme_reset_ctrl_work()
2557 ++ctrl->nr_reconnects; in nvme_reset_ctrl_work()
2558 nvme_tcp_reconnect_or_remove(ctrl, ret); in nvme_reset_ctrl_work()
2561 static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl) in nvme_tcp_stop_ctrl() argument
2563 flush_work(&to_tcp_ctrl(ctrl)->err_work); in nvme_tcp_stop_ctrl()
2564 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); in nvme_tcp_stop_ctrl()
2569 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_free_ctrl() local
2571 if (list_empty(&ctrl->list)) in nvme_tcp_free_ctrl()
2575 list_del(&ctrl->list); in nvme_tcp_free_ctrl()
2578 nvmf_free_options(nctrl->opts); in nvme_tcp_free_ctrl()
2580 kfree(ctrl->queues); in nvme_tcp_free_ctrl()
2581 kfree(ctrl); in nvme_tcp_free_ctrl()
2586 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_null()
2588 sg->addr = 0; in nvme_tcp_set_sg_null()
2589 sg->length = 0; in nvme_tcp_set_sg_null()
2590 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | NVME_SGL_FMT_TRANSPORT_A; in nvme_tcp_set_sg_null()
2597 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_inline()
2599 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_tcp_set_sg_inline()
2600 sg->length = cpu_to_le32(data_len); in nvme_tcp_set_sg_inline()
2601 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; in nvme_tcp_set_sg_inline()
2607 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_host_data()
2609 sg->addr = 0; in nvme_tcp_set_sg_host_data()
2610 sg->length = cpu_to_le32(data_len); in nvme_tcp_set_sg_host_data()
2611 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | NVME_SGL_FMT_TRANSPORT_A; in nvme_tcp_set_sg_host_data()
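/*
 * Hedged sketch: the sg->type byte built above packs the SGL descriptor
 * type in the high nibble and its subtype in the low nibble. The demo
 * constants mirror my reading of the NVMe definitions used here and are
 * assumptions, not the kernel headers.
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_SGL_FMT_DATA_DESC		0x00	/* data block descriptor */
#define DEMO_SGL_FMT_OFFSET		0x01	/* address is an offset  */
#define DEMO_TRANSPORT_SGL_DATA_DESC	0x05	/* transport data block  */
#define DEMO_SGL_FMT_TRANSPORT_A	0x0a	/* transport specific    */

static uint8_t demo_sgl_type(uint8_t desc, uint8_t subtype)
{
	return (uint8_t)((desc << 4) | subtype);
}

int main(void)
{
	assert(demo_sgl_type(DEMO_SGL_FMT_DATA_DESC, DEMO_SGL_FMT_OFFSET) == 0x01);
	assert(demo_sgl_type(DEMO_TRANSPORT_SGL_DATA_DESC, DEMO_SGL_FMT_TRANSPORT_A) == 0x5a);
	return 0;
}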
2617 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg); in nvme_tcp_submit_async_event() local
2618 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_submit_async_event()
2619 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu; in nvme_tcp_submit_async_event()
2620 struct nvme_command *cmd = &pdu->cmd; in nvme_tcp_submit_async_event()
2624 pdu->hdr.type = nvme_tcp_cmd; in nvme_tcp_submit_async_event()
2625 if (queue->hdr_digest) in nvme_tcp_submit_async_event()
2626 pdu->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_submit_async_event()
2627 pdu->hdr.hlen = sizeof(*pdu); in nvme_tcp_submit_async_event()
2628 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); in nvme_tcp_submit_async_event()
2630 cmd->common.opcode = nvme_admin_async_event; in nvme_tcp_submit_async_event()
2631 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH; in nvme_tcp_submit_async_event()
2632 cmd->common.flags |= NVME_CMD_SGL_METABUF; in nvme_tcp_submit_async_event()
2635 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU; in nvme_tcp_submit_async_event()
2636 ctrl->async_req.offset = 0; in nvme_tcp_submit_async_event()
2637 ctrl->async_req.curr_bio = NULL; in nvme_tcp_submit_async_event()
2638 ctrl->async_req.data_len = 0; in nvme_tcp_submit_async_event()
2640 nvme_tcp_queue_request(&ctrl->async_req, true, true); in nvme_tcp_submit_async_event()
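/*
 * Hedged sketch: the AER's command_id above is placed one past the
 * blk-mq admin tag space, so it can never collide with a tagged admin
 * request. Illustrative sizes below (a 32-deep admin queue reserving one
 * AEN slot); the kernel's actual constants may differ.
 */
#include <assert.h>

#define DEMO_AQ_DEPTH		32
#define DEMO_NR_AEN_COMMANDS	1
#define DEMO_AQ_BLK_MQ_DEPTH	(DEMO_AQ_DEPTH - DEMO_NR_AEN_COMMANDS)

int main(void)
{
	int aer_cid = DEMO_AQ_BLK_MQ_DEPTH;	/* first id past the tags */

	assert(aer_cid == 31);			/* tags occupy 0..30 */
	return 0;
}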
2646 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_complete_timed_out() local
2648 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue)); in nvme_tcp_complete_timed_out()
2655 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_timeout() local
2657 struct nvme_command *cmd = &pdu->cmd; in nvme_tcp_timeout()
2658 int qid = nvme_tcp_queue_id(req->queue); in nvme_tcp_timeout()
2660 dev_warn(ctrl->device, in nvme_tcp_timeout()
2662 rq->tag, nvme_cid(rq), pdu->hdr.type, cmd->common.opcode, in nvme_tcp_timeout()
2665 if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) { in nvme_tcp_timeout()
2670 * - ctrl disable/shutdown fabrics requests in nvme_tcp_timeout()
2671 * - connect requests in nvme_tcp_timeout()
2672 * - initialization admin requests in nvme_tcp_timeout()
2673 * - I/O requests that entered after unquiescing and the controller stopped responding in nvme_tcp_timeout()
2687 nvme_tcp_error_recovery(ctrl); in nvme_tcp_timeout()
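/*
 * Hedged sketch: the timeout policy above - while the controller is not
 * LIVE, a timed-out request is completed directly so teardown/connect
 * never block on it; otherwise error recovery is kicked and the timer
 * reset. Userspace model with hypothetical enums:
 */
enum demo_tmo_state { DEMO_TMO_LIVE, DEMO_TMO_CONNECTING, DEMO_TMO_DELETING };
enum demo_eh { DEMO_EH_DONE, DEMO_EH_RESET_TIMER };

static enum demo_eh demo_timeout(enum demo_tmo_state s, void (*recover)(void))
{
	if (s != DEMO_TMO_LIVE)
		return DEMO_EH_DONE;		/* complete it right here */
	recover();				/* schedule error recovery */
	return DEMO_EH_RESET_TIMER;		/* let recovery cancel it */
}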
2696 struct nvme_command *c = &pdu->cmd; in nvme_tcp_map_data()
2698 c->common.flags |= NVME_CMD_SGL_METABUF; in nvme_tcp_map_data()
2703 else if (rq_data_dir(rq) == WRITE && req->data_len <= nvme_tcp_inline_data_size(req)) in nvme_tcp_map_data()
2704 nvme_tcp_set_sg_inline(queue, c, req->data_len); in nvme_tcp_map_data()
2705 else in nvme_tcp_map_data()
2706 nvme_tcp_set_sg_host_data(c, req->data_len); in nvme_tcp_map_data()
2716 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_cmd_pdu()
2724 req->state = NVME_TCP_SEND_CMD_PDU; in nvme_tcp_setup_cmd_pdu()
2725 req->status = cpu_to_le16(NVME_SC_SUCCESS); in nvme_tcp_setup_cmd_pdu()
2726 req->offset = 0; in nvme_tcp_setup_cmd_pdu()
2727 req->data_sent = 0; in nvme_tcp_setup_cmd_pdu()
2728 req->pdu_len = 0; in nvme_tcp_setup_cmd_pdu()
2729 req->pdu_sent = 0; in nvme_tcp_setup_cmd_pdu()
2730 req->h2cdata_left = 0; in nvme_tcp_setup_cmd_pdu()
2731 req->data_len = blk_rq_nr_phys_segments(rq) ? blk_rq_payload_bytes(rq) : 0; in nvme_tcp_setup_cmd_pdu()
2733 req->curr_bio = rq->bio; in nvme_tcp_setup_cmd_pdu()
2734 if (req->curr_bio && req->data_len) in nvme_tcp_setup_cmd_pdu()
2738 if (rq_data_dir(rq) == WRITE && req->data_len <= nvme_tcp_inline_data_size(req)) in nvme_tcp_setup_cmd_pdu()
2739 req->pdu_len = req->data_len; in nvme_tcp_setup_cmd_pdu()
2741 pdu->hdr.type = nvme_tcp_cmd; in nvme_tcp_setup_cmd_pdu()
2742 pdu->hdr.flags = 0; in nvme_tcp_setup_cmd_pdu()
2743 if (queue->hdr_digest) in nvme_tcp_setup_cmd_pdu()
2744 pdu->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_setup_cmd_pdu()
2745 if (queue->data_digest && req->pdu_len) { in nvme_tcp_setup_cmd_pdu()
2746 pdu->hdr.flags |= NVME_TCP_F_DDGST; in nvme_tcp_setup_cmd_pdu()
2749 pdu->hdr.hlen = sizeof(*pdu); in nvme_tcp_setup_cmd_pdu()
2750 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0; in nvme_tcp_setup_cmd_pdu()
2751 pdu->hdr.plen = in nvme_tcp_setup_cmd_pdu()
2752 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst); in nvme_tcp_setup_cmd_pdu()
2757 dev_err(queue->ctrl->ctrl.device, "Failed to map data (%d)\n", ret); in nvme_tcp_setup_cmd_pdu()
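/*
 * Hedged sketch: the PDU sizing above. hlen counts only the header; pdo
 * (payload data offset) is set only when inline data follows, and plen
 * is the full on-wire length including the optional 4-byte header/data
 * digests. Userspace model; the 72-byte header is an assumption (8-byte
 * common header + 64-byte NVMe command).
 */
#include <assert.h>
#include <stdint.h>

struct demo_sizes { uint8_t hlen, pdo; uint32_t plen; };

static struct demo_sizes demo_size_cmd_pdu(int hdgst_on, int ddgst_on,
					   uint32_t inline_len)
{
	const uint8_t hlen = 72;
	uint8_t hdgst = hdgst_on ? 4 : 0;
	uint8_t ddgst = (ddgst_on && inline_len) ? 4 : 0;

	return (struct demo_sizes){
		.hlen = hlen,
		.pdo  = inline_len ? hlen + hdgst : 0,
		.plen = hlen + hdgst + inline_len + ddgst,
	};
}

int main(void)
{
	/* both digests on, 512 bytes of inline write data */
	struct demo_sizes s = demo_size_cmd_pdu(1, 1, 512);

	assert(s.pdo == 76 && s.plen == 72 + 4 + 512 + 4);
	return 0;
}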
2767 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_commit_rqs()
2769 if (!llist_empty(&queue->req_list)) in nvme_tcp_commit_rqs()
2770 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_commit_rqs()
2776 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_tcp_queue_rq()
2777 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_queue_rq()
2778 struct request *rq = bd->rq; in nvme_tcp_queue_rq()
2780 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags); in nvme_tcp_queue_rq()
2783 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_tcp_queue_rq()
2784 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_tcp_queue_rq()
2792 nvme_tcp_queue_request(req, true, bd->last); in nvme_tcp_queue_rq()
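/*
 * Hedged sketch: queue_rq pushes the request onto a lock-free list and
 * only the bd->last hint (or a later commit_rqs) kicks io_work, so a
 * burst of submissions costs one wakeup. Minimal single-threaded
 * userspace model with hypothetical names:
 */
#include <stdbool.h>
#include <stddef.h>

struct demo_req { struct demo_req *next; };

struct demo_queue {
	struct demo_req *reqs;	/* stand-in for the lock-free llist */
	int kicks;		/* how often io_work was scheduled  */
};

static void demo_queue_request(struct demo_queue *q, struct demo_req *r,
			       bool last)
{
	r->next = q->reqs;
	q->reqs = r;
	if (last)
		q->kicks++;	/* one wakeup per batch, not per request */
}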
2799 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data); in nvme_tcp_map_queues() local
2801 nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues); in nvme_tcp_map_queues()
2806 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_poll()
2807 struct sock *sk = queue->sock->sk; in nvme_tcp_poll()
2810 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_poll()
2813 set_bit(NVME_TCP_Q_POLLING, &queue->flags); in nvme_tcp_poll()
2814 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue)) in nvme_tcp_poll()
2815 sk_busy_loop(sk, true); in nvme_tcp_poll()
2816 ret = nvme_tcp_try_recv(queue); in nvme_tcp_poll()
2817 clear_bit(NVME_TCP_Q_POLLING, &queue->flags); in nvme_tcp_poll()
2818 return ret < 0 ? ret : queue->nr_cqe; in nvme_tcp_poll()
2821 static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size) in nvme_tcp_get_address() argument
2823 struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0]; in nvme_tcp_get_address()
2825 int ret, len; in nvme_tcp_get_address() local
2827 len = nvmf_get_address(ctrl, buf, size); in nvme_tcp_get_address()
2829 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_get_address()
2830 return len; in nvme_tcp_get_address()
2832 mutex_lock(&queue->queue_lock); in nvme_tcp_get_address()
2834 ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr); in nvme_tcp_get_address()
2836 if (len > 0) in nvme_tcp_get_address()
2837 len--; /* strip trailing newline */ in nvme_tcp_get_address()
2838 len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n", in nvme_tcp_get_address()
2839 (len) ? "," : "", &src_addr); in nvme_tcp_get_address()
2842 mutex_unlock(&queue->queue_lock); in nvme_tcp_get_address()
2844 return len; in nvme_tcp_get_address()
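/*
 * Hedged sketch: the append idiom above relies on scnprintf() returning
 * the number of bytes actually stored (unlike snprintf()'s would-be
 * length), so successive calls can safely chain into one bounded
 * buffer. Userspace stand-in:
 */
#include <stdarg.h>
#include <stdio.h>

static size_t demo_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int n;

	if (!size)
		return 0;
	va_start(ap, fmt);
	n = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	if (n < 0)
		return 0;
	/* clamp: report what was stored, never the truncated excess */
	return (size_t)n < size ? (size_t)n : size - 1;
}

int main(void)
{
	char buf[64];
	size_t len = 0;

	len += demo_scnprintf(buf + len, sizeof(buf) - len, "traddr=%s", "192.0.2.1");
	len += demo_scnprintf(buf + len, sizeof(buf) - len, "%ssrc_addr=%s",
			      len ? "," : "", "192.0.2.2");
	puts(buf);	/* traddr=192.0.2.1,src_addr=192.0.2.2 */
	return 0;
}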
2886 struct nvme_tcp_ctrl *ctrl; in nvme_tcp_existing_controller() local
2890 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) { in nvme_tcp_existing_controller()
2891 found = nvmf_ip_options_match(&ctrl->ctrl, opts); in nvme_tcp_existing_controller()
2903 struct nvme_tcp_ctrl *ctrl; in nvme_tcp_alloc_ctrl() local
2906 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); in nvme_tcp_alloc_ctrl()
2907 if (!ctrl) in nvme_tcp_alloc_ctrl()
2908 return ERR_PTR(-ENOMEM); in nvme_tcp_alloc_ctrl()
2910 INIT_LIST_HEAD(&ctrl->list); in nvme_tcp_alloc_ctrl()
2911 ctrl->ctrl.opts = opts; in nvme_tcp_alloc_ctrl()
2912 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + in nvme_tcp_alloc_ctrl()
2913 opts->nr_poll_queues + 1; in nvme_tcp_alloc_ctrl()
2914 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_tcp_alloc_ctrl()
2915 ctrl->ctrl.kato = opts->kato; in nvme_tcp_alloc_ctrl()
2917 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_tcp_reconnect_ctrl_work); in nvme_tcp_alloc_ctrl()
2919 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work); in nvme_tcp_alloc_ctrl()
2920 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work); in nvme_tcp_alloc_ctrl()
2922 if (!(opts->mask & NVMF_OPT_TRSVCID)) { in nvme_tcp_alloc_ctrl()
2923 opts->trsvcid = kstrdup(__stringify(NVME_TCP_DISC_PORT), GFP_KERNEL); in nvme_tcp_alloc_ctrl()
2925 if (!opts->trsvcid) { in nvme_tcp_alloc_ctrl()
2926 ret = -ENOMEM; in nvme_tcp_alloc_ctrl()
2929 opts->mask |= NVMF_OPT_TRSVCID; in nvme_tcp_alloc_ctrl()
2933 ret = inet_pton_with_scope(&init_net, AF_UNSPEC, opts->traddr, opts->trsvcid, &ctrl->addr); in nvme_tcp_alloc_ctrl()
2936 pr_err("malformed address passed: %s:%s\n", opts->traddr, opts->trsvcid); in nvme_tcp_alloc_ctrl()
2940 if (opts->mask & NVMF_OPT_HOST_TRADDR) { in nvme_tcp_alloc_ctrl()
2942 ret = inet_pton_with_scope(&init_net, AF_UNSPEC, opts->host_traddr, NULL, &ctrl->src_addr); in nvme_tcp_alloc_ctrl()
2945 pr_err("malformed src address passed: %s\n", opts->host_traddr); in nvme_tcp_alloc_ctrl()
2950 if (opts->mask & NVMF_OPT_HOST_IFACE) { in nvme_tcp_alloc_ctrl()
2951 if (!__dev_get_by_name(&init_net, opts->host_iface)) { in nvme_tcp_alloc_ctrl()
2953 pr_err("invalid interface passed: %s\n", opts->host_iface); in nvme_tcp_alloc_ctrl()
2954 ret = -ENODEV; in nvme_tcp_alloc_ctrl()
2959 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) { in nvme_tcp_alloc_ctrl()
2960 ret = -EALREADY; in nvme_tcp_alloc_ctrl()
2964 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), GFP_KERNEL); in nvme_tcp_alloc_ctrl()
2966 if (!ctrl->queues) { in nvme_tcp_alloc_ctrl()
2967 ret = -ENOMEM; in nvme_tcp_alloc_ctrl()
2971 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0); in nvme_tcp_alloc_ctrl()
2975 return ctrl; in nvme_tcp_alloc_ctrl()
2977 kfree(ctrl->queues); in nvme_tcp_alloc_ctrl()
2979 kfree(ctrl); in nvme_tcp_alloc_ctrl()
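/*
 * Hedged sketch: the sizing math above. queue_count covers every I/O
 * queue class plus the admin queue (the "+ 1"), and sqsize is zero-based
 * per NVMe, so a requested queue_size of N becomes sqsize N - 1.
 */
#include <assert.h>

struct demo_opts { int nr_io, nr_write, nr_poll, queue_size; };

static int demo_queue_count(const struct demo_opts *o)
{
	return o->nr_io + o->nr_write + o->nr_poll + 1;	/* +1: admin */
}

int main(void)
{
	struct demo_opts o = { .nr_io = 4, .nr_write = 2, .nr_poll = 2,
			       .queue_size = 128 };

	assert(demo_queue_count(&o) == 9);
	assert(o.queue_size - 1 == 127);	/* sqsize is zero-based */
	return 0;
}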
2986 struct nvme_tcp_ctrl *ctrl; in nvme_tcp_create_ctrl() local
2989 ctrl = nvme_tcp_alloc_ctrl(dev, opts); in nvme_tcp_create_ctrl()
2990 if (IS_ERR(ctrl)) in nvme_tcp_create_ctrl()
2991 return ERR_CAST(ctrl); in nvme_tcp_create_ctrl()
2993 ret = nvme_add_ctrl(&ctrl->ctrl); in nvme_tcp_create_ctrl()
2997 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { in nvme_tcp_create_ctrl()
2999 ret = -EINTR; in nvme_tcp_create_ctrl()
3003 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true); in nvme_tcp_create_ctrl()
3007 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp, hostnqn: %s\n", in nvme_tcp_create_ctrl()
3008 nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn); in nvme_tcp_create_ctrl()
3011 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list); in nvme_tcp_create_ctrl()
3014 return &ctrl->ctrl; in nvme_tcp_create_ctrl()
3017 nvme_uninit_ctrl(&ctrl->ctrl); in nvme_tcp_create_ctrl()
3019 nvme_put_ctrl(&ctrl->ctrl); in nvme_tcp_create_ctrl()
3021 ret = -EIO; in nvme_tcp_create_ctrl()
3057 return -ENOMEM; in nvme_tcp_init_module()
3068 struct nvme_tcp_ctrl *ctrl; in nvme_tcp_cleanup_module() local
3073 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) in nvme_tcp_cleanup_module()
3074 nvme_delete_ctrl(&ctrl->ctrl); in nvme_tcp_cleanup_module()