Lines Matching +full:ctrl +full:- +full:len

1 // SPDX-License-Identifier: GPL-2.0
12 #include <linux/nvme-tcp.h>
13 #include <linux/nvme-keyring.h>
19 #include <linux/blk-mq.h>
31 * A non-zero value being sufficient to indicate general consideration of any
51 * sk_lock -> mmap_lock (page fault) -> fs locks -> sk_lock
52 * because dependencies are tracked for both nvme-tcp and user contexts. Using
53 * a separate class prevents lockdep from conflating nvme-tcp socket use with
54 * user-space socket API use.
61 struct sock *sk = sock->sk; in nvme_tcp_reclassify_socket()
66 switch (sk->sk_family) { in nvme_tcp_reclassify_socket()
68 sock_lock_init_class_and_name(sk, "slock-AF_INET-NVME", in nvme_tcp_reclassify_socket()
70 "sk_lock-AF_INET-NVME", in nvme_tcp_reclassify_socket()
74 sock_lock_init_class_and_name(sk, "slock-AF_INET6-NVME", in nvme_tcp_reclassify_socket()
76 "sk_lock-AF_INET6-NVME", in nvme_tcp_reclassify_socket()
154 struct nvme_tcp_ctrl *ctrl; member
183 struct nvme_ctrl ctrl; member
198 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl) in to_tcp_ctrl() argument
200 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl); in to_tcp_ctrl()
205 return queue - queue->ctrl->queues; in nvme_tcp_queue_id()
208 static inline bool nvme_tcp_tls(struct nvme_ctrl *ctrl) in nvme_tcp_tls() argument
213 return ctrl->opts->tls; in nvme_tcp_tls()
221 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_tcp_tagset()
222 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_tcp_tagset()
227 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_hdgst_len()
232 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_ddgst_len()
237 return req->pdu; in nvme_tcp_req_cmd_pdu()
243 return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) - in nvme_tcp_req_data_pdu()
249 if (nvme_is_fabrics(req->req.cmd)) in nvme_tcp_inline_data_size()
251 return req->queue->cmnd_capsule_len - sizeof(struct nvme_command); in nvme_tcp_inline_data_size()
256 return req == &req->queue->ctrl->async_req; in nvme_tcp_async_req()
268 return rq_data_dir(rq) == WRITE && req->data_len && in nvme_tcp_has_inline_data()
269 req->data_len <= nvme_tcp_inline_data_size(req); in nvme_tcp_has_inline_data()
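As a worked example of the in-capsule sizing the two helpers above encode (a sketch; the ioccsz value is assumed, not taken from this listing): cmnd_capsule_len is ioccsz * 16, and the inline budget is whatever remains after the 64-byte SQE.

/* Hedged sketch of nvme_tcp_inline_data_size()/nvme_tcp_has_inline_data().
 * Compiles standalone; values are assumptions for illustration. */
#include <stdio.h>

#define NVME_SQE_SIZE 64	/* sizeof(struct nvme_command) */

int main(void)
{
	unsigned int ioccsz = 260;			/* assumed controller value */
	unsigned int capsule = ioccsz * 16;		/* cmnd_capsule_len = 4160 */
	unsigned int inline_max = capsule - NVME_SQE_SIZE;	/* 4096 */
	unsigned int data_len = 4096;			/* a 4 KiB WRITE payload */

	/* nvme_tcp_has_inline_data() also requires rq_data_dir(rq) == WRITE */
	printf("inline budget %u, 4 KiB write inline? %d\n",
	       inline_max, data_len && data_len <= inline_max);
	return 0;
}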
274 return req->iter.bvec->bv_page; in nvme_tcp_req_cur_page()
279 return req->iter.bvec->bv_offset + req->iter.iov_offset; in nvme_tcp_req_cur_offset()
284 return min_t(size_t, iov_iter_single_seg_count(&req->iter), in nvme_tcp_req_cur_length()
285 req->pdu_len - req->pdu_sent); in nvme_tcp_req_cur_length()
291 req->pdu_len - req->pdu_sent : 0; in nvme_tcp_pdu_data_left()
295 int len) in nvme_tcp_pdu_last_send() argument
297 return nvme_tcp_pdu_data_left(req) <= len; in nvme_tcp_pdu_last_send()
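To make the send-side chunk accounting concrete, a minimal sketch with assumed numbers (an 8 KiB data PDU of which 6 KiB is already on the wire):

/* Sketch of nvme_tcp_pdu_data_left()/nvme_tcp_pdu_last_send(); values assumed. */
int example_last_send(void)
{
	unsigned int pdu_len = 8192, pdu_sent = 6144;
	unsigned int data_left = pdu_len - pdu_sent;	/* 2048 */
	unsigned int len = 2048;			/* next sendmsg length */

	return data_left <= len;	/* 1: this send completes the PDU */
}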
309 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) { in nvme_tcp_init_iter()
310 vec = &rq->special_vec; in nvme_tcp_init_iter()
315 struct bio *bio = req->curr_bio; in nvme_tcp_init_iter()
319 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); in nvme_tcp_init_iter()
324 size = bio->bi_iter.bi_size; in nvme_tcp_init_iter()
325 offset = bio->bi_iter.bi_bvec_done; in nvme_tcp_init_iter()
328 iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size); in nvme_tcp_init_iter()
329 req->iter.iov_offset = offset; in nvme_tcp_init_iter()
333 int len) in nvme_tcp_advance_req() argument
335 req->data_sent += len; in nvme_tcp_advance_req()
336 req->pdu_sent += len; in nvme_tcp_advance_req()
337 iov_iter_advance(&req->iter, len); in nvme_tcp_advance_req()
338 if (!iov_iter_count(&req->iter) && in nvme_tcp_advance_req()
339 req->data_sent < req->data_len) { in nvme_tcp_advance_req()
340 req->curr_bio = req->curr_bio->bi_next; in nvme_tcp_advance_req()
357 return !list_empty(&queue->send_list) || in nvme_tcp_queue_more()
358 !llist_empty(&queue->req_list); in nvme_tcp_queue_more()
364 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_queue_request()
367 empty = llist_add(&req->lentry, &queue->req_list) && in nvme_tcp_queue_request()
368 list_empty(&queue->send_list) && !queue->request; in nvme_tcp_queue_request()
375 if (queue->io_cpu == raw_smp_processor_id() && in nvme_tcp_queue_request()
376 sync && empty && mutex_trylock(&queue->send_mutex)) { in nvme_tcp_queue_request()
378 mutex_unlock(&queue->send_mutex); in nvme_tcp_queue_request()
382 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_queue_request()
390 for (node = llist_del_all(&queue->req_list); node; node = node->next) { in nvme_tcp_process_req_list()
392 list_add(&req->entry, &queue->send_list); in nvme_tcp_process_req_list()
401 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
405 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
411 list_del(&req->entry); in nvme_tcp_fetch_request()
423 struct page *page, off_t off, size_t len) in nvme_tcp_ddgst_update() argument
428 sg_set_page(&sg, page, len, off); in nvme_tcp_ddgst_update()
429 ahash_request_set_crypt(hash, &sg, NULL, len); in nvme_tcp_ddgst_update()
434 void *pdu, size_t len) in nvme_tcp_hdgst() argument
438 sg_init_one(&sg, pdu, len); in nvme_tcp_hdgst()
439 ahash_request_set_crypt(hash, &sg, pdu + len, len); in nvme_tcp_hdgst()
450 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) { in nvme_tcp_verify_hdgst()
451 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
454 return -EPROTO; in nvme_tcp_verify_hdgst()
457 recv_digest = *(__le32 *)(pdu + hdr->hlen); in nvme_tcp_verify_hdgst()
458 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len); in nvme_tcp_verify_hdgst()
459 exp_digest = *(__le32 *)(pdu + hdr->hlen); in nvme_tcp_verify_hdgst()
461 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
464 return -EIO; in nvme_tcp_verify_hdgst()
474 u32 len; in nvme_tcp_check_ddgst() local
476 len = le32_to_cpu(hdr->plen) - hdr->hlen - in nvme_tcp_check_ddgst()
477 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0); in nvme_tcp_check_ddgst()
479 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) { in nvme_tcp_check_ddgst()
480 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_check_ddgst()
483 return -EPROTO; in nvme_tcp_check_ddgst()
485 crypto_ahash_init(queue->rcv_hash); in nvme_tcp_check_ddgst()
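A worked instance of the receive-side length check above (sizes assumed: a 24-byte C2H DATA header, both digests negotiated, 4096 bytes of payload):

/* Sketch of the nvme_tcp_check_ddgst() arithmetic; values are assumptions. */
unsigned int example_c2h_len(void)
{
	unsigned int plen = 24 + 4 + 4096 + 4;	/* 4128 bytes on the wire */
	unsigned int hlen = 24, hdgst = 4;
	unsigned int len = plen - hlen - hdgst;	/* 4100: payload + trailing ddgst */

	/* len != 0, so NVME_TCP_F_DDGST must be set or the PDU is -EPROTO */
	return len;
}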
495 page_frag_free(req->pdu); in nvme_tcp_exit_request()
502 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data); in nvme_tcp_init_request() local
505 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; in nvme_tcp_init_request()
506 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx]; in nvme_tcp_init_request()
509 req->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_init_request()
512 if (!req->pdu) in nvme_tcp_init_request()
513 return -ENOMEM; in nvme_tcp_init_request()
515 pdu = req->pdu; in nvme_tcp_init_request()
516 req->queue = queue; in nvme_tcp_init_request()
517 nvme_req(rq)->ctrl = &ctrl->ctrl; in nvme_tcp_init_request()
518 nvme_req(rq)->cmd = &pdu->cmd; in nvme_tcp_init_request()
526 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data); in nvme_tcp_init_hctx() local
527 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_tcp_init_hctx()
529 hctx->driver_data = queue; in nvme_tcp_init_hctx()
536 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(data); in nvme_tcp_init_admin_hctx() local
537 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_init_admin_hctx()
539 hctx->driver_data = queue; in nvme_tcp_init_admin_hctx()
546 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU : in nvme_tcp_recv_state()
547 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST : in nvme_tcp_recv_state()
553 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) + in nvme_tcp_init_recv_ctx()
555 queue->pdu_offset = 0; in nvme_tcp_init_recv_ctx()
556 queue->data_remaining = -1; in nvme_tcp_init_recv_ctx()
557 queue->ddgst_remaining = 0; in nvme_tcp_init_recv_ctx()
560 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl) in nvme_tcp_error_recovery() argument
562 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) in nvme_tcp_error_recovery()
565 dev_warn(ctrl->device, "starting error recovery\n"); in nvme_tcp_error_recovery()
566 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work); in nvme_tcp_error_recovery()
575 rq = nvme_find_rq(nvme_tcp_tagset(queue), cqe->command_id); in nvme_tcp_process_nvme_cqe()
577 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_process_nvme_cqe()
579 cqe->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_process_nvme_cqe()
580 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_process_nvme_cqe()
581 return -EINVAL; in nvme_tcp_process_nvme_cqe()
585 if (req->status == cpu_to_le16(NVME_SC_SUCCESS)) in nvme_tcp_process_nvme_cqe()
586 req->status = cqe->status; in nvme_tcp_process_nvme_cqe()
588 if (!nvme_try_complete_req(rq, req->status, cqe->result)) in nvme_tcp_process_nvme_cqe()
590 queue->nr_cqe++; in nvme_tcp_process_nvme_cqe()
600 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_c2h_data()
602 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
604 pdu->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_handle_c2h_data()
605 return -ENOENT; in nvme_tcp_handle_c2h_data()
609 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
611 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
612 return -EIO; in nvme_tcp_handle_c2h_data()
615 queue->data_remaining = le32_to_cpu(pdu->data_length); in nvme_tcp_handle_c2h_data()
617 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS && in nvme_tcp_handle_c2h_data()
618 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) { in nvme_tcp_handle_c2h_data()
619 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
621 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
622 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_handle_c2h_data()
623 return -EPROTO; in nvme_tcp_handle_c2h_data()
632 struct nvme_completion *cqe = &pdu->cqe; in nvme_tcp_handle_comp()
642 cqe->command_id))) in nvme_tcp_handle_comp()
643 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_tcp_handle_comp()
644 &cqe->result); in nvme_tcp_handle_comp()
654 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_h2c_data_pdu()
656 u32 h2cdata_sent = req->pdu_len; in nvme_tcp_setup_h2c_data_pdu()
660 req->state = NVME_TCP_SEND_H2C_PDU; in nvme_tcp_setup_h2c_data_pdu()
661 req->offset = 0; in nvme_tcp_setup_h2c_data_pdu()
662 req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata); in nvme_tcp_setup_h2c_data_pdu()
663 req->pdu_sent = 0; in nvme_tcp_setup_h2c_data_pdu()
664 req->h2cdata_left -= req->pdu_len; in nvme_tcp_setup_h2c_data_pdu()
665 req->h2cdata_offset += h2cdata_sent; in nvme_tcp_setup_h2c_data_pdu()
668 data->hdr.type = nvme_tcp_h2c_data; in nvme_tcp_setup_h2c_data_pdu()
669 if (!req->h2cdata_left) in nvme_tcp_setup_h2c_data_pdu()
670 data->hdr.flags = NVME_TCP_F_DATA_LAST; in nvme_tcp_setup_h2c_data_pdu()
671 if (queue->hdr_digest) in nvme_tcp_setup_h2c_data_pdu()
672 data->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_setup_h2c_data_pdu()
673 if (queue->data_digest) in nvme_tcp_setup_h2c_data_pdu()
674 data->hdr.flags |= NVME_TCP_F_DDGST; in nvme_tcp_setup_h2c_data_pdu()
675 data->hdr.hlen = sizeof(*data); in nvme_tcp_setup_h2c_data_pdu()
676 data->hdr.pdo = data->hdr.hlen + hdgst; in nvme_tcp_setup_h2c_data_pdu()
677 data->hdr.plen = in nvme_tcp_setup_h2c_data_pdu()
678 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst); in nvme_tcp_setup_h2c_data_pdu()
679 data->ttag = req->ttag; in nvme_tcp_setup_h2c_data_pdu()
680 data->command_id = nvme_cid(rq); in nvme_tcp_setup_h2c_data_pdu()
681 data->data_offset = cpu_to_le32(req->h2cdata_offset); in nvme_tcp_setup_h2c_data_pdu()
682 data->data_length = cpu_to_le32(req->pdu_len); in nvme_tcp_setup_h2c_data_pdu()
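For scale, a sketch of how one R2T gets split by the negotiated limit (both figures assumed: the target advertised MAXH2CDATA = 64 KiB in its ICResp and asks for 256 KiB in a single R2T); this mirrors the pdu_len/h2cdata_left updates above:

/* Sketch: count the H2C DATA PDUs one R2T expands into; values assumed. */
unsigned int example_h2c_pdus(void)
{
	unsigned int maxh2cdata = 65536;	/* from ICResp MAXH2CDATA */
	unsigned int h2cdata_left = 262144;	/* one 256 KiB R2T */
	unsigned int npdu = 0;

	while (h2cdata_left) {
		unsigned int pdu_len =
			h2cdata_left < maxh2cdata ? h2cdata_left : maxh2cdata;

		h2cdata_left -= pdu_len;
		npdu++;		/* NVME_TCP_F_DATA_LAST only once left hits 0 */
	}
	return npdu;		/* 4 */
}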
690 u32 r2t_length = le32_to_cpu(pdu->r2t_length); in nvme_tcp_handle_r2t()
691 u32 r2t_offset = le32_to_cpu(pdu->r2t_offset); in nvme_tcp_handle_r2t()
693 rq = nvme_find_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_r2t()
695 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
697 pdu->command_id, nvme_tcp_queue_id(queue)); in nvme_tcp_handle_r2t()
698 return -ENOENT; in nvme_tcp_handle_r2t()
703 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
704 "req %d r2t len is %u, probably a bug...\n", in nvme_tcp_handle_r2t()
705 rq->tag, r2t_length); in nvme_tcp_handle_r2t()
706 return -EPROTO; in nvme_tcp_handle_r2t()
709 if (unlikely(req->data_sent + r2t_length > req->data_len)) { in nvme_tcp_handle_r2t()
710 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
711 "req %d r2t len %u exceeded data len %u (%zu sent)\n", in nvme_tcp_handle_r2t()
712 rq->tag, r2t_length, req->data_len, req->data_sent); in nvme_tcp_handle_r2t()
713 return -EPROTO; in nvme_tcp_handle_r2t()
716 if (unlikely(r2t_offset < req->data_sent)) { in nvme_tcp_handle_r2t()
717 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
719 rq->tag, r2t_offset, req->data_sent); in nvme_tcp_handle_r2t()
720 return -EPROTO; in nvme_tcp_handle_r2t()
723 req->pdu_len = 0; in nvme_tcp_handle_r2t()
724 req->h2cdata_left = r2t_length; in nvme_tcp_handle_r2t()
725 req->h2cdata_offset = r2t_offset; in nvme_tcp_handle_r2t()
726 req->ttag = pdu->ttag; in nvme_tcp_handle_r2t()
735 unsigned int *offset, size_t *len) in nvme_tcp_recv_pdu() argument
738 char *pdu = queue->pdu; in nvme_tcp_recv_pdu()
739 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining); in nvme_tcp_recv_pdu()
743 &pdu[queue->pdu_offset], rcv_len); in nvme_tcp_recv_pdu()
747 queue->pdu_remaining -= rcv_len; in nvme_tcp_recv_pdu()
748 queue->pdu_offset += rcv_len; in nvme_tcp_recv_pdu()
750 *len -= rcv_len; in nvme_tcp_recv_pdu()
751 if (queue->pdu_remaining) in nvme_tcp_recv_pdu()
754 hdr = queue->pdu; in nvme_tcp_recv_pdu()
755 if (queue->hdr_digest) { in nvme_tcp_recv_pdu()
756 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen); in nvme_tcp_recv_pdu()
762 if (queue->data_digest) { in nvme_tcp_recv_pdu()
763 ret = nvme_tcp_check_ddgst(queue, queue->pdu); in nvme_tcp_recv_pdu()
768 switch (hdr->type) { in nvme_tcp_recv_pdu()
770 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
773 return nvme_tcp_handle_comp(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
776 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
778 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_pdu()
779 "unsupported pdu type (%d)\n", hdr->type); in nvme_tcp_recv_pdu()
780 return -EINVAL; in nvme_tcp_recv_pdu()
793 unsigned int *offset, size_t *len) in nvme_tcp_recv_data() argument
795 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_data()
797 nvme_cid_to_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_recv_data()
803 recv_len = min_t(size_t, *len, queue->data_remaining); in nvme_tcp_recv_data()
807 if (!iov_iter_count(&req->iter)) { in nvme_tcp_recv_data()
808 req->curr_bio = req->curr_bio->bi_next; in nvme_tcp_recv_data()
814 if (!req->curr_bio) { in nvme_tcp_recv_data()
815 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
817 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
819 return -EIO; in nvme_tcp_recv_data()
826 iov_iter_count(&req->iter)); in nvme_tcp_recv_data()
828 if (queue->data_digest) in nvme_tcp_recv_data()
830 &req->iter, recv_len, queue->rcv_hash); in nvme_tcp_recv_data()
833 &req->iter, recv_len); in nvme_tcp_recv_data()
835 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
837 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
841 *len -= recv_len; in nvme_tcp_recv_data()
843 queue->data_remaining -= recv_len; in nvme_tcp_recv_data()
846 if (!queue->data_remaining) { in nvme_tcp_recv_data()
847 if (queue->data_digest) { in nvme_tcp_recv_data()
848 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst); in nvme_tcp_recv_data()
849 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH; in nvme_tcp_recv_data()
851 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { in nvme_tcp_recv_data()
853 le16_to_cpu(req->status)); in nvme_tcp_recv_data()
854 queue->nr_cqe++; in nvme_tcp_recv_data()
864 struct sk_buff *skb, unsigned int *offset, size_t *len) in nvme_tcp_recv_ddgst() argument
866 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_ddgst()
867 char *ddgst = (char *)&queue->recv_ddgst; in nvme_tcp_recv_ddgst()
868 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining); in nvme_tcp_recv_ddgst()
869 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining; in nvme_tcp_recv_ddgst()
876 queue->ddgst_remaining -= recv_len; in nvme_tcp_recv_ddgst()
878 *len -= recv_len; in nvme_tcp_recv_ddgst()
879 if (queue->ddgst_remaining) in nvme_tcp_recv_ddgst()
882 if (queue->recv_ddgst != queue->exp_ddgst) { in nvme_tcp_recv_ddgst()
884 pdu->command_id); in nvme_tcp_recv_ddgst()
887 req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR); in nvme_tcp_recv_ddgst()
889 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_ddgst()
891 le32_to_cpu(queue->recv_ddgst), in nvme_tcp_recv_ddgst()
892 le32_to_cpu(queue->exp_ddgst)); in nvme_tcp_recv_ddgst()
895 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { in nvme_tcp_recv_ddgst()
897 pdu->command_id); in nvme_tcp_recv_ddgst()
900 nvme_tcp_end_request(rq, le16_to_cpu(req->status)); in nvme_tcp_recv_ddgst()
901 queue->nr_cqe++; in nvme_tcp_recv_ddgst()
909 unsigned int offset, size_t len) in nvme_tcp_recv_skb() argument
911 struct nvme_tcp_queue *queue = desc->arg.data; in nvme_tcp_recv_skb()
912 size_t consumed = len; in nvme_tcp_recv_skb()
915 if (unlikely(!queue->rd_enabled)) in nvme_tcp_recv_skb()
916 return -EFAULT; in nvme_tcp_recv_skb()
918 while (len) { in nvme_tcp_recv_skb()
921 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
924 result = nvme_tcp_recv_data(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
927 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
930 result = -EFAULT; in nvme_tcp_recv_skb()
933 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_skb()
935 queue->rd_enabled = false; in nvme_tcp_recv_skb()
936 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_recv_skb()
950 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_data_ready()
951 queue = sk->sk_user_data; in nvme_tcp_data_ready()
952 if (likely(queue && queue->rd_enabled) && in nvme_tcp_data_ready()
953 !test_bit(NVME_TCP_Q_POLLING, &queue->flags)) in nvme_tcp_data_ready()
954 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_data_ready()
955 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_data_ready()
962 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_write_space()
963 queue = sk->sk_user_data; in nvme_tcp_write_space()
965 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in nvme_tcp_write_space()
966 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_write_space()
968 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_write_space()
975 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_state_change()
976 queue = sk->sk_user_data; in nvme_tcp_state_change()
980 switch (sk->sk_state) { in nvme_tcp_state_change()
986 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_state_change()
989 dev_info(queue->ctrl->ctrl.device, in nvme_tcp_state_change()
991 nvme_tcp_queue_id(queue), sk->sk_state); in nvme_tcp_state_change()
994 queue->state_change(sk); in nvme_tcp_state_change()
996 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_state_change()
1001 queue->request = NULL; in nvme_tcp_done_send_req()
1009 nvme_complete_async_event(&req->queue->ctrl->ctrl, in nvme_tcp_fail_request()
1019 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data()
1020 int req_data_len = req->data_len; in nvme_tcp_try_send_data()
1021 u32 h2cdata_left = req->h2cdata_left; in nvme_tcp_try_send_data()
1030 size_t len = nvme_tcp_req_cur_length(req); in nvme_tcp_try_send_data() local
1031 bool last = nvme_tcp_pdu_last_send(req, len); in nvme_tcp_try_send_data()
1032 int req_data_sent = req->data_sent; in nvme_tcp_try_send_data()
1035 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_data()
1043 bvec_set_page(&bvec, page, len, offset); in nvme_tcp_try_send_data()
1044 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); in nvme_tcp_try_send_data()
1045 ret = sock_sendmsg(queue->sock, &msg); in nvme_tcp_try_send_data()
1049 if (queue->data_digest) in nvme_tcp_try_send_data()
1050 nvme_tcp_ddgst_update(queue->snd_hash, page, in nvme_tcp_try_send_data()
1062 if (last && ret == len) { in nvme_tcp_try_send_data()
1063 if (queue->data_digest) { in nvme_tcp_try_send_data()
1064 nvme_tcp_ddgst_final(queue->snd_hash, in nvme_tcp_try_send_data()
1065 &req->ddgst); in nvme_tcp_try_send_data()
1066 req->state = NVME_TCP_SEND_DDGST; in nvme_tcp_try_send_data()
1067 req->offset = 0; in nvme_tcp_try_send_data()
1077 return -EAGAIN; in nvme_tcp_try_send_data()
1082 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_cmd_pdu()
1088 int len = sizeof(*pdu) + hdgst - req->offset; in nvme_tcp_try_send_cmd_pdu() local
1096 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_cmd_pdu()
1097 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_cmd_pdu()
1099 bvec_set_virt(&bvec, (void *)pdu + req->offset, len); in nvme_tcp_try_send_cmd_pdu()
1100 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); in nvme_tcp_try_send_cmd_pdu()
1101 ret = sock_sendmsg(queue->sock, &msg); in nvme_tcp_try_send_cmd_pdu()
1105 len -= ret; in nvme_tcp_try_send_cmd_pdu()
1106 if (!len) { in nvme_tcp_try_send_cmd_pdu()
1108 req->state = NVME_TCP_SEND_DATA; in nvme_tcp_try_send_cmd_pdu()
1109 if (queue->data_digest) in nvme_tcp_try_send_cmd_pdu()
1110 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_cmd_pdu()
1116 req->offset += ret; in nvme_tcp_try_send_cmd_pdu()
1118 return -EAGAIN; in nvme_tcp_try_send_cmd_pdu()
1123 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data_pdu()
1128 int len = sizeof(*pdu) - req->offset + hdgst; in nvme_tcp_try_send_data_pdu() local
1131 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_data_pdu()
1132 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_data_pdu()
1134 if (!req->h2cdata_left) in nvme_tcp_try_send_data_pdu()
1137 bvec_set_virt(&bvec, (void *)pdu + req->offset, len); in nvme_tcp_try_send_data_pdu()
1138 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len); in nvme_tcp_try_send_data_pdu()
1139 ret = sock_sendmsg(queue->sock, &msg); in nvme_tcp_try_send_data_pdu()
1143 len -= ret; in nvme_tcp_try_send_data_pdu()
1144 if (!len) { in nvme_tcp_try_send_data_pdu()
1145 req->state = NVME_TCP_SEND_DATA; in nvme_tcp_try_send_data_pdu()
1146 if (queue->data_digest) in nvme_tcp_try_send_data_pdu()
1147 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_data_pdu()
1150 req->offset += ret; in nvme_tcp_try_send_data_pdu()
1152 return -EAGAIN; in nvme_tcp_try_send_data_pdu()
1157 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_ddgst()
1158 size_t offset = req->offset; in nvme_tcp_try_send_ddgst()
1159 u32 h2cdata_left = req->h2cdata_left; in nvme_tcp_try_send_ddgst()
1163 .iov_base = (u8 *)&req->ddgst + req->offset, in nvme_tcp_try_send_ddgst()
1164 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset in nvme_tcp_try_send_ddgst()
1172 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_try_send_ddgst()
1184 req->offset += ret; in nvme_tcp_try_send_ddgst()
1185 return -EAGAIN; in nvme_tcp_try_send_ddgst()
1194 if (!queue->request) { in nvme_tcp_try_send()
1195 queue->request = nvme_tcp_fetch_request(queue); in nvme_tcp_try_send()
1196 if (!queue->request) in nvme_tcp_try_send()
1199 req = queue->request; in nvme_tcp_try_send()
1202 if (req->state == NVME_TCP_SEND_CMD_PDU) { in nvme_tcp_try_send()
1210 if (req->state == NVME_TCP_SEND_H2C_PDU) { in nvme_tcp_try_send()
1216 if (req->state == NVME_TCP_SEND_DATA) { in nvme_tcp_try_send()
1222 if (req->state == NVME_TCP_SEND_DDGST) in nvme_tcp_try_send()
1225 if (ret == -EAGAIN) { in nvme_tcp_try_send()
1228 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_try_send()
1230 nvme_tcp_fail_request(queue->request); in nvme_tcp_try_send()
1240 struct socket *sock = queue->sock; in nvme_tcp_try_recv()
1241 struct sock *sk = sock->sk; in nvme_tcp_try_recv()
1248 queue->nr_cqe = 0; in nvme_tcp_try_recv()
1249 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb); in nvme_tcp_try_recv()
1264 if (mutex_trylock(&queue->send_mutex)) { in nvme_tcp_io_work()
1266 mutex_unlock(&queue->send_mutex); in nvme_tcp_io_work()
1279 if (!pending || !queue->rd_enabled) in nvme_tcp_io_work()
1284 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_io_work()
1289 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); in nvme_tcp_free_crypto()
1291 ahash_request_free(queue->rcv_hash); in nvme_tcp_free_crypto()
1292 ahash_request_free(queue->snd_hash); in nvme_tcp_free_crypto()
1304 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1305 if (!queue->snd_hash) in nvme_tcp_alloc_crypto()
1307 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1309 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1310 if (!queue->rcv_hash) in nvme_tcp_alloc_crypto()
1312 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1316 ahash_request_free(queue->snd_hash); in nvme_tcp_alloc_crypto()
1319 return -ENOMEM; in nvme_tcp_alloc_crypto()
1322 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl) in nvme_tcp_free_async_req() argument
1324 struct nvme_tcp_request *async = &ctrl->async_req; in nvme_tcp_free_async_req()
1326 page_frag_free(async->pdu); in nvme_tcp_free_async_req()
1329 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl) in nvme_tcp_alloc_async_req() argument
1331 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1332 struct nvme_tcp_request *async = &ctrl->async_req; in nvme_tcp_alloc_async_req()
1335 async->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_alloc_async_req()
1338 if (!async->pdu) in nvme_tcp_alloc_async_req()
1339 return -ENOMEM; in nvme_tcp_alloc_async_req()
1341 async->queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1348 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_free_queue() local
1349 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_free_queue()
1352 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_free_queue()
1355 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_free_queue()
1358 if (queue->pf_cache.va) { in nvme_tcp_free_queue()
1359 page = virt_to_head_page(queue->pf_cache.va); in nvme_tcp_free_queue()
1360 __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias); in nvme_tcp_free_queue()
1361 queue->pf_cache.va = NULL; in nvme_tcp_free_queue()
1365 /* ->sock will be released by fput() */ in nvme_tcp_free_queue()
1366 fput(queue->sock->file); in nvme_tcp_free_queue()
1367 queue->sock = NULL; in nvme_tcp_free_queue()
1370 kfree(queue->pdu); in nvme_tcp_free_queue()
1371 mutex_destroy(&queue->send_mutex); in nvme_tcp_free_queue()
1372 mutex_destroy(&queue->queue_lock); in nvme_tcp_free_queue()
1389 return -ENOMEM; in nvme_tcp_init_connection()
1393 ret = -ENOMEM; in nvme_tcp_init_connection()
1397 icreq->hdr.type = nvme_tcp_icreq; in nvme_tcp_init_connection()
1398 icreq->hdr.hlen = sizeof(*icreq); in nvme_tcp_init_connection()
1399 icreq->hdr.pdo = 0; in nvme_tcp_init_connection()
1400 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen); in nvme_tcp_init_connection()
1401 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0); in nvme_tcp_init_connection()
1402 icreq->maxr2t = 0; /* single inflight r2t supported */ in nvme_tcp_init_connection()
1403 icreq->hpda = 0; /* no alignment constraint */ in nvme_tcp_init_connection()
1404 if (queue->hdr_digest) in nvme_tcp_init_connection()
1405 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE; in nvme_tcp_init_connection()
1406 if (queue->data_digest) in nvme_tcp_init_connection()
1407 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE; in nvme_tcp_init_connection()
1411 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_init_connection()
1421 if (nvme_tcp_tls(&queue->ctrl->ctrl)) { in nvme_tcp_init_connection()
1425 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, in nvme_tcp_init_connection()
1432 ret = -ENOTCONN; in nvme_tcp_init_connection()
1433 if (nvme_tcp_tls(&queue->ctrl->ctrl)) { in nvme_tcp_init_connection()
1434 ctype = tls_get_record_type(queue->sock->sk, in nvme_tcp_init_connection()
1442 ret = -EINVAL; in nvme_tcp_init_connection()
1443 if (icresp->hdr.type != nvme_tcp_icresp) { in nvme_tcp_init_connection()
1445 nvme_tcp_queue_id(queue), icresp->hdr.type); in nvme_tcp_init_connection()
1449 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) { in nvme_tcp_init_connection()
1451 nvme_tcp_queue_id(queue), icresp->hdr.plen); in nvme_tcp_init_connection()
1455 if (icresp->pfv != NVME_TCP_PFV_1_0) { in nvme_tcp_init_connection()
1457 nvme_tcp_queue_id(queue), icresp->pfv); in nvme_tcp_init_connection()
1461 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE); in nvme_tcp_init_connection()
1462 if ((queue->data_digest && !ctrl_ddgst) || in nvme_tcp_init_connection()
1463 (!queue->data_digest && ctrl_ddgst)) { in nvme_tcp_init_connection()
1464 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1466 queue->data_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1471 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE); in nvme_tcp_init_connection()
1472 if ((queue->hdr_digest && !ctrl_hdgst) || in nvme_tcp_init_connection()
1473 (!queue->hdr_digest && ctrl_hdgst)) { in nvme_tcp_init_connection()
1474 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1476 queue->hdr_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1481 if (icresp->cpda != 0) { in nvme_tcp_init_connection()
1483 nvme_tcp_queue_id(queue), icresp->cpda); in nvme_tcp_init_connection()
1487 maxh2cdata = le32_to_cpu(icresp->maxdata); in nvme_tcp_init_connection()
1493 queue->maxh2cdata = maxh2cdata; in nvme_tcp_init_connection()
1510 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_default_queue() local
1514 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_default_queue()
1519 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_read_queue() local
1524 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_tcp_read_queue()
1525 ctrl->io_queues[HCTX_TYPE_READ]; in nvme_tcp_read_queue()
1530 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_poll_queue() local
1536 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_tcp_poll_queue()
1537 ctrl->io_queues[HCTX_TYPE_READ] + in nvme_tcp_poll_queue()
1538 ctrl->io_queues[HCTX_TYPE_POLL]; in nvme_tcp_poll_queue()
1543 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_set_queue_io_cpu() local
1548 n = qid - 1; in nvme_tcp_set_queue_io_cpu()
1550 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1; in nvme_tcp_set_queue_io_cpu()
1552 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - in nvme_tcp_set_queue_io_cpu()
1553 ctrl->io_queues[HCTX_TYPE_READ] - 1; in nvme_tcp_set_queue_io_cpu()
1554 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); in nvme_tcp_set_queue_io_cpu()
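For orientation, a worked mapping under assumed queue counts (DEFAULT=4, READ=2, POLL=2 are illustrative, not from this listing), matching the qid range checks and the per-class index n above:

/*
 *   qid 0      admin queue
 *   qid 1..4   HCTX_TYPE_DEFAULT   n = qid - 1
 *   qid 5..6   HCTX_TYPE_READ      n = qid - 4 - 1
 *   qid 7..8   HCTX_TYPE_POLL      n = qid - 4 - 2 - 1
 *
 * cpumask_next_wrap(n - 1, ...) then picks the n-th online CPU
 * (0-based), wrapping around the online mask, as queue->io_cpu.
 */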
1560 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_tls_done() local
1564 dev_dbg(ctrl->ctrl.device, "queue %d: TLS handshake done, key %x, status %d\n", in nvme_tcp_tls_done()
1568 queue->tls_err = -status; in nvme_tcp_tls_done()
1574 dev_warn(ctrl->ctrl.device, "queue %d: Invalid key %x\n", in nvme_tcp_tls_done()
1576 queue->tls_err = -ENOKEY; in nvme_tcp_tls_done()
1578 ctrl->ctrl.tls_key = tls_key; in nvme_tcp_tls_done()
1579 queue->tls_err = 0; in nvme_tcp_tls_done()
1583 complete(&queue->tls_complete); in nvme_tcp_tls_done()
1596 dev_dbg(nctrl->device, "queue %d: start TLS with key %x\n", in nvme_tcp_start_tls()
1599 args.ta_sock = queue->sock; in nvme_tcp_start_tls()
1604 if (nctrl->opts->keyring) in nvme_tcp_start_tls()
1605 keyring = key_serial(nctrl->opts->keyring); in nvme_tcp_start_tls()
1608 queue->tls_err = -EOPNOTSUPP; in nvme_tcp_start_tls()
1609 init_completion(&queue->tls_complete); in nvme_tcp_start_tls()
1612 dev_err(nctrl->device, "queue %d: failed to start TLS: %d\n", in nvme_tcp_start_tls()
1616 ret = wait_for_completion_interruptible_timeout(&queue->tls_complete, tmo); in nvme_tcp_start_tls()
1619 ret = -ETIMEDOUT; in nvme_tcp_start_tls()
1621 dev_err(nctrl->device, in nvme_tcp_start_tls()
1624 tls_handshake_cancel(queue->sock->sk); in nvme_tcp_start_tls()
1626 dev_dbg(nctrl->device, in nvme_tcp_start_tls()
1628 qid, queue->tls_err); in nvme_tcp_start_tls()
1629 ret = queue->tls_err; in nvme_tcp_start_tls()
1637 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_alloc_queue() local
1638 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_alloc_queue()
1642 mutex_init(&queue->queue_lock); in nvme_tcp_alloc_queue()
1643 queue->ctrl = ctrl; in nvme_tcp_alloc_queue()
1644 init_llist_head(&queue->req_list); in nvme_tcp_alloc_queue()
1645 INIT_LIST_HEAD(&queue->send_list); in nvme_tcp_alloc_queue()
1646 mutex_init(&queue->send_mutex); in nvme_tcp_alloc_queue()
1647 INIT_WORK(&queue->io_work, nvme_tcp_io_work); in nvme_tcp_alloc_queue()
1650 queue->cmnd_capsule_len = nctrl->ioccsz * 16; in nvme_tcp_alloc_queue()
1652 queue->cmnd_capsule_len = sizeof(struct nvme_command) + in nvme_tcp_alloc_queue()
1655 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM, in nvme_tcp_alloc_queue()
1656 IPPROTO_TCP, &queue->sock); in nvme_tcp_alloc_queue()
1658 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1663 sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL); in nvme_tcp_alloc_queue()
1668 nvme_tcp_reclassify_socket(queue->sock); in nvme_tcp_alloc_queue()
1671 tcp_sock_set_syncnt(queue->sock->sk, 1); in nvme_tcp_alloc_queue()
1674 tcp_sock_set_nodelay(queue->sock->sk); in nvme_tcp_alloc_queue()
1681 sock_no_linger(queue->sock->sk); in nvme_tcp_alloc_queue()
1684 sock_set_priority(queue->sock->sk, so_priority); in nvme_tcp_alloc_queue()
1687 if (nctrl->opts->tos >= 0) in nvme_tcp_alloc_queue()
1688 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos); in nvme_tcp_alloc_queue()
1691 queue->sock->sk->sk_rcvtimeo = 10 * HZ; in nvme_tcp_alloc_queue()
1693 queue->sock->sk->sk_allocation = GFP_ATOMIC; in nvme_tcp_alloc_queue()
1694 queue->sock->sk->sk_use_task_frag = false; in nvme_tcp_alloc_queue()
1696 queue->request = NULL; in nvme_tcp_alloc_queue()
1697 queue->data_remaining = 0; in nvme_tcp_alloc_queue()
1698 queue->ddgst_remaining = 0; in nvme_tcp_alloc_queue()
1699 queue->pdu_remaining = 0; in nvme_tcp_alloc_queue()
1700 queue->pdu_offset = 0; in nvme_tcp_alloc_queue()
1701 sk_set_memalloc(queue->sock->sk); in nvme_tcp_alloc_queue()
1703 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) { in nvme_tcp_alloc_queue()
1704 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr, in nvme_tcp_alloc_queue()
1705 sizeof(ctrl->src_addr)); in nvme_tcp_alloc_queue()
1707 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1714 if (nctrl->opts->mask & NVMF_OPT_HOST_IFACE) { in nvme_tcp_alloc_queue()
1715 char *iface = nctrl->opts->host_iface; in nvme_tcp_alloc_queue()
1718 ret = sock_setsockopt(queue->sock, SOL_SOCKET, SO_BINDTODEVICE, in nvme_tcp_alloc_queue()
1721 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1728 queue->hdr_digest = nctrl->opts->hdr_digest; in nvme_tcp_alloc_queue()
1729 queue->data_digest = nctrl->opts->data_digest; in nvme_tcp_alloc_queue()
1730 if (queue->hdr_digest || queue->data_digest) { in nvme_tcp_alloc_queue()
1733 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1741 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL); in nvme_tcp_alloc_queue()
1742 if (!queue->pdu) { in nvme_tcp_alloc_queue()
1743 ret = -ENOMEM; in nvme_tcp_alloc_queue()
1747 dev_dbg(nctrl->device, "connecting queue %d\n", in nvme_tcp_alloc_queue()
1750 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr, in nvme_tcp_alloc_queue()
1751 sizeof(ctrl->addr), 0); in nvme_tcp_alloc_queue()
1753 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1769 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags); in nvme_tcp_alloc_queue()
1774 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvme_tcp_alloc_queue()
1776 kfree(queue->pdu); in nvme_tcp_alloc_queue()
1778 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_alloc_queue()
1781 /* ->sock will be released by fput() */ in nvme_tcp_alloc_queue()
1782 fput(queue->sock->file); in nvme_tcp_alloc_queue()
1783 queue->sock = NULL; in nvme_tcp_alloc_queue()
1785 mutex_destroy(&queue->send_mutex); in nvme_tcp_alloc_queue()
1786 mutex_destroy(&queue->queue_lock); in nvme_tcp_alloc_queue()
1792 struct socket *sock = queue->sock; in nvme_tcp_restore_sock_ops()
1794 write_lock_bh(&sock->sk->sk_callback_lock); in nvme_tcp_restore_sock_ops()
1795 sock->sk->sk_user_data = NULL; in nvme_tcp_restore_sock_ops()
1796 sock->sk->sk_data_ready = queue->data_ready; in nvme_tcp_restore_sock_ops()
1797 sock->sk->sk_state_change = queue->state_change; in nvme_tcp_restore_sock_ops()
1798 sock->sk->sk_write_space = queue->write_space; in nvme_tcp_restore_sock_ops()
1799 write_unlock_bh(&sock->sk->sk_callback_lock); in nvme_tcp_restore_sock_ops()
1804 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in __nvme_tcp_stop_queue()
1806 cancel_work_sync(&queue->io_work); in __nvme_tcp_stop_queue()
1811 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_stop_queue() local
1812 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_stop_queue()
1814 if (!test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_stop_queue()
1817 mutex_lock(&queue->queue_lock); in nvme_tcp_stop_queue()
1818 if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_stop_queue()
1820 mutex_unlock(&queue->queue_lock); in nvme_tcp_stop_queue()
1825 write_lock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_setup_sock_ops()
1826 queue->sock->sk->sk_user_data = queue; in nvme_tcp_setup_sock_ops()
1827 queue->state_change = queue->sock->sk->sk_state_change; in nvme_tcp_setup_sock_ops()
1828 queue->data_ready = queue->sock->sk->sk_data_ready; in nvme_tcp_setup_sock_ops()
1829 queue->write_space = queue->sock->sk->sk_write_space; in nvme_tcp_setup_sock_ops()
1830 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready; in nvme_tcp_setup_sock_ops()
1831 queue->sock->sk->sk_state_change = nvme_tcp_state_change; in nvme_tcp_setup_sock_ops()
1832 queue->sock->sk->sk_write_space = nvme_tcp_write_space; in nvme_tcp_setup_sock_ops()
1834 queue->sock->sk->sk_ll_usec = 1; in nvme_tcp_setup_sock_ops()
1836 write_unlock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_setup_sock_ops()
1841 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_start_queue() local
1842 struct nvme_tcp_queue *queue = &ctrl->queues[idx]; in nvme_tcp_start_queue()
1845 queue->rd_enabled = true; in nvme_tcp_start_queue()
1855 set_bit(NVME_TCP_Q_LIVE, &queue->flags); in nvme_tcp_start_queue()
1857 if (test_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_start_queue()
1859 dev_err(nctrl->device, in nvme_tcp_start_queue()
1865 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl) in nvme_tcp_free_admin_queue() argument
1867 if (to_tcp_ctrl(ctrl)->async_req.pdu) { in nvme_tcp_free_admin_queue()
1868 cancel_work_sync(&ctrl->async_event_work); in nvme_tcp_free_admin_queue()
1869 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl)); in nvme_tcp_free_admin_queue()
1870 to_tcp_ctrl(ctrl)->async_req.pdu = NULL; in nvme_tcp_free_admin_queue()
1873 nvme_tcp_free_queue(ctrl, 0); in nvme_tcp_free_admin_queue()
1876 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_free_io_queues() argument
1880 for (i = 1; i < ctrl->queue_count; i++) in nvme_tcp_free_io_queues()
1881 nvme_tcp_free_queue(ctrl, i); in nvme_tcp_free_io_queues()
1884 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_stop_io_queues() argument
1888 for (i = 1; i < ctrl->queue_count; i++) in nvme_tcp_stop_io_queues()
1889 nvme_tcp_stop_queue(ctrl, i); in nvme_tcp_stop_io_queues()
1892 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl, in nvme_tcp_start_io_queues() argument
1898 ret = nvme_tcp_start_queue(ctrl, i); in nvme_tcp_start_io_queues()
1906 for (i--; i >= first; i--) in nvme_tcp_start_io_queues()
1907 nvme_tcp_stop_queue(ctrl, i); in nvme_tcp_start_io_queues()
1911 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl) in nvme_tcp_alloc_admin_queue() argument
1916 if (nvme_tcp_tls(ctrl)) { in nvme_tcp_alloc_admin_queue()
1917 if (ctrl->opts->tls_key) in nvme_tcp_alloc_admin_queue()
1918 pskid = key_serial(ctrl->opts->tls_key); in nvme_tcp_alloc_admin_queue()
1920 pskid = nvme_tls_psk_default(ctrl->opts->keyring, in nvme_tcp_alloc_admin_queue()
1921 ctrl->opts->host->nqn, in nvme_tcp_alloc_admin_queue()
1922 ctrl->opts->subsysnqn); in nvme_tcp_alloc_admin_queue()
1924 dev_err(ctrl->device, "no valid PSK found\n"); in nvme_tcp_alloc_admin_queue()
1925 return -ENOKEY; in nvme_tcp_alloc_admin_queue()
1929 ret = nvme_tcp_alloc_queue(ctrl, 0, pskid); in nvme_tcp_alloc_admin_queue()
1933 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl)); in nvme_tcp_alloc_admin_queue()
1940 nvme_tcp_free_queue(ctrl, 0); in nvme_tcp_alloc_admin_queue()
1944 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) in __nvme_tcp_alloc_io_queues() argument
1948 if (nvme_tcp_tls(ctrl) && !ctrl->tls_key) { in __nvme_tcp_alloc_io_queues()
1949 dev_err(ctrl->device, "no PSK negotiated\n"); in __nvme_tcp_alloc_io_queues()
1950 return -ENOKEY; in __nvme_tcp_alloc_io_queues()
1952 for (i = 1; i < ctrl->queue_count; i++) { in __nvme_tcp_alloc_io_queues()
1953 ret = nvme_tcp_alloc_queue(ctrl, i, in __nvme_tcp_alloc_io_queues()
1954 key_serial(ctrl->tls_key)); in __nvme_tcp_alloc_io_queues()
1962 for (i--; i >= 1; i--) in __nvme_tcp_alloc_io_queues()
1963 nvme_tcp_free_queue(ctrl, i); in __nvme_tcp_alloc_io_queues()
1968 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_alloc_io_queues() argument
1973 nr_io_queues = nvmf_nr_io_queues(ctrl->opts); in nvme_tcp_alloc_io_queues()
1974 ret = nvme_set_queue_count(ctrl, &nr_io_queues); in nvme_tcp_alloc_io_queues()
1979 dev_err(ctrl->device, in nvme_tcp_alloc_io_queues()
1981 return -ENOMEM; in nvme_tcp_alloc_io_queues()
1984 ctrl->queue_count = nr_io_queues + 1; in nvme_tcp_alloc_io_queues()
1985 dev_info(ctrl->device, in nvme_tcp_alloc_io_queues()
1988 nvmf_set_io_queues(ctrl->opts, nr_io_queues, in nvme_tcp_alloc_io_queues()
1989 to_tcp_ctrl(ctrl)->io_queues); in nvme_tcp_alloc_io_queues()
1990 return __nvme_tcp_alloc_io_queues(ctrl); in nvme_tcp_alloc_io_queues()
1993 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove) in nvme_tcp_destroy_io_queues() argument
1995 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_destroy_io_queues()
1997 nvme_remove_io_tag_set(ctrl); in nvme_tcp_destroy_io_queues()
1998 nvme_tcp_free_io_queues(ctrl); in nvme_tcp_destroy_io_queues()
2001 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_configure_io_queues() argument
2005 ret = nvme_tcp_alloc_io_queues(ctrl); in nvme_tcp_configure_io_queues()
2010 ret = nvme_alloc_io_tag_set(ctrl, &to_tcp_ctrl(ctrl)->tag_set, in nvme_tcp_configure_io_queues()
2012 ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2, in nvme_tcp_configure_io_queues()
2023 nr_queues = min(ctrl->tagset->nr_hw_queues + 1, ctrl->queue_count); in nvme_tcp_configure_io_queues()
2024 ret = nvme_tcp_start_io_queues(ctrl, 1, nr_queues); in nvme_tcp_configure_io_queues()
2029 nvme_start_freeze(ctrl); in nvme_tcp_configure_io_queues()
2030 nvme_unquiesce_io_queues(ctrl); in nvme_tcp_configure_io_queues()
2031 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) { in nvme_tcp_configure_io_queues()
2037 ret = -ENODEV; in nvme_tcp_configure_io_queues()
2038 nvme_unfreeze(ctrl); in nvme_tcp_configure_io_queues()
2041 blk_mq_update_nr_hw_queues(ctrl->tagset, in nvme_tcp_configure_io_queues()
2042 ctrl->queue_count - 1); in nvme_tcp_configure_io_queues()
2043 nvme_unfreeze(ctrl); in nvme_tcp_configure_io_queues()
2050 ret = nvme_tcp_start_io_queues(ctrl, nr_queues, in nvme_tcp_configure_io_queues()
2051 ctrl->tagset->nr_hw_queues + 1); in nvme_tcp_configure_io_queues()
2058 nvme_quiesce_io_queues(ctrl); in nvme_tcp_configure_io_queues()
2059 nvme_sync_io_queues(ctrl); in nvme_tcp_configure_io_queues()
2060 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_configure_io_queues()
2062 nvme_cancel_tagset(ctrl); in nvme_tcp_configure_io_queues()
2064 nvme_remove_io_tag_set(ctrl); in nvme_tcp_configure_io_queues()
2066 nvme_tcp_free_io_queues(ctrl); in nvme_tcp_configure_io_queues()
2070 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove) in nvme_tcp_destroy_admin_queue() argument
2072 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_destroy_admin_queue()
2074 nvme_remove_admin_tag_set(ctrl); in nvme_tcp_destroy_admin_queue()
2075 nvme_tcp_free_admin_queue(ctrl); in nvme_tcp_destroy_admin_queue()
2078 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_configure_admin_queue() argument
2082 error = nvme_tcp_alloc_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
2087 error = nvme_alloc_admin_tag_set(ctrl, in nvme_tcp_configure_admin_queue()
2088 &to_tcp_ctrl(ctrl)->admin_tag_set, in nvme_tcp_configure_admin_queue()
2095 error = nvme_tcp_start_queue(ctrl, 0); in nvme_tcp_configure_admin_queue()
2099 error = nvme_enable_ctrl(ctrl); in nvme_tcp_configure_admin_queue()
2103 nvme_unquiesce_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
2105 error = nvme_init_ctrl_finish(ctrl, false); in nvme_tcp_configure_admin_queue()
2112 nvme_quiesce_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
2113 blk_sync_queue(ctrl->admin_q); in nvme_tcp_configure_admin_queue()
2115 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_configure_admin_queue()
2116 nvme_cancel_admin_tagset(ctrl); in nvme_tcp_configure_admin_queue()
2119 nvme_remove_admin_tag_set(ctrl); in nvme_tcp_configure_admin_queue()
2121 nvme_tcp_free_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
2125 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, in nvme_tcp_teardown_admin_queue() argument
2128 nvme_quiesce_admin_queue(ctrl); in nvme_tcp_teardown_admin_queue()
2129 blk_sync_queue(ctrl->admin_q); in nvme_tcp_teardown_admin_queue()
2130 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_teardown_admin_queue()
2131 nvme_cancel_admin_tagset(ctrl); in nvme_tcp_teardown_admin_queue()
2133 nvme_unquiesce_admin_queue(ctrl); in nvme_tcp_teardown_admin_queue()
2134 nvme_tcp_destroy_admin_queue(ctrl, remove); in nvme_tcp_teardown_admin_queue()
2137 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, in nvme_tcp_teardown_io_queues() argument
2140 if (ctrl->queue_count <= 1) in nvme_tcp_teardown_io_queues()
2142 nvme_quiesce_admin_queue(ctrl); in nvme_tcp_teardown_io_queues()
2143 nvme_quiesce_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
2144 nvme_sync_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
2145 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
2146 nvme_cancel_tagset(ctrl); in nvme_tcp_teardown_io_queues()
2148 nvme_unquiesce_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
2149 nvme_tcp_destroy_io_queues(ctrl, remove); in nvme_tcp_teardown_io_queues()
2152 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl) in nvme_tcp_reconnect_or_remove() argument
2154 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); in nvme_tcp_reconnect_or_remove()
2162 if (nvmf_should_reconnect(ctrl)) { in nvme_tcp_reconnect_or_remove()
2163 dev_info(ctrl->device, "Reconnecting in %d seconds...\n", in nvme_tcp_reconnect_or_remove()
2164 ctrl->opts->reconnect_delay); in nvme_tcp_reconnect_or_remove()
2165 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work, in nvme_tcp_reconnect_or_remove()
2166 ctrl->opts->reconnect_delay * HZ); in nvme_tcp_reconnect_or_remove()
2168 dev_info(ctrl->device, "Removing controller...\n"); in nvme_tcp_reconnect_or_remove()
2169 nvme_delete_ctrl(ctrl); in nvme_tcp_reconnect_or_remove()
2173 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_setup_ctrl() argument
2175 struct nvmf_ctrl_options *opts = ctrl->opts; in nvme_tcp_setup_ctrl()
2178 ret = nvme_tcp_configure_admin_queue(ctrl, new); in nvme_tcp_setup_ctrl()
2182 if (ctrl->icdoff) { in nvme_tcp_setup_ctrl()
2183 ret = -EOPNOTSUPP; in nvme_tcp_setup_ctrl()
2184 dev_err(ctrl->device, "icdoff is not supported!\n"); in nvme_tcp_setup_ctrl()
2188 if (!nvme_ctrl_sgl_supported(ctrl)) { in nvme_tcp_setup_ctrl()
2189 ret = -EOPNOTSUPP; in nvme_tcp_setup_ctrl()
2190 dev_err(ctrl->device, "Mandatory sgls are not supported!\n"); in nvme_tcp_setup_ctrl()
2194 if (opts->queue_size > ctrl->sqsize + 1) in nvme_tcp_setup_ctrl()
2195 dev_warn(ctrl->device, in nvme_tcp_setup_ctrl()
2196 "queue_size %zu > ctrl sqsize %u, clamping down\n", in nvme_tcp_setup_ctrl()
2197 opts->queue_size, ctrl->sqsize + 1); in nvme_tcp_setup_ctrl()
2199 if (ctrl->sqsize + 1 > ctrl->maxcmd) { in nvme_tcp_setup_ctrl()
2200 dev_warn(ctrl->device, in nvme_tcp_setup_ctrl()
2201 "sqsize %u > ctrl maxcmd %u, clamping down\n", in nvme_tcp_setup_ctrl()
2202 ctrl->sqsize + 1, ctrl->maxcmd); in nvme_tcp_setup_ctrl()
2203 ctrl->sqsize = ctrl->maxcmd - 1; in nvme_tcp_setup_ctrl()
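The clamping above is plain arithmetic; tracing it with assumed values (queue_size = 128, sqsize = 63 so sqsize + 1 = 64 entries, maxcmd = 32):

/* Assumed values, to trace the two warnings above:
 *   128 > 64 -> "queue_size 128 > ctrl sqsize 64, clamping down"
 *   64 > 32  -> "sqsize 64 > ctrl maxcmd 32, clamping down",
 *               then sqsize = maxcmd - 1 = 31 (i.e. 32 entries).
 */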
2206 if (ctrl->queue_count > 1) { in nvme_tcp_setup_ctrl()
2207 ret = nvme_tcp_configure_io_queues(ctrl, new); in nvme_tcp_setup_ctrl()
2212 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) { in nvme_tcp_setup_ctrl()
2214 * state change failure is ok if we started ctrl delete, in nvme_tcp_setup_ctrl()
2218 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); in nvme_tcp_setup_ctrl()
2223 ret = -EINVAL; in nvme_tcp_setup_ctrl()
2227 nvme_start_ctrl(ctrl); in nvme_tcp_setup_ctrl()
2231 if (ctrl->queue_count > 1) { in nvme_tcp_setup_ctrl()
2232 nvme_quiesce_io_queues(ctrl); in nvme_tcp_setup_ctrl()
2233 nvme_sync_io_queues(ctrl); in nvme_tcp_setup_ctrl()
2234 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_setup_ctrl()
2235 nvme_cancel_tagset(ctrl); in nvme_tcp_setup_ctrl()
2236 nvme_tcp_destroy_io_queues(ctrl, new); in nvme_tcp_setup_ctrl()
2239 nvme_stop_keep_alive(ctrl); in nvme_tcp_setup_ctrl()
2240 nvme_tcp_teardown_admin_queue(ctrl, false); in nvme_tcp_setup_ctrl()
2248 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; in nvme_tcp_reconnect_ctrl_work() local
2250 ++ctrl->nr_reconnects; in nvme_tcp_reconnect_ctrl_work()
2252 if (nvme_tcp_setup_ctrl(ctrl, false)) in nvme_tcp_reconnect_ctrl_work()
2255 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n", in nvme_tcp_reconnect_ctrl_work()
2256 ctrl->nr_reconnects); in nvme_tcp_reconnect_ctrl_work()
2258 ctrl->nr_reconnects = 0; in nvme_tcp_reconnect_ctrl_work()
2263 dev_info(ctrl->device, "Failed reconnect attempt %d\n", in nvme_tcp_reconnect_ctrl_work()
2264 ctrl->nr_reconnects); in nvme_tcp_reconnect_ctrl_work()
2265 nvme_tcp_reconnect_or_remove(ctrl); in nvme_tcp_reconnect_ctrl_work()
2272 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; in nvme_tcp_error_recovery_work() local
2274 nvme_stop_keep_alive(ctrl); in nvme_tcp_error_recovery_work()
2275 flush_work(&ctrl->async_event_work); in nvme_tcp_error_recovery_work()
2276 nvme_tcp_teardown_io_queues(ctrl, false); in nvme_tcp_error_recovery_work()
2278 nvme_unquiesce_io_queues(ctrl); in nvme_tcp_error_recovery_work()
2279 nvme_tcp_teardown_admin_queue(ctrl, false); in nvme_tcp_error_recovery_work()
2280 nvme_unquiesce_admin_queue(ctrl); in nvme_tcp_error_recovery_work()
2281 nvme_auth_stop(ctrl); in nvme_tcp_error_recovery_work()
2283 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { in nvme_tcp_error_recovery_work()
2284 /* state change failure is ok if we started ctrl delete */ in nvme_tcp_error_recovery_work()
2285 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); in nvme_tcp_error_recovery_work()
2292 nvme_tcp_reconnect_or_remove(ctrl); in nvme_tcp_error_recovery_work()
2295 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown) in nvme_tcp_teardown_ctrl() argument
2297 nvme_tcp_teardown_io_queues(ctrl, shutdown); in nvme_tcp_teardown_ctrl()
2298 nvme_quiesce_admin_queue(ctrl); in nvme_tcp_teardown_ctrl()
2299 nvme_disable_ctrl(ctrl, shutdown); in nvme_tcp_teardown_ctrl()
2300 nvme_tcp_teardown_admin_queue(ctrl, shutdown); in nvme_tcp_teardown_ctrl()
2303 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl) in nvme_tcp_delete_ctrl() argument
2305 nvme_tcp_teardown_ctrl(ctrl, true); in nvme_tcp_delete_ctrl()
2310 struct nvme_ctrl *ctrl = in nvme_reset_ctrl_work() local
2313 nvme_stop_ctrl(ctrl); in nvme_reset_ctrl_work()
2314 nvme_tcp_teardown_ctrl(ctrl, false); in nvme_reset_ctrl_work()
2316 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { in nvme_reset_ctrl_work()
2317 /* state change failure is ok if we started ctrl delete */ in nvme_reset_ctrl_work()
2318 enum nvme_ctrl_state state = nvme_ctrl_state(ctrl); in nvme_reset_ctrl_work()
2325 if (nvme_tcp_setup_ctrl(ctrl, false)) in nvme_reset_ctrl_work()
2331 ++ctrl->nr_reconnects; in nvme_reset_ctrl_work()
2332 nvme_tcp_reconnect_or_remove(ctrl); in nvme_reset_ctrl_work()
2335 static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl) in nvme_tcp_stop_ctrl() argument
2337 flush_work(&to_tcp_ctrl(ctrl)->err_work); in nvme_tcp_stop_ctrl()
2338 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); in nvme_tcp_stop_ctrl()
2343 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_free_ctrl() local
2345 if (list_empty(&ctrl->list)) in nvme_tcp_free_ctrl()
2349 list_del(&ctrl->list); in nvme_tcp_free_ctrl()
2352 nvmf_free_options(nctrl->opts); in nvme_tcp_free_ctrl()
2354 kfree(ctrl->queues); in nvme_tcp_free_ctrl()
2355 kfree(ctrl); in nvme_tcp_free_ctrl()
2360 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_null()
2362 sg->addr = 0; in nvme_tcp_set_sg_null()
2363 sg->length = 0; in nvme_tcp_set_sg_null()
2364 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | in nvme_tcp_set_sg_null()
2371 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_inline()
2373 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_tcp_set_sg_inline()
2374 sg->length = cpu_to_le32(data_len); in nvme_tcp_set_sg_inline()
2375 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; in nvme_tcp_set_sg_inline()
2381 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_host_data()
2383 sg->addr = 0; in nvme_tcp_set_sg_host_data()
2384 sg->length = cpu_to_le32(data_len); in nvme_tcp_set_sg_host_data()
2385 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | in nvme_tcp_set_sg_host_data()
2391 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg); in nvme_tcp_submit_async_event() local
2392 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_submit_async_event()
2393 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu; in nvme_tcp_submit_async_event()
2394 struct nvme_command *cmd = &pdu->cmd; in nvme_tcp_submit_async_event()
2398 pdu->hdr.type = nvme_tcp_cmd; in nvme_tcp_submit_async_event()
2399 if (queue->hdr_digest) in nvme_tcp_submit_async_event()
2400 pdu->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_submit_async_event()
2401 pdu->hdr.hlen = sizeof(*pdu); in nvme_tcp_submit_async_event()
2402 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); in nvme_tcp_submit_async_event()
2404 cmd->common.opcode = nvme_admin_async_event; in nvme_tcp_submit_async_event()
2405 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH; in nvme_tcp_submit_async_event()
2406 cmd->common.flags |= NVME_CMD_SGL_METABUF; in nvme_tcp_submit_async_event()
2409 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU; in nvme_tcp_submit_async_event()
2410 ctrl->async_req.offset = 0; in nvme_tcp_submit_async_event()
2411 ctrl->async_req.curr_bio = NULL; in nvme_tcp_submit_async_event()
2412 ctrl->async_req.data_len = 0; in nvme_tcp_submit_async_event()
2414 nvme_tcp_queue_request(&ctrl->async_req, true, true); in nvme_tcp_submit_async_event()
2420 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_complete_timed_out() local
2422 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue)); in nvme_tcp_complete_timed_out()
2429 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_timeout() local
2431 struct nvme_command *cmd = &pdu->cmd; in nvme_tcp_timeout()
2432 int qid = nvme_tcp_queue_id(req->queue); in nvme_tcp_timeout()
2434 dev_warn(ctrl->device, in nvme_tcp_timeout()
2436 rq->tag, nvme_cid(rq), pdu->hdr.type, cmd->common.opcode, in nvme_tcp_timeout()
2439 if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) { in nvme_tcp_timeout()
2444 * - ctrl disable/shutdown fabrics requests in nvme_tcp_timeout()
2445 * - connect requests in nvme_tcp_timeout()
2446 * - initialization admin requests in nvme_tcp_timeout()
2447 * - I/O requests that entered after unquiescing and in nvme_tcp_timeout()
2461 nvme_tcp_error_recovery(ctrl); in nvme_tcp_timeout()
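/*
 * Return-value contract of nvme_tcp_timeout(), roughly (sketch, not verbatim
 * source): a controller that is no longer LIVE completes the request on the
 * spot, while a LIVE controller hands off to error recovery and re-arms the
 * block-layer timer:
 *
 *	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE) {
 *		nvme_tcp_complete_timed_out(rq);
 *		return BLK_EH_DONE;
 *	}
 *	nvme_tcp_error_recovery(ctrl);
 *	return BLK_EH_RESET_TIMER;
 */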
2470 struct nvme_command *c = &pdu->cmd; in nvme_tcp_map_data()
2472 c->common.flags |= NVME_CMD_SGL_METABUF; in nvme_tcp_map_data()
2477 else if (rq_data_dir(rq) == WRITE && req->data_len <= nvme_tcp_inline_data_size(req)) in nvme_tcp_map_data()
2478 nvme_tcp_set_sg_inline(queue, c, req->data_len); in nvme_tcp_map_data()
2480 nvme_tcp_set_sg_host_data(c, req->data_len); in nvme_tcp_map_data()
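/*
 * Full descriptor selection in nvme_tcp_map_data(), with the elided no-data
 * branch restored from context (sketch):
 *
 *	if (!blk_rq_nr_phys_segments(rq))
 *		nvme_tcp_set_sg_null(c);			(no payload)
 *	else if (rq_data_dir(rq) == WRITE &&
 *		 req->data_len <= nvme_tcp_inline_data_size(req))
 *		nvme_tcp_set_sg_inline(queue, c, req->data_len);(in-capsule)
 *	else
 *		nvme_tcp_set_sg_host_data(c, req->data_len);	(data PDUs)
 */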
2490 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_cmd_pdu()
2498 req->state = NVME_TCP_SEND_CMD_PDU; in nvme_tcp_setup_cmd_pdu()
2499 req->status = cpu_to_le16(NVME_SC_SUCCESS); in nvme_tcp_setup_cmd_pdu()
2500 req->offset = 0; in nvme_tcp_setup_cmd_pdu()
2501 req->data_sent = 0; in nvme_tcp_setup_cmd_pdu()
2502 req->pdu_len = 0; in nvme_tcp_setup_cmd_pdu()
2503 req->pdu_sent = 0; in nvme_tcp_setup_cmd_pdu()
2504 req->h2cdata_left = 0; in nvme_tcp_setup_cmd_pdu()
2505 req->data_len = blk_rq_nr_phys_segments(rq) ? blk_rq_payload_bytes(rq) : 0; in nvme_tcp_setup_cmd_pdu()
2507 req->curr_bio = rq->bio; in nvme_tcp_setup_cmd_pdu()
2508 if (req->curr_bio && req->data_len) nvme_tcp_init_iter(req, rq_data_dir(rq)); in nvme_tcp_setup_cmd_pdu()
2512 if (rq_data_dir(rq) == WRITE && req->data_len <= nvme_tcp_inline_data_size(req)) in nvme_tcp_setup_cmd_pdu()
2513 req->pdu_len = req->data_len; in nvme_tcp_setup_cmd_pdu()
2515 pdu->hdr.type = nvme_tcp_cmd; in nvme_tcp_setup_cmd_pdu()
2516 pdu->hdr.flags = 0; in nvme_tcp_setup_cmd_pdu()
2517 if (queue->hdr_digest) in nvme_tcp_setup_cmd_pdu()
2518 pdu->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_setup_cmd_pdu()
2519 if (queue->data_digest && req->pdu_len) { in nvme_tcp_setup_cmd_pdu()
2520 pdu->hdr.flags |= NVME_TCP_F_DDGST; in nvme_tcp_setup_cmd_pdu()
2523 pdu->hdr.hlen = sizeof(*pdu); in nvme_tcp_setup_cmd_pdu()
2524 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0; in nvme_tcp_setup_cmd_pdu()
2525 pdu->hdr.plen = in nvme_tcp_setup_cmd_pdu()
2526 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst); in nvme_tcp_setup_cmd_pdu()
2531 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_cmd_pdu()
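/*
 * Worked example for the header math above, assuming both digests are
 * enabled and a 4096-byte write that fits inline (illustrative numbers, not
 * taken from the source): hlen = 8-byte common header + 64-byte SQE = 72;
 * hdgst = ddgst = 4 (CRC32C); pdo = 72 + 4 = 76, so the payload starts right
 * after the header digest; plen = 72 + 4 + 4096 + 4 = 4176.
 */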
2541 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_commit_rqs()
2543 if (!llist_empty(&queue->req_list)) in nvme_tcp_commit_rqs()
2544 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_commit_rqs()
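/*
 * ->commit_rqs() is blk-mq's end-of-batch hook: when requests were queued
 * with bd->last == false (e.g. the plug was exhausted mid-batch), nobody has
 * kicked io_work yet, so it is scheduled here once per batch instead of once
 * per request.
 */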
2550 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_tcp_queue_rq()
2551 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_queue_rq()
2552 struct request *rq = bd->rq; in nvme_tcp_queue_rq()
2554 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags); in nvme_tcp_queue_rq()
2557 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_tcp_queue_rq()
2558 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_tcp_queue_rq()
2566 nvme_tcp_queue_request(req, true, bd->last); in nvme_tcp_queue_rq()
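/*
 * Elided between the readiness check and the queueing call above (sketch,
 * assuming current mainline helpers): the command PDU is built and the
 * request started before it is handed to the send path:
 *
 *	ret = nvme_tcp_setup_cmd_pdu(ns, rq);
 *	if (unlikely(ret))
 *		return ret;
 *	nvme_start_request(rq);
 */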
2573 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(set->driver_data); in nvme_tcp_map_queues() local
2575 nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues); in nvme_tcp_map_queues()
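/*
 * Queue mapping is fully delegated: nvmf_map_queues() spreads the default,
 * read and poll hardware contexts according to the per-type counts the
 * driver recorded in ctrl->io_queues when the I/O queues were allocated.
 */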
2580 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_poll()
2581 struct sock *sk = queue->sock->sk; in nvme_tcp_poll()
2583 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) return 0; in nvme_tcp_poll()
2586 set_bit(NVME_TCP_Q_POLLING, &queue->flags); in nvme_tcp_poll()
2587 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue)) in nvme_tcp_poll()
2590 clear_bit(NVME_TCP_Q_POLLING, &queue->flags); in nvme_tcp_poll()
2591 return queue->nr_cqe; in nvme_tcp_poll()
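/*
 * Elided from the poll path above (sketch): with an empty receive queue the
 * socket busy-polls first, then any pending PDUs are consumed before the
 * completion count is returned:
 *
 *		sk_busy_loop(sk, true);
 *	nvme_tcp_try_recv(queue);
 */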
2594 static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size) in nvme_tcp_get_address() argument
2596 struct nvme_tcp_queue *queue = &to_tcp_ctrl(ctrl)->queues[0]; in nvme_tcp_get_address()
2598 int ret, len; in nvme_tcp_get_address() local
2600 len = nvmf_get_address(ctrl, buf, size); in nvme_tcp_get_address()
2602 mutex_lock(&queue->queue_lock); in nvme_tcp_get_address()
2604 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) goto done; in nvme_tcp_get_address()
2606 ret = kernel_getsockname(queue->sock, (struct sockaddr *)&src_addr); in nvme_tcp_get_address()
2608 if (len > 0) in nvme_tcp_get_address()
2609 len--; /* strip trailing newline */ in nvme_tcp_get_address()
2610 len += scnprintf(buf + len, size - len, "%ssrc_addr=%pISc\n", in nvme_tcp_get_address()
2611 (len) ? "," : "", &src_addr); in nvme_tcp_get_address()
2614 mutex_unlock(&queue->queue_lock); in nvme_tcp_get_address()
2616 return len; in nvme_tcp_get_address()
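/*
 * The len-- above deliberately overwrites the trailing newline emitted by
 * nvmf_get_address(), so "src_addr=" is appended to the same line; the
 * attribute is only reported while the admin queue (and thus its socket) is
 * live, with queue_lock guarding against a concurrent teardown.
 */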
2657 struct nvme_tcp_ctrl *ctrl; in nvme_tcp_existing_controller() local
2661 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) { in nvme_tcp_existing_controller()
2662 found = nvmf_ip_options_match(&ctrl->ctrl, opts); in nvme_tcp_existing_controller()
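/*
 * Duplicate-connection detection: the walk over nvme_tcp_ctrl_list runs
 * under nvme_tcp_ctrl_mutex (elided here), and nvmf_ip_options_match()
 * compares the subsystem/host NQNs together with the traddr/trsvcid/
 * host_traddr tuple, so only an exact re-connect of an existing controller
 * is rejected.
 */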
2674 struct nvme_tcp_ctrl *ctrl; in nvme_tcp_create_ctrl() local
2677 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); in nvme_tcp_create_ctrl()
2678 if (!ctrl) in nvme_tcp_create_ctrl()
2679 return ERR_PTR(-ENOMEM); in nvme_tcp_create_ctrl()
2681 INIT_LIST_HEAD(&ctrl->list); in nvme_tcp_create_ctrl()
2682 ctrl->ctrl.opts = opts; in nvme_tcp_create_ctrl()
2683 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + in nvme_tcp_create_ctrl()
2684 opts->nr_poll_queues + 1; in nvme_tcp_create_ctrl()
2685 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_tcp_create_ctrl()
2686 ctrl->ctrl.kato = opts->kato; in nvme_tcp_create_ctrl()
2688 INIT_DELAYED_WORK(&ctrl->connect_work, in nvme_tcp_create_ctrl()
2690 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work); in nvme_tcp_create_ctrl()
2691 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work); in nvme_tcp_create_ctrl()
2693 if (!(opts->mask & NVMF_OPT_TRSVCID)) { in nvme_tcp_create_ctrl()
2694 opts->trsvcid = in nvme_tcp_create_ctrl()
2696 if (!opts->trsvcid) { in nvme_tcp_create_ctrl()
2697 ret = -ENOMEM; in nvme_tcp_create_ctrl()
2700 opts->mask |= NVMF_OPT_TRSVCID; in nvme_tcp_create_ctrl()
2704 opts->traddr, opts->trsvcid, &ctrl->addr); in nvme_tcp_create_ctrl()
2707 opts->traddr, opts->trsvcid); in nvme_tcp_create_ctrl()
2711 if (opts->mask & NVMF_OPT_HOST_TRADDR) { in nvme_tcp_create_ctrl()
2713 opts->host_traddr, NULL, &ctrl->src_addr); in nvme_tcp_create_ctrl()
2716 opts->host_traddr); in nvme_tcp_create_ctrl()
2721 if (opts->mask & NVMF_OPT_HOST_IFACE) { in nvme_tcp_create_ctrl()
2722 if (!__dev_get_by_name(&init_net, opts->host_iface)) { in nvme_tcp_create_ctrl()
2724 opts->host_iface); in nvme_tcp_create_ctrl()
2725 ret = -ENODEV; in nvme_tcp_create_ctrl()
2730 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) { in nvme_tcp_create_ctrl()
2731 ret = -EALREADY; in nvme_tcp_create_ctrl()
2735 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), in nvme_tcp_create_ctrl()
2737 if (!ctrl->queues) { in nvme_tcp_create_ctrl()
2738 ret = -ENOMEM; in nvme_tcp_create_ctrl()
2742 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0); in nvme_tcp_create_ctrl()
2746 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { in nvme_tcp_create_ctrl()
2748 ret = -EINTR; in nvme_tcp_create_ctrl()
2752 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true); in nvme_tcp_create_ctrl()
2756 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp, hostnqn: %s\n", in nvme_tcp_create_ctrl()
2757 nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr, opts->host->nqn); in nvme_tcp_create_ctrl()
2760 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list); in nvme_tcp_create_ctrl()
2763 return &ctrl->ctrl; in nvme_tcp_create_ctrl()
2766 nvme_uninit_ctrl(&ctrl->ctrl); in nvme_tcp_create_ctrl()
2767 nvme_put_ctrl(&ctrl->ctrl); in nvme_tcp_create_ctrl()
2769 ret = -EIO; in nvme_tcp_create_ctrl()
2772 kfree(ctrl->queues); in nvme_tcp_create_ctrl()
2774 kfree(ctrl); in nvme_tcp_create_ctrl()
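/*
 * All of the above is driven from userspace via the fabrics device; a
 * typical invocation that walks this exact path (illustrative address and
 * NQN):
 *
 *	nvme connect -t tcp -a 192.168.1.10 -s 4420 \
 *		-n nqn.2014-08.org.nvmexpress:example-subsys
 *
 * Omitting -s takes the NVMF_OPT_TRSVCID branch above, which fills in the
 * driver's default port.
 */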
2805 return -ENOMEM; in nvme_tcp_init_module()
2813 struct nvme_tcp_ctrl *ctrl; in nvme_tcp_cleanup_module() local
2818 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) in nvme_tcp_cleanup_module()
2819 nvme_delete_ctrl(&ctrl->ctrl); in nvme_tcp_cleanup_module()
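/*
 * Module unload schedules deletion of every controller still on the list;
 * the elided tail then flushes nvme_delete_wq so all deletions have finished
 * before nvme_tcp_wq is destroyed and the transport goes away.
 */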