Lines Matching +full:ctrl +full:- +full:len
1 // SPDX-License-Identifier: GPL-2.0
11 #include <linux/nvme-tcp.h>
14 #include <linux/blk-mq.h>
25 * A non-zero value is sufficient to indicate general consideration of any
97 struct nvme_tcp_ctrl *ctrl; member
125 struct nvme_ctrl ctrl; member
140 static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl) in to_tcp_ctrl() argument
142 return container_of(ctrl, struct nvme_tcp_ctrl, ctrl); in to_tcp_ctrl()
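The two matched lines above are the standard embedded-struct downcast. A self-contained sketch of the idiom (struct bodies trimmed; only the embedding matters, and the layout shown here is illustrative, not the driver's full definitions):

#include <linux/kernel.h>

struct nvme_ctrl { int dummy; /* generic controller state, trimmed */ };

struct nvme_tcp_ctrl {
        /* ... transport-private fields ... */
        struct nvme_ctrl ctrl;          /* embedded generic controller */
};

static inline struct nvme_tcp_ctrl *to_tcp_ctrl(struct nvme_ctrl *ctrl)
{
        /* subtract offsetof(struct nvme_tcp_ctrl, ctrl) from the pointer */
        return container_of(ctrl, struct nvme_tcp_ctrl, ctrl);
}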
147 return queue - queue->ctrl->queues; in nvme_tcp_queue_id()
155 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_tcp_tagset()
156 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_tcp_tagset()
161 return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_hdgst_len()
166 return queue->data_digest ? NVME_TCP_DIGEST_LENGTH : 0; in nvme_tcp_ddgst_len()
171 return queue->cmnd_capsule_len - sizeof(struct nvme_command); in nvme_tcp_inline_data_size()
176 return req == &req->queue->ctrl->async_req; in nvme_tcp_async_req()
188 return rq_data_dir(rq) == WRITE && req->data_len && in nvme_tcp_has_inline_data()
189 req->data_len <= nvme_tcp_inline_data_size(req->queue); in nvme_tcp_has_inline_data()
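Worked numbers for the inline-data rule above (the ioccsz value is assumed for illustration; it is not part of this listing):

/*
 * ioccsz = 260  ->  cmnd_capsule_len = 260 * 16 = 4160 bytes
 * inline size   = 4160 - sizeof(struct nvme_command) = 4160 - 64 = 4096
 * A WRITE of up to 4 KiB therefore travels inside the command capsule and
 * skips the R2T round trip; anything larger must wait for the controller's
 * R2T before data is sent.
 */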
194 return req->iter.bvec->bv_page; in nvme_tcp_req_cur_page()
199 return req->iter.bvec->bv_offset + req->iter.iov_offset; in nvme_tcp_req_cur_offset()
204 return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset, in nvme_tcp_req_cur_length()
205 req->pdu_len - req->pdu_sent); in nvme_tcp_req_cur_length()
210 return req->iter.iov_offset; in nvme_tcp_req_offset()
216 req->pdu_len - req->pdu_sent : 0; in nvme_tcp_pdu_data_left()
220 int len) in nvme_tcp_pdu_last_send() argument
222 return nvme_tcp_pdu_data_left(req) <= len; in nvme_tcp_pdu_last_send()
234 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) { in nvme_tcp_init_iter()
235 vec = &rq->special_vec; in nvme_tcp_init_iter()
240 struct bio *bio = req->curr_bio; in nvme_tcp_init_iter()
242 vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter); in nvme_tcp_init_iter()
244 size = bio->bi_iter.bi_size; in nvme_tcp_init_iter()
245 offset = bio->bi_iter.bi_bvec_done; in nvme_tcp_init_iter()
248 iov_iter_bvec(&req->iter, dir, vec, nsegs, size); in nvme_tcp_init_iter()
249 req->iter.iov_offset = offset; in nvme_tcp_init_iter()
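A minimal sketch of the bio branch of nvme_tcp_init_iter() shown above: aim an iov_iter at the not-yet-consumed part of a bio. The helper name init_iter_from_bio is hypothetical, and the RQF_SPECIAL_PAYLOAD branch is omitted:

static void init_iter_from_bio(struct iov_iter *iter, struct bio *bio,
                               unsigned int dir)
{
        /* first bvec still owed data, per the bio's internal iterator */
        struct bio_vec *vec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);

        iov_iter_bvec(iter, dir, vec, bio_segments(bio), bio->bi_iter.bi_size);
        /* skip whatever part of the first bvec earlier PDUs consumed */
        iter->iov_offset = bio->bi_iter.bi_bvec_done;
}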
253 int len) in nvme_tcp_advance_req() argument
255 req->data_sent += len; in nvme_tcp_advance_req()
256 req->pdu_sent += len; in nvme_tcp_advance_req()
257 iov_iter_advance(&req->iter, len); in nvme_tcp_advance_req()
258 if (!iov_iter_count(&req->iter) && in nvme_tcp_advance_req()
259 req->data_sent < req->data_len) { in nvme_tcp_advance_req()
260 req->curr_bio = req->curr_bio->bi_next; in nvme_tcp_advance_req()
268 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_queue_request()
271 empty = llist_add(&req->lentry, &queue->req_list) && in nvme_tcp_queue_request()
272 list_empty(&queue->send_list) && !queue->request; in nvme_tcp_queue_request()
279 if (queue->io_cpu == smp_processor_id() && in nvme_tcp_queue_request()
280 sync && empty && mutex_trylock(&queue->send_mutex)) { in nvme_tcp_queue_request()
281 queue->more_requests = !last; in nvme_tcp_queue_request()
283 queue->more_requests = false; in nvme_tcp_queue_request()
284 mutex_unlock(&queue->send_mutex); in nvme_tcp_queue_request()
286 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_queue_request()
295 for (node = llist_del_all(&queue->req_list); node; node = node->next) { in nvme_tcp_process_req_list()
297 list_add(&req->entry, &queue->send_list); in nvme_tcp_process_req_list()
306 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
310 req = list_first_entry_or_null(&queue->send_list, in nvme_tcp_fetch_request()
316 list_del(&req->entry); in nvme_tcp_fetch_request()
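The matched lines above outline a two-stage queue: producers push onto a lock-free llist from any context, and the single io_work consumer drains it into an ordinary list_head that only it touches. llist_del_all() returns nodes newest-first and list_add() prepends, so the double reversal restores FIFO order. A sketch of the drain step (drain_req_list is a hypothetical name):

static void drain_req_list(struct llist_head *in, struct list_head *out)
{
        struct llist_node *node;
        struct nvme_tcp_request *req;

        for (node = llist_del_all(in); node; node = node->next) {
                req = llist_entry(node, struct nvme_tcp_request, lentry);
                list_add(&req->entry, out);     /* prepend: re-reverses */
        }
}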
328 struct page *page, off_t off, size_t len) in nvme_tcp_ddgst_update() argument
333 sg_set_page(&sg, page, len, off); in nvme_tcp_ddgst_update()
334 ahash_request_set_crypt(hash, &sg, NULL, len); in nvme_tcp_ddgst_update()
339 void *pdu, size_t len) in nvme_tcp_hdgst() argument
343 sg_init_one(&sg, pdu, len); in nvme_tcp_hdgst()
344 ahash_request_set_crypt(hash, &sg, pdu + len, len); in nvme_tcp_hdgst()
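Both digest helpers above follow the same one-shot ahash pattern: describe the buffer with a scatterlist, point the request at it, and digest. A condensed sketch (tcp_digest is a hypothetical name; crc32c completes synchronously here, so the return value is elided as in the matched lines):

static void tcp_digest(struct ahash_request *hash,
                       void *buf, size_t len, __le32 *out)
{
        struct scatterlist sg;

        sg_init_one(&sg, buf, len);
        ahash_request_set_crypt(hash, &sg, (u8 *)out, len);
        crypto_ahash_digest(hash);
}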
355 if (unlikely(!(hdr->flags & NVME_TCP_F_HDGST))) { in nvme_tcp_verify_hdgst()
356 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
359 return -EPROTO; in nvme_tcp_verify_hdgst()
362 recv_digest = *(__le32 *)(pdu + hdr->hlen); in nvme_tcp_verify_hdgst()
363 nvme_tcp_hdgst(queue->rcv_hash, pdu, pdu_len); in nvme_tcp_verify_hdgst()
364 exp_digest = *(__le32 *)(pdu + hdr->hlen); in nvme_tcp_verify_hdgst()
366 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_verify_hdgst()
369 return -EIO; in nvme_tcp_verify_hdgst()
379 u32 len; in nvme_tcp_check_ddgst() local
381 len = le32_to_cpu(hdr->plen) - hdr->hlen - in nvme_tcp_check_ddgst()
382 ((hdr->flags & NVME_TCP_F_HDGST) ? digest_len : 0); in nvme_tcp_check_ddgst()
384 if (unlikely(len && !(hdr->flags & NVME_TCP_F_DDGST))) { in nvme_tcp_check_ddgst()
385 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_check_ddgst()
388 return -EPROTO; in nvme_tcp_check_ddgst()
390 crypto_ahash_init(queue->rcv_hash); in nvme_tcp_check_ddgst()
400 page_frag_free(req->pdu); in nvme_tcp_exit_request()
407 struct nvme_tcp_ctrl *ctrl = set->driver_data; in nvme_tcp_init_request() local
409 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; in nvme_tcp_init_request()
410 struct nvme_tcp_queue *queue = &ctrl->queues[queue_idx]; in nvme_tcp_init_request()
413 req->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_init_request()
416 if (!req->pdu) in nvme_tcp_init_request()
417 return -ENOMEM; in nvme_tcp_init_request()
419 req->queue = queue; in nvme_tcp_init_request()
420 nvme_req(rq)->ctrl = &ctrl->ctrl; in nvme_tcp_init_request()
428 struct nvme_tcp_ctrl *ctrl = data; in nvme_tcp_init_hctx() local
429 struct nvme_tcp_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_tcp_init_hctx()
431 hctx->driver_data = queue; in nvme_tcp_init_hctx()
438 struct nvme_tcp_ctrl *ctrl = data; in nvme_tcp_init_admin_hctx() local
439 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_init_admin_hctx()
441 hctx->driver_data = queue; in nvme_tcp_init_admin_hctx()
448 return (queue->pdu_remaining) ? NVME_TCP_RECV_PDU : in nvme_tcp_recv_state()
449 (queue->ddgst_remaining) ? NVME_TCP_RECV_DDGST : in nvme_tcp_recv_state()
455 queue->pdu_remaining = sizeof(struct nvme_tcp_rsp_pdu) + in nvme_tcp_init_recv_ctx()
457 queue->pdu_offset = 0; in nvme_tcp_init_recv_ctx()
458 queue->data_remaining = -1; in nvme_tcp_init_recv_ctx()
459 queue->ddgst_remaining = 0; in nvme_tcp_init_recv_ctx()
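Note that the receive state is never stored: it is derived from whichever "remaining" counter is non-zero, which is why nvme_tcp_init_recv_ctx() only resets counters. The equivalent explicit states (enum names as used elsewhere in the driver):

enum nvme_tcp_recv_state {
        NVME_TCP_RECV_PDU = 0,  /* header bytes outstanding */
        NVME_TCP_RECV_DATA,     /* payload bytes outstanding */
        NVME_TCP_RECV_DDGST,    /* trailing data digest outstanding */
};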
462 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl) in nvme_tcp_error_recovery() argument
464 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) in nvme_tcp_error_recovery()
467 dev_warn(ctrl->device, "starting error recovery\n"); in nvme_tcp_error_recovery()
468 queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work); in nvme_tcp_error_recovery()
476 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id); in nvme_tcp_process_nvme_cqe()
478 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_process_nvme_cqe()
480 nvme_tcp_queue_id(queue), cqe->command_id); in nvme_tcp_process_nvme_cqe()
481 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_process_nvme_cqe()
482 return -EINVAL; in nvme_tcp_process_nvme_cqe()
485 if (!nvme_try_complete_req(rq, cqe->status, cqe->result)) in nvme_tcp_process_nvme_cqe()
487 queue->nr_cqe++; in nvme_tcp_process_nvme_cqe()
497 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_c2h_data()
499 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
501 nvme_tcp_queue_id(queue), pdu->command_id); in nvme_tcp_handle_c2h_data()
502 return -ENOENT; in nvme_tcp_handle_c2h_data()
506 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
508 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
509 return -EIO; in nvme_tcp_handle_c2h_data()
512 queue->data_remaining = le32_to_cpu(pdu->data_length); in nvme_tcp_handle_c2h_data()
514 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS && in nvme_tcp_handle_c2h_data()
515 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) { in nvme_tcp_handle_c2h_data()
516 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_c2h_data()
518 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_handle_c2h_data()
519 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_handle_c2h_data()
520 return -EPROTO; in nvme_tcp_handle_c2h_data()
529 struct nvme_completion *cqe = &pdu->cqe; in nvme_tcp_handle_comp()
539 cqe->command_id))) in nvme_tcp_handle_comp()
540 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_tcp_handle_comp()
541 &cqe->result); in nvme_tcp_handle_comp()
551 struct nvme_tcp_data_pdu *data = req->pdu; in nvme_tcp_setup_h2c_data_pdu()
552 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_h2c_data_pdu()
557 req->pdu_len = le32_to_cpu(pdu->r2t_length); in nvme_tcp_setup_h2c_data_pdu()
558 req->pdu_sent = 0; in nvme_tcp_setup_h2c_data_pdu()
560 if (unlikely(req->data_sent + req->pdu_len > req->data_len)) { in nvme_tcp_setup_h2c_data_pdu()
561 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_h2c_data_pdu()
562 "req %d r2t len %u exceeded data len %u (%zu sent)\n", in nvme_tcp_setup_h2c_data_pdu()
563 rq->tag, req->pdu_len, req->data_len, in nvme_tcp_setup_h2c_data_pdu()
564 req->data_sent); in nvme_tcp_setup_h2c_data_pdu()
565 return -EPROTO; in nvme_tcp_setup_h2c_data_pdu()
568 if (unlikely(le32_to_cpu(pdu->r2t_offset) < req->data_sent)) { in nvme_tcp_setup_h2c_data_pdu()
569 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_h2c_data_pdu()
571 rq->tag, le32_to_cpu(pdu->r2t_offset), in nvme_tcp_setup_h2c_data_pdu()
572 req->data_sent); in nvme_tcp_setup_h2c_data_pdu()
573 return -EPROTO; in nvme_tcp_setup_h2c_data_pdu()
577 data->hdr.type = nvme_tcp_h2c_data; in nvme_tcp_setup_h2c_data_pdu()
578 data->hdr.flags = NVME_TCP_F_DATA_LAST; in nvme_tcp_setup_h2c_data_pdu()
579 if (queue->hdr_digest) in nvme_tcp_setup_h2c_data_pdu()
580 data->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_setup_h2c_data_pdu()
581 if (queue->data_digest) in nvme_tcp_setup_h2c_data_pdu()
582 data->hdr.flags |= NVME_TCP_F_DDGST; in nvme_tcp_setup_h2c_data_pdu()
583 data->hdr.hlen = sizeof(*data); in nvme_tcp_setup_h2c_data_pdu()
584 data->hdr.pdo = data->hdr.hlen + hdgst; in nvme_tcp_setup_h2c_data_pdu()
585 data->hdr.plen = in nvme_tcp_setup_h2c_data_pdu()
586 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst); in nvme_tcp_setup_h2c_data_pdu()
587 data->ttag = pdu->ttag; in nvme_tcp_setup_h2c_data_pdu()
588 data->command_id = rq->tag; in nvme_tcp_setup_h2c_data_pdu()
589 data->data_offset = cpu_to_le32(req->data_sent); in nvme_tcp_setup_h2c_data_pdu()
590 data->data_length = cpu_to_le32(req->pdu_len); in nvme_tcp_setup_h2c_data_pdu()
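Worked header arithmetic for the H2C data PDU built above, assuming both digests are enabled (4-byte CRC32C each) and an illustrative 8 KiB payload:

/*
 * hlen = sizeof(struct nvme_tcp_data_pdu)          = 24
 * pdo  = hlen + hdgst = 24 + 4                     = 28
 * plen = hlen + hdgst + pdu_len + ddgst
 *      = 24 + 4 + 8192 + 4                         = 8224
 */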
601 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_handle_r2t()
603 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_handle_r2t()
605 nvme_tcp_queue_id(queue), pdu->command_id); in nvme_tcp_handle_r2t()
606 return -ENOENT; in nvme_tcp_handle_r2t()
614 req->state = NVME_TCP_SEND_H2C_PDU; in nvme_tcp_handle_r2t()
615 req->offset = 0; in nvme_tcp_handle_r2t()
623 unsigned int *offset, size_t *len) in nvme_tcp_recv_pdu() argument
626 char *pdu = queue->pdu; in nvme_tcp_recv_pdu()
627 size_t rcv_len = min_t(size_t, *len, queue->pdu_remaining); in nvme_tcp_recv_pdu()
631 &pdu[queue->pdu_offset], rcv_len); in nvme_tcp_recv_pdu()
635 queue->pdu_remaining -= rcv_len; in nvme_tcp_recv_pdu()
636 queue->pdu_offset += rcv_len; in nvme_tcp_recv_pdu()
638 *len -= rcv_len; in nvme_tcp_recv_pdu()
639 if (queue->pdu_remaining) in nvme_tcp_recv_pdu()
642 hdr = queue->pdu; in nvme_tcp_recv_pdu()
643 if (queue->hdr_digest) { in nvme_tcp_recv_pdu()
644 ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen); in nvme_tcp_recv_pdu()
650 if (queue->data_digest) { in nvme_tcp_recv_pdu()
651 ret = nvme_tcp_check_ddgst(queue, queue->pdu); in nvme_tcp_recv_pdu()
656 switch (hdr->type) { in nvme_tcp_recv_pdu()
658 return nvme_tcp_handle_c2h_data(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
661 return nvme_tcp_handle_comp(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
664 return nvme_tcp_handle_r2t(queue, (void *)queue->pdu); in nvme_tcp_recv_pdu()
666 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_pdu()
667 "unsupported pdu type (%d)\n", hdr->type); in nvme_tcp_recv_pdu()
668 return -EINVAL; in nvme_tcp_recv_pdu()
681 unsigned int *offset, size_t *len) in nvme_tcp_recv_data() argument
683 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_data()
687 rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), pdu->command_id); in nvme_tcp_recv_data()
689 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
691 nvme_tcp_queue_id(queue), pdu->command_id); in nvme_tcp_recv_data()
692 return -ENOENT; in nvme_tcp_recv_data()
699 recv_len = min_t(size_t, *len, queue->data_remaining); in nvme_tcp_recv_data()
703 if (!iov_iter_count(&req->iter)) { in nvme_tcp_recv_data()
704 req->curr_bio = req->curr_bio->bi_next; in nvme_tcp_recv_data()
710 if (!req->curr_bio) { in nvme_tcp_recv_data()
711 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
713 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
715 return -EIO; in nvme_tcp_recv_data()
722 iov_iter_count(&req->iter)); in nvme_tcp_recv_data()
724 if (queue->data_digest) in nvme_tcp_recv_data()
726 &req->iter, recv_len, queue->rcv_hash); in nvme_tcp_recv_data()
729 &req->iter, recv_len); in nvme_tcp_recv_data()
731 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_data()
733 nvme_tcp_queue_id(queue), rq->tag); in nvme_tcp_recv_data()
737 *len -= recv_len; in nvme_tcp_recv_data()
739 queue->data_remaining -= recv_len; in nvme_tcp_recv_data()
742 if (!queue->data_remaining) { in nvme_tcp_recv_data()
743 if (queue->data_digest) { in nvme_tcp_recv_data()
744 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst); in nvme_tcp_recv_data()
745 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH; in nvme_tcp_recv_data()
747 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { in nvme_tcp_recv_data()
749 queue->nr_cqe++; in nvme_tcp_recv_data()
759 struct sk_buff *skb, unsigned int *offset, size_t *len) in nvme_tcp_recv_ddgst() argument
761 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu; in nvme_tcp_recv_ddgst()
762 char *ddgst = (char *)&queue->recv_ddgst; in nvme_tcp_recv_ddgst()
763 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining); in nvme_tcp_recv_ddgst()
764 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining; in nvme_tcp_recv_ddgst()
771 queue->ddgst_remaining -= recv_len; in nvme_tcp_recv_ddgst()
773 *len -= recv_len; in nvme_tcp_recv_ddgst()
774 if (queue->ddgst_remaining) in nvme_tcp_recv_ddgst()
777 if (queue->recv_ddgst != queue->exp_ddgst) { in nvme_tcp_recv_ddgst()
778 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_ddgst()
780 le32_to_cpu(queue->recv_ddgst), in nvme_tcp_recv_ddgst()
781 le32_to_cpu(queue->exp_ddgst)); in nvme_tcp_recv_ddgst()
782 return -EIO; in nvme_tcp_recv_ddgst()
785 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) { in nvme_tcp_recv_ddgst()
787 pdu->command_id); in nvme_tcp_recv_ddgst()
790 queue->nr_cqe++; in nvme_tcp_recv_ddgst()
798 unsigned int offset, size_t len) in nvme_tcp_recv_skb() argument
800 struct nvme_tcp_queue *queue = desc->arg.data; in nvme_tcp_recv_skb()
801 size_t consumed = len; in nvme_tcp_recv_skb()
804 while (len) { in nvme_tcp_recv_skb()
807 result = nvme_tcp_recv_pdu(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
810 result = nvme_tcp_recv_data(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
813 result = nvme_tcp_recv_ddgst(queue, skb, &offset, &len); in nvme_tcp_recv_skb()
816 result = -EFAULT; in nvme_tcp_recv_skb()
819 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_recv_skb()
821 queue->rd_enabled = false; in nvme_tcp_recv_skb()
822 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_recv_skb()
834 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_data_ready()
835 queue = sk->sk_user_data; in nvme_tcp_data_ready()
836 if (likely(queue && queue->rd_enabled) && in nvme_tcp_data_ready()
837 !test_bit(NVME_TCP_Q_POLLING, &queue->flags)) in nvme_tcp_data_ready()
838 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_data_ready()
839 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_data_ready()
846 read_lock_bh(&sk->sk_callback_lock); in nvme_tcp_write_space()
847 queue = sk->sk_user_data; in nvme_tcp_write_space()
849 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in nvme_tcp_write_space()
850 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_write_space()
852 read_unlock_bh(&sk->sk_callback_lock); in nvme_tcp_write_space()
859 read_lock(&sk->sk_callback_lock); in nvme_tcp_state_change()
860 queue = sk->sk_user_data; in nvme_tcp_state_change()
864 switch (sk->sk_state) { in nvme_tcp_state_change()
870 nvme_tcp_error_recovery(&queue->ctrl->ctrl); in nvme_tcp_state_change()
873 dev_info(queue->ctrl->ctrl.device, in nvme_tcp_state_change()
875 nvme_tcp_queue_id(queue), sk->sk_state); in nvme_tcp_state_change()
878 queue->state_change(sk); in nvme_tcp_state_change()
880 read_unlock(&sk->sk_callback_lock); in nvme_tcp_state_change()
885 return !list_empty(&queue->send_list) || in nvme_tcp_queue_more()
886 !llist_empty(&queue->req_list) || queue->more_requests; in nvme_tcp_queue_more()
891 queue->request = NULL; in nvme_tcp_done_send_req()
901 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data()
906 size_t len = nvme_tcp_req_cur_length(req); in nvme_tcp_try_send_data() local
907 bool last = nvme_tcp_pdu_last_send(req, len); in nvme_tcp_try_send_data()
910 if (last && !queue->data_digest && !nvme_tcp_queue_more(queue)) in nvme_tcp_try_send_data()
916 ret = kernel_sendpage(queue->sock, page, offset, len, in nvme_tcp_try_send_data()
919 ret = sock_no_sendpage(queue->sock, page, offset, len, in nvme_tcp_try_send_data()
926 if (queue->data_digest) in nvme_tcp_try_send_data()
927 nvme_tcp_ddgst_update(queue->snd_hash, page, in nvme_tcp_try_send_data()
931 if (last && ret == len) { in nvme_tcp_try_send_data()
932 if (queue->data_digest) { in nvme_tcp_try_send_data()
933 nvme_tcp_ddgst_final(queue->snd_hash, in nvme_tcp_try_send_data()
934 &req->ddgst); in nvme_tcp_try_send_data()
935 req->state = NVME_TCP_SEND_DDGST; in nvme_tcp_try_send_data()
936 req->offset = 0; in nvme_tcp_try_send_data()
943 return -EAGAIN; in nvme_tcp_try_send_data()
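For context, the flag selection feeding the kernel_sendpage()/sock_no_sendpage() calls above; only the condition line appears in this listing, so the flag values are an assumption following the usual sendpage idiom:

int flags = MSG_DONTWAIT;

if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
        flags |= MSG_EOR;       /* final fragment, nothing queued: push */
else
        flags |= MSG_MORE | MSG_SENDPAGE_NOTLAST; /* let TCP coalesce */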
948 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_cmd_pdu()
949 struct nvme_tcp_cmd_pdu *pdu = req->pdu; in nvme_tcp_try_send_cmd_pdu()
952 int len = sizeof(*pdu) + hdgst - req->offset; in nvme_tcp_try_send_cmd_pdu() local
961 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_cmd_pdu()
962 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_cmd_pdu()
964 ret = kernel_sendpage(queue->sock, virt_to_page(pdu), in nvme_tcp_try_send_cmd_pdu()
965 offset_in_page(pdu) + req->offset, len, flags); in nvme_tcp_try_send_cmd_pdu()
969 len -= ret; in nvme_tcp_try_send_cmd_pdu()
970 if (!len) { in nvme_tcp_try_send_cmd_pdu()
972 req->state = NVME_TCP_SEND_DATA; in nvme_tcp_try_send_cmd_pdu()
973 if (queue->data_digest) in nvme_tcp_try_send_cmd_pdu()
974 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_cmd_pdu()
981 req->offset += ret; in nvme_tcp_try_send_cmd_pdu()
983 return -EAGAIN; in nvme_tcp_try_send_cmd_pdu()
988 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_data_pdu()
989 struct nvme_tcp_data_pdu *pdu = req->pdu; in nvme_tcp_try_send_data_pdu()
991 int len = sizeof(*pdu) - req->offset + hdgst; in nvme_tcp_try_send_data_pdu() local
994 if (queue->hdr_digest && !req->offset) in nvme_tcp_try_send_data_pdu()
995 nvme_tcp_hdgst(queue->snd_hash, pdu, sizeof(*pdu)); in nvme_tcp_try_send_data_pdu()
997 ret = kernel_sendpage(queue->sock, virt_to_page(pdu), in nvme_tcp_try_send_data_pdu()
998 offset_in_page(pdu) + req->offset, len, in nvme_tcp_try_send_data_pdu()
1003 len -= ret; in nvme_tcp_try_send_data_pdu()
1004 if (!len) { in nvme_tcp_try_send_data_pdu()
1005 req->state = NVME_TCP_SEND_DATA; in nvme_tcp_try_send_data_pdu()
1006 if (queue->data_digest) in nvme_tcp_try_send_data_pdu()
1007 crypto_ahash_init(queue->snd_hash); in nvme_tcp_try_send_data_pdu()
1008 if (!req->data_sent) in nvme_tcp_try_send_data_pdu()
1012 req->offset += ret; in nvme_tcp_try_send_data_pdu()
1014 return -EAGAIN; in nvme_tcp_try_send_data_pdu()
1019 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_try_send_ddgst()
1023 .iov_base = &req->ddgst + req->offset, in nvme_tcp_try_send_ddgst()
1024 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset in nvme_tcp_try_send_ddgst()
1032 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_try_send_ddgst()
1036 if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) { in nvme_tcp_try_send_ddgst()
1041 req->offset += ret; in nvme_tcp_try_send_ddgst()
1042 return -EAGAIN; in nvme_tcp_try_send_ddgst()
1050 if (!queue->request) { in nvme_tcp_try_send()
1051 queue->request = nvme_tcp_fetch_request(queue); in nvme_tcp_try_send()
1052 if (!queue->request) in nvme_tcp_try_send()
1055 req = queue->request; in nvme_tcp_try_send()
1057 if (req->state == NVME_TCP_SEND_CMD_PDU) { in nvme_tcp_try_send()
1065 if (req->state == NVME_TCP_SEND_H2C_PDU) { in nvme_tcp_try_send()
1071 if (req->state == NVME_TCP_SEND_DATA) { in nvme_tcp_try_send()
1077 if (req->state == NVME_TCP_SEND_DDGST) in nvme_tcp_try_send()
1080 if (ret == -EAGAIN) { in nvme_tcp_try_send()
1083 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_try_send()
1085 if (ret != -EPIPE && ret != -ECONNRESET) in nvme_tcp_try_send()
1086 nvme_tcp_fail_request(queue->request); in nvme_tcp_try_send()
1094 struct socket *sock = queue->sock; in nvme_tcp_try_recv()
1095 struct sock *sk = sock->sk; in nvme_tcp_try_recv()
1102 queue->nr_cqe = 0; in nvme_tcp_try_recv()
1103 consumed = sock->ops->read_sock(sk, &rd_desc, nvme_tcp_recv_skb); in nvme_tcp_try_recv()
1118 if (mutex_trylock(&queue->send_mutex)) { in nvme_tcp_io_work()
1120 mutex_unlock(&queue->send_mutex); in nvme_tcp_io_work()
1138 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_io_work()
1143 struct crypto_ahash *tfm = crypto_ahash_reqtfm(queue->rcv_hash); in nvme_tcp_free_crypto()
1145 ahash_request_free(queue->rcv_hash); in nvme_tcp_free_crypto()
1146 ahash_request_free(queue->snd_hash); in nvme_tcp_free_crypto()
1158 queue->snd_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1159 if (!queue->snd_hash) in nvme_tcp_alloc_crypto()
1161 ahash_request_set_callback(queue->snd_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1163 queue->rcv_hash = ahash_request_alloc(tfm, GFP_KERNEL); in nvme_tcp_alloc_crypto()
1164 if (!queue->rcv_hash) in nvme_tcp_alloc_crypto()
1166 ahash_request_set_callback(queue->rcv_hash, 0, NULL, NULL); in nvme_tcp_alloc_crypto()
1170 ahash_request_free(queue->snd_hash); in nvme_tcp_alloc_crypto()
1173 return -ENOMEM; in nvme_tcp_alloc_crypto()
1176 static void nvme_tcp_free_async_req(struct nvme_tcp_ctrl *ctrl) in nvme_tcp_free_async_req() argument
1178 struct nvme_tcp_request *async = &ctrl->async_req; in nvme_tcp_free_async_req()
1180 page_frag_free(async->pdu); in nvme_tcp_free_async_req()
1183 static int nvme_tcp_alloc_async_req(struct nvme_tcp_ctrl *ctrl) in nvme_tcp_alloc_async_req() argument
1185 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1186 struct nvme_tcp_request *async = &ctrl->async_req; in nvme_tcp_alloc_async_req()
1189 async->pdu = page_frag_alloc(&queue->pf_cache, in nvme_tcp_alloc_async_req()
1192 if (!async->pdu) in nvme_tcp_alloc_async_req()
1193 return -ENOMEM; in nvme_tcp_alloc_async_req()
1195 async->queue = &ctrl->queues[0]; in nvme_tcp_alloc_async_req()
1201 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_free_queue() local
1202 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_free_queue()
1204 if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) in nvme_tcp_free_queue()
1207 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_free_queue()
1210 sock_release(queue->sock); in nvme_tcp_free_queue()
1211 kfree(queue->pdu); in nvme_tcp_free_queue()
1225 return -ENOMEM; in nvme_tcp_init_connection()
1229 ret = -ENOMEM; in nvme_tcp_init_connection()
1233 icreq->hdr.type = nvme_tcp_icreq; in nvme_tcp_init_connection()
1234 icreq->hdr.hlen = sizeof(*icreq); in nvme_tcp_init_connection()
1235 icreq->hdr.pdo = 0; in nvme_tcp_init_connection()
1236 icreq->hdr.plen = cpu_to_le32(icreq->hdr.hlen); in nvme_tcp_init_connection()
1237 icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0); in nvme_tcp_init_connection()
1238 icreq->maxr2t = 0; /* single inflight r2t supported */ in nvme_tcp_init_connection()
1239 icreq->hpda = 0; /* no alignment constraint */ in nvme_tcp_init_connection()
1240 if (queue->hdr_digest) in nvme_tcp_init_connection()
1241 icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE; in nvme_tcp_init_connection()
1242 if (queue->data_digest) in nvme_tcp_init_connection()
1243 icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE; in nvme_tcp_init_connection()
1247 ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len); in nvme_tcp_init_connection()
1254 ret = kernel_recvmsg(queue->sock, &msg, &iov, 1, in nvme_tcp_init_connection()
1259 ret = -EINVAL; in nvme_tcp_init_connection()
1260 if (icresp->hdr.type != nvme_tcp_icresp) { in nvme_tcp_init_connection()
1262 nvme_tcp_queue_id(queue), icresp->hdr.type); in nvme_tcp_init_connection()
1266 if (le32_to_cpu(icresp->hdr.plen) != sizeof(*icresp)) { in nvme_tcp_init_connection()
1268 nvme_tcp_queue_id(queue), icresp->hdr.plen); in nvme_tcp_init_connection()
1272 if (icresp->pfv != NVME_TCP_PFV_1_0) { in nvme_tcp_init_connection()
1274 nvme_tcp_queue_id(queue), icresp->pfv); in nvme_tcp_init_connection()
1278 ctrl_ddgst = !!(icresp->digest & NVME_TCP_DATA_DIGEST_ENABLE); in nvme_tcp_init_connection()
1279 if ((queue->data_digest && !ctrl_ddgst) || in nvme_tcp_init_connection()
1280 (!queue->data_digest && ctrl_ddgst)) { in nvme_tcp_init_connection()
1281 pr_err("queue %d: data digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1283 queue->data_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1288 ctrl_hdgst = !!(icresp->digest & NVME_TCP_HDR_DIGEST_ENABLE); in nvme_tcp_init_connection()
1289 if ((queue->hdr_digest && !ctrl_hdgst) || in nvme_tcp_init_connection()
1290 (!queue->hdr_digest && ctrl_hdgst)) { in nvme_tcp_init_connection()
1291 pr_err("queue %d: header digest mismatch host: %s ctrl: %s\n", in nvme_tcp_init_connection()
1293 queue->hdr_digest ? "enabled" : "disabled", in nvme_tcp_init_connection()
1298 if (icresp->cpda != 0) { in nvme_tcp_init_connection()
1300 nvme_tcp_queue_id(queue), icresp->cpda); in nvme_tcp_init_connection()
1319 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_default_queue() local
1323 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_default_queue()
1328 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_read_queue() local
1333 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_tcp_read_queue()
1334 ctrl->io_queues[HCTX_TYPE_READ]; in nvme_tcp_read_queue()
1339 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_poll_queue() local
1345 qid < 1 + ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_tcp_poll_queue()
1346 ctrl->io_queues[HCTX_TYPE_READ] + in nvme_tcp_poll_queue()
1347 ctrl->io_queues[HCTX_TYPE_POLL]; in nvme_tcp_poll_queue()
1352 struct nvme_tcp_ctrl *ctrl = queue->ctrl; in nvme_tcp_set_queue_io_cpu() local
1357 n = qid - 1; in nvme_tcp_set_queue_io_cpu()
1359 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - 1; in nvme_tcp_set_queue_io_cpu()
1361 n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] - in nvme_tcp_set_queue_io_cpu()
1362 ctrl->io_queues[HCTX_TYPE_READ] - 1; in nvme_tcp_set_queue_io_cpu()
1363 queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false); in nvme_tcp_set_queue_io_cpu()
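Worked qid-to-CPU mapping for the lines above (queue counts assumed for illustration: 4 default, 2 read, 2 poll):

/*
 * default qid 1..4: n = qid - 1                 -> 0..3
 * read    qid 5..6: n = qid - 4 - 1             -> 0..1
 * poll    qid 7..8: n = qid - 4 - 2 - 1         -> 0..1
 * qid 0 (admin) matches none of the classes, so n stays 0.
 * io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false),
 * i.e. the n-th online CPU, wrapping when n exceeds the online count.
 */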
1369 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_alloc_queue() local
1370 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_alloc_queue()
1373 queue->ctrl = ctrl; in nvme_tcp_alloc_queue()
1374 init_llist_head(&queue->req_list); in nvme_tcp_alloc_queue()
1375 INIT_LIST_HEAD(&queue->send_list); in nvme_tcp_alloc_queue()
1376 mutex_init(&queue->send_mutex); in nvme_tcp_alloc_queue()
1377 INIT_WORK(&queue->io_work, nvme_tcp_io_work); in nvme_tcp_alloc_queue()
1378 queue->queue_size = queue_size; in nvme_tcp_alloc_queue()
1381 queue->cmnd_capsule_len = nctrl->ioccsz * 16; in nvme_tcp_alloc_queue()
1383 queue->cmnd_capsule_len = sizeof(struct nvme_command) + in nvme_tcp_alloc_queue()
1386 ret = sock_create(ctrl->addr.ss_family, SOCK_STREAM, in nvme_tcp_alloc_queue()
1387 IPPROTO_TCP, &queue->sock); in nvme_tcp_alloc_queue()
1389 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1395 tcp_sock_set_syncnt(queue->sock->sk, 1); in nvme_tcp_alloc_queue()
1398 tcp_sock_set_nodelay(queue->sock->sk); in nvme_tcp_alloc_queue()
1405 sock_no_linger(queue->sock->sk); in nvme_tcp_alloc_queue()
1408 sock_set_priority(queue->sock->sk, so_priority); in nvme_tcp_alloc_queue()
1411 if (nctrl->opts->tos >= 0) in nvme_tcp_alloc_queue()
1412 ip_sock_set_tos(queue->sock->sk, nctrl->opts->tos); in nvme_tcp_alloc_queue()
1415 queue->sock->sk->sk_rcvtimeo = 10 * HZ; in nvme_tcp_alloc_queue()
1417 queue->sock->sk->sk_allocation = GFP_ATOMIC; in nvme_tcp_alloc_queue()
1419 queue->request = NULL; in nvme_tcp_alloc_queue()
1420 queue->data_remaining = 0; in nvme_tcp_alloc_queue()
1421 queue->ddgst_remaining = 0; in nvme_tcp_alloc_queue()
1422 queue->pdu_remaining = 0; in nvme_tcp_alloc_queue()
1423 queue->pdu_offset = 0; in nvme_tcp_alloc_queue()
1424 sk_set_memalloc(queue->sock->sk); in nvme_tcp_alloc_queue()
1426 if (nctrl->opts->mask & NVMF_OPT_HOST_TRADDR) { in nvme_tcp_alloc_queue()
1427 ret = kernel_bind(queue->sock, (struct sockaddr *)&ctrl->src_addr, in nvme_tcp_alloc_queue()
1428 sizeof(ctrl->src_addr)); in nvme_tcp_alloc_queue()
1430 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1437 queue->hdr_digest = nctrl->opts->hdr_digest; in nvme_tcp_alloc_queue()
1438 queue->data_digest = nctrl->opts->data_digest; in nvme_tcp_alloc_queue()
1439 if (queue->hdr_digest || queue->data_digest) { in nvme_tcp_alloc_queue()
1442 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1450 queue->pdu = kmalloc(rcv_pdu_size, GFP_KERNEL); in nvme_tcp_alloc_queue()
1451 if (!queue->pdu) { in nvme_tcp_alloc_queue()
1452 ret = -ENOMEM; in nvme_tcp_alloc_queue()
1456 dev_dbg(nctrl->device, "connecting queue %d\n", in nvme_tcp_alloc_queue()
1459 ret = kernel_connect(queue->sock, (struct sockaddr *)&ctrl->addr, in nvme_tcp_alloc_queue()
1460 sizeof(ctrl->addr), 0); in nvme_tcp_alloc_queue()
1462 dev_err(nctrl->device, in nvme_tcp_alloc_queue()
1471 queue->rd_enabled = true; in nvme_tcp_alloc_queue()
1472 set_bit(NVME_TCP_Q_ALLOCATED, &queue->flags); in nvme_tcp_alloc_queue()
1475 write_lock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_alloc_queue()
1476 queue->sock->sk->sk_user_data = queue; in nvme_tcp_alloc_queue()
1477 queue->state_change = queue->sock->sk->sk_state_change; in nvme_tcp_alloc_queue()
1478 queue->data_ready = queue->sock->sk->sk_data_ready; in nvme_tcp_alloc_queue()
1479 queue->write_space = queue->sock->sk->sk_write_space; in nvme_tcp_alloc_queue()
1480 queue->sock->sk->sk_data_ready = nvme_tcp_data_ready; in nvme_tcp_alloc_queue()
1481 queue->sock->sk->sk_state_change = nvme_tcp_state_change; in nvme_tcp_alloc_queue()
1482 queue->sock->sk->sk_write_space = nvme_tcp_write_space; in nvme_tcp_alloc_queue()
1484 queue->sock->sk->sk_ll_usec = 1; in nvme_tcp_alloc_queue()
1486 write_unlock_bh(&queue->sock->sk->sk_callback_lock); in nvme_tcp_alloc_queue()
1491 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in nvme_tcp_alloc_queue()
1493 kfree(queue->pdu); in nvme_tcp_alloc_queue()
1495 if (queue->hdr_digest || queue->data_digest) in nvme_tcp_alloc_queue()
1498 sock_release(queue->sock); in nvme_tcp_alloc_queue()
1499 queue->sock = NULL; in nvme_tcp_alloc_queue()
1505 struct socket *sock = queue->sock; in nvme_tcp_restore_sock_calls()
1507 write_lock_bh(&sock->sk->sk_callback_lock); in nvme_tcp_restore_sock_calls()
1508 sock->sk->sk_user_data = NULL; in nvme_tcp_restore_sock_calls()
1509 sock->sk->sk_data_ready = queue->data_ready; in nvme_tcp_restore_sock_calls()
1510 sock->sk->sk_state_change = queue->state_change; in nvme_tcp_restore_sock_calls()
1511 sock->sk->sk_write_space = queue->write_space; in nvme_tcp_restore_sock_calls()
1512 write_unlock_bh(&sock->sk->sk_callback_lock); in nvme_tcp_restore_sock_calls()
1517 kernel_sock_shutdown(queue->sock, SHUT_RDWR); in __nvme_tcp_stop_queue()
1519 cancel_work_sync(&queue->io_work); in __nvme_tcp_stop_queue()
1524 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_stop_queue() local
1525 struct nvme_tcp_queue *queue = &ctrl->queues[qid]; in nvme_tcp_stop_queue()
1527 if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_stop_queue()
1534 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_start_queue() local
1543 set_bit(NVME_TCP_Q_LIVE, &ctrl->queues[idx].flags); in nvme_tcp_start_queue()
1545 if (test_bit(NVME_TCP_Q_ALLOCATED, &ctrl->queues[idx].flags)) in nvme_tcp_start_queue()
1546 __nvme_tcp_stop_queue(&ctrl->queues[idx]); in nvme_tcp_start_queue()
1547 dev_err(nctrl->device, in nvme_tcp_start_queue()
1556 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_alloc_tagset() local
1561 set = &ctrl->admin_tag_set; in nvme_tcp_alloc_tagset()
1563 set->ops = &nvme_tcp_admin_mq_ops; in nvme_tcp_alloc_tagset()
1564 set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; in nvme_tcp_alloc_tagset()
1565 set->reserved_tags = 2; /* connect + keep-alive */ in nvme_tcp_alloc_tagset()
1566 set->numa_node = nctrl->numa_node; in nvme_tcp_alloc_tagset()
1567 set->flags = BLK_MQ_F_BLOCKING; in nvme_tcp_alloc_tagset()
1568 set->cmd_size = sizeof(struct nvme_tcp_request); in nvme_tcp_alloc_tagset()
1569 set->driver_data = ctrl; in nvme_tcp_alloc_tagset()
1570 set->nr_hw_queues = 1; in nvme_tcp_alloc_tagset()
1571 set->timeout = ADMIN_TIMEOUT; in nvme_tcp_alloc_tagset()
1573 set = &ctrl->tag_set; in nvme_tcp_alloc_tagset()
1575 set->ops = &nvme_tcp_mq_ops; in nvme_tcp_alloc_tagset()
1576 set->queue_depth = nctrl->sqsize + 1; in nvme_tcp_alloc_tagset()
1577 set->reserved_tags = 1; /* fabric connect */ in nvme_tcp_alloc_tagset()
1578 set->numa_node = nctrl->numa_node; in nvme_tcp_alloc_tagset()
1579 set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING; in nvme_tcp_alloc_tagset()
1580 set->cmd_size = sizeof(struct nvme_tcp_request); in nvme_tcp_alloc_tagset()
1581 set->driver_data = ctrl; in nvme_tcp_alloc_tagset()
1582 set->nr_hw_queues = nctrl->queue_count - 1; in nvme_tcp_alloc_tagset()
1583 set->timeout = NVME_IO_TIMEOUT; in nvme_tcp_alloc_tagset()
1584 set->nr_maps = nctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2; in nvme_tcp_alloc_tagset()
1594 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl) in nvme_tcp_free_admin_queue() argument
1596 if (to_tcp_ctrl(ctrl)->async_req.pdu) { in nvme_tcp_free_admin_queue()
1597 cancel_work_sync(&ctrl->async_event_work); in nvme_tcp_free_admin_queue()
1598 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl)); in nvme_tcp_free_admin_queue()
1599 to_tcp_ctrl(ctrl)->async_req.pdu = NULL; in nvme_tcp_free_admin_queue()
1602 nvme_tcp_free_queue(ctrl, 0); in nvme_tcp_free_admin_queue()
1605 static void nvme_tcp_free_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_free_io_queues() argument
1609 for (i = 1; i < ctrl->queue_count; i++) in nvme_tcp_free_io_queues()
1610 nvme_tcp_free_queue(ctrl, i); in nvme_tcp_free_io_queues()
1613 static void nvme_tcp_stop_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_stop_io_queues() argument
1617 for (i = 1; i < ctrl->queue_count; i++) in nvme_tcp_stop_io_queues()
1618 nvme_tcp_stop_queue(ctrl, i); in nvme_tcp_stop_io_queues()
1621 static int nvme_tcp_start_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_start_io_queues() argument
1625 for (i = 1; i < ctrl->queue_count; i++) { in nvme_tcp_start_io_queues()
1626 ret = nvme_tcp_start_queue(ctrl, i); in nvme_tcp_start_io_queues()
1634 for (i--; i >= 1; i--) in nvme_tcp_start_io_queues()
1635 nvme_tcp_stop_queue(ctrl, i); in nvme_tcp_start_io_queues()
1639 static int nvme_tcp_alloc_admin_queue(struct nvme_ctrl *ctrl) in nvme_tcp_alloc_admin_queue() argument
1643 ret = nvme_tcp_alloc_queue(ctrl, 0, NVME_AQ_DEPTH); in nvme_tcp_alloc_admin_queue()
1647 ret = nvme_tcp_alloc_async_req(to_tcp_ctrl(ctrl)); in nvme_tcp_alloc_admin_queue()
1654 nvme_tcp_free_queue(ctrl, 0); in nvme_tcp_alloc_admin_queue()
1658 static int __nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) in __nvme_tcp_alloc_io_queues() argument
1662 for (i = 1; i < ctrl->queue_count; i++) { in __nvme_tcp_alloc_io_queues()
1663 ret = nvme_tcp_alloc_queue(ctrl, i, in __nvme_tcp_alloc_io_queues()
1664 ctrl->sqsize + 1); in __nvme_tcp_alloc_io_queues()
1672 for (i--; i >= 1; i--) in __nvme_tcp_alloc_io_queues()
1673 nvme_tcp_free_queue(ctrl, i); in __nvme_tcp_alloc_io_queues()
1678 static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_nr_io_queues() argument
1682 nr_io_queues = min(ctrl->opts->nr_io_queues, num_online_cpus()); in nvme_tcp_nr_io_queues()
1683 nr_io_queues += min(ctrl->opts->nr_write_queues, num_online_cpus()); in nvme_tcp_nr_io_queues()
1684 nr_io_queues += min(ctrl->opts->nr_poll_queues, num_online_cpus()); in nvme_tcp_nr_io_queues()
1692 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_set_io_queues() local
1693 struct nvmf_ctrl_options *opts = nctrl->opts; in nvme_tcp_set_io_queues()
1695 if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) { in nvme_tcp_set_io_queues()
1701 ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues; in nvme_tcp_set_io_queues()
1702 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ]; in nvme_tcp_set_io_queues()
1703 ctrl->io_queues[HCTX_TYPE_DEFAULT] = in nvme_tcp_set_io_queues()
1704 min(opts->nr_write_queues, nr_io_queues); in nvme_tcp_set_io_queues()
1705 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_set_io_queues()
1712 ctrl->io_queues[HCTX_TYPE_DEFAULT] = in nvme_tcp_set_io_queues()
1713 min(opts->nr_io_queues, nr_io_queues); in nvme_tcp_set_io_queues()
1714 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_set_io_queues()
1717 if (opts->nr_poll_queues && nr_io_queues) { in nvme_tcp_set_io_queues()
1719 ctrl->io_queues[HCTX_TYPE_POLL] = in nvme_tcp_set_io_queues()
1720 min(opts->nr_poll_queues, nr_io_queues); in nvme_tcp_set_io_queues()
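Worked distribution for the policy above (requested counts assumed: 4 io, 2 write, 2 poll; a controller grant of 6 total is also assumed). Since nr_write_queues is set and 4 < 6, reads get dedicated queues:

/*
 * READ    = nr_io_queues(opts)       = 4    (6 - 4 = 2 left)
 * DEFAULT = min(nr_write_queues, 2)  = 2    (0 left; used for writes)
 * POLL    = min(nr_poll_queues, 0)   = 0    (starved by the low grant)
 *
 * Without nr_write_queues: DEFAULT = min(4, 6) = 4, POLL = min(2, 2) = 2,
 * and reads and writes share the DEFAULT queues.
 */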
1724 static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl) in nvme_tcp_alloc_io_queues() argument
1729 nr_io_queues = nvme_tcp_nr_io_queues(ctrl); in nvme_tcp_alloc_io_queues()
1730 ret = nvme_set_queue_count(ctrl, &nr_io_queues); in nvme_tcp_alloc_io_queues()
1734 ctrl->queue_count = nr_io_queues + 1; in nvme_tcp_alloc_io_queues()
1735 if (ctrl->queue_count < 2) in nvme_tcp_alloc_io_queues()
1738 dev_info(ctrl->device, in nvme_tcp_alloc_io_queues()
1741 nvme_tcp_set_io_queues(ctrl, nr_io_queues); in nvme_tcp_alloc_io_queues()
1743 return __nvme_tcp_alloc_io_queues(ctrl); in nvme_tcp_alloc_io_queues()
1746 static void nvme_tcp_destroy_io_queues(struct nvme_ctrl *ctrl, bool remove) in nvme_tcp_destroy_io_queues() argument
1748 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_destroy_io_queues()
1750 blk_cleanup_queue(ctrl->connect_q); in nvme_tcp_destroy_io_queues()
1751 blk_mq_free_tag_set(ctrl->tagset); in nvme_tcp_destroy_io_queues()
1753 nvme_tcp_free_io_queues(ctrl); in nvme_tcp_destroy_io_queues()
1756 static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_configure_io_queues() argument
1760 ret = nvme_tcp_alloc_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1765 ctrl->tagset = nvme_tcp_alloc_tagset(ctrl, false); in nvme_tcp_configure_io_queues()
1766 if (IS_ERR(ctrl->tagset)) { in nvme_tcp_configure_io_queues()
1767 ret = PTR_ERR(ctrl->tagset); in nvme_tcp_configure_io_queues()
1771 ctrl->connect_q = blk_mq_init_queue(ctrl->tagset); in nvme_tcp_configure_io_queues()
1772 if (IS_ERR(ctrl->connect_q)) { in nvme_tcp_configure_io_queues()
1773 ret = PTR_ERR(ctrl->connect_q); in nvme_tcp_configure_io_queues()
1778 ret = nvme_tcp_start_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1783 nvme_start_queues(ctrl); in nvme_tcp_configure_io_queues()
1784 if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) { in nvme_tcp_configure_io_queues()
1790 ret = -ENODEV; in nvme_tcp_configure_io_queues()
1793 blk_mq_update_nr_hw_queues(ctrl->tagset, in nvme_tcp_configure_io_queues()
1794 ctrl->queue_count - 1); in nvme_tcp_configure_io_queues()
1795 nvme_unfreeze(ctrl); in nvme_tcp_configure_io_queues()
1801 nvme_stop_queues(ctrl); in nvme_tcp_configure_io_queues()
1802 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1805 blk_cleanup_queue(ctrl->connect_q); in nvme_tcp_configure_io_queues()
1808 blk_mq_free_tag_set(ctrl->tagset); in nvme_tcp_configure_io_queues()
1810 nvme_tcp_free_io_queues(ctrl); in nvme_tcp_configure_io_queues()
1814 static void nvme_tcp_destroy_admin_queue(struct nvme_ctrl *ctrl, bool remove) in nvme_tcp_destroy_admin_queue() argument
1816 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_destroy_admin_queue()
1818 blk_cleanup_queue(ctrl->admin_q); in nvme_tcp_destroy_admin_queue()
1819 blk_cleanup_queue(ctrl->fabrics_q); in nvme_tcp_destroy_admin_queue()
1820 blk_mq_free_tag_set(ctrl->admin_tagset); in nvme_tcp_destroy_admin_queue()
1822 nvme_tcp_free_admin_queue(ctrl); in nvme_tcp_destroy_admin_queue()
1825 static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_configure_admin_queue() argument
1829 error = nvme_tcp_alloc_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
1834 ctrl->admin_tagset = nvme_tcp_alloc_tagset(ctrl, true); in nvme_tcp_configure_admin_queue()
1835 if (IS_ERR(ctrl->admin_tagset)) { in nvme_tcp_configure_admin_queue()
1836 error = PTR_ERR(ctrl->admin_tagset); in nvme_tcp_configure_admin_queue()
1840 ctrl->fabrics_q = blk_mq_init_queue(ctrl->admin_tagset); in nvme_tcp_configure_admin_queue()
1841 if (IS_ERR(ctrl->fabrics_q)) { in nvme_tcp_configure_admin_queue()
1842 error = PTR_ERR(ctrl->fabrics_q); in nvme_tcp_configure_admin_queue()
1846 ctrl->admin_q = blk_mq_init_queue(ctrl->admin_tagset); in nvme_tcp_configure_admin_queue()
1847 if (IS_ERR(ctrl->admin_q)) { in nvme_tcp_configure_admin_queue()
1848 error = PTR_ERR(ctrl->admin_q); in nvme_tcp_configure_admin_queue()
1853 error = nvme_tcp_start_queue(ctrl, 0); in nvme_tcp_configure_admin_queue()
1857 error = nvme_enable_ctrl(ctrl); in nvme_tcp_configure_admin_queue()
1861 blk_mq_unquiesce_queue(ctrl->admin_q); in nvme_tcp_configure_admin_queue()
1863 error = nvme_init_identify(ctrl); in nvme_tcp_configure_admin_queue()
1870 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_configure_admin_queue()
1873 blk_cleanup_queue(ctrl->admin_q); in nvme_tcp_configure_admin_queue()
1876 blk_cleanup_queue(ctrl->fabrics_q); in nvme_tcp_configure_admin_queue()
1879 blk_mq_free_tag_set(ctrl->admin_tagset); in nvme_tcp_configure_admin_queue()
1881 nvme_tcp_free_admin_queue(ctrl); in nvme_tcp_configure_admin_queue()
1885 static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, in nvme_tcp_teardown_admin_queue() argument
1888 blk_mq_quiesce_queue(ctrl->admin_q); in nvme_tcp_teardown_admin_queue()
1889 blk_sync_queue(ctrl->admin_q); in nvme_tcp_teardown_admin_queue()
1890 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_teardown_admin_queue()
1891 if (ctrl->admin_tagset) { in nvme_tcp_teardown_admin_queue()
1892 blk_mq_tagset_busy_iter(ctrl->admin_tagset, in nvme_tcp_teardown_admin_queue()
1893 nvme_cancel_request, ctrl); in nvme_tcp_teardown_admin_queue()
1894 blk_mq_tagset_wait_completed_request(ctrl->admin_tagset); in nvme_tcp_teardown_admin_queue()
1897 blk_mq_unquiesce_queue(ctrl->admin_q); in nvme_tcp_teardown_admin_queue()
1898 nvme_tcp_destroy_admin_queue(ctrl, remove); in nvme_tcp_teardown_admin_queue()
1901 static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, in nvme_tcp_teardown_io_queues() argument
1904 if (ctrl->queue_count <= 1) in nvme_tcp_teardown_io_queues()
1906 blk_mq_quiesce_queue(ctrl->admin_q); in nvme_tcp_teardown_io_queues()
1907 nvme_start_freeze(ctrl); in nvme_tcp_teardown_io_queues()
1908 nvme_stop_queues(ctrl); in nvme_tcp_teardown_io_queues()
1909 nvme_sync_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
1910 nvme_tcp_stop_io_queues(ctrl); in nvme_tcp_teardown_io_queues()
1911 if (ctrl->tagset) { in nvme_tcp_teardown_io_queues()
1912 blk_mq_tagset_busy_iter(ctrl->tagset, in nvme_tcp_teardown_io_queues()
1913 nvme_cancel_request, ctrl); in nvme_tcp_teardown_io_queues()
1914 blk_mq_tagset_wait_completed_request(ctrl->tagset); in nvme_tcp_teardown_io_queues()
1917 nvme_start_queues(ctrl); in nvme_tcp_teardown_io_queues()
1918 nvme_tcp_destroy_io_queues(ctrl, remove); in nvme_tcp_teardown_io_queues()
1921 static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl) in nvme_tcp_reconnect_or_remove() argument
1924 if (ctrl->state != NVME_CTRL_CONNECTING) { in nvme_tcp_reconnect_or_remove()
1925 WARN_ON_ONCE(ctrl->state == NVME_CTRL_NEW || in nvme_tcp_reconnect_or_remove()
1926 ctrl->state == NVME_CTRL_LIVE); in nvme_tcp_reconnect_or_remove()
1930 if (nvmf_should_reconnect(ctrl)) { in nvme_tcp_reconnect_or_remove()
1931 dev_info(ctrl->device, "Reconnecting in %d seconds...\n", in nvme_tcp_reconnect_or_remove()
1932 ctrl->opts->reconnect_delay); in nvme_tcp_reconnect_or_remove()
1933 queue_delayed_work(nvme_wq, &to_tcp_ctrl(ctrl)->connect_work, in nvme_tcp_reconnect_or_remove()
1934 ctrl->opts->reconnect_delay * HZ); in nvme_tcp_reconnect_or_remove()
1936 dev_info(ctrl->device, "Removing controller...\n"); in nvme_tcp_reconnect_or_remove()
1937 nvme_delete_ctrl(ctrl); in nvme_tcp_reconnect_or_remove()
1941 static int nvme_tcp_setup_ctrl(struct nvme_ctrl *ctrl, bool new) in nvme_tcp_setup_ctrl() argument
1943 struct nvmf_ctrl_options *opts = ctrl->opts; in nvme_tcp_setup_ctrl()
1946 ret = nvme_tcp_configure_admin_queue(ctrl, new); in nvme_tcp_setup_ctrl()
1950 if (ctrl->icdoff) { in nvme_tcp_setup_ctrl()
1951 dev_err(ctrl->device, "icdoff is not supported!\n"); in nvme_tcp_setup_ctrl()
1955 if (opts->queue_size > ctrl->sqsize + 1) in nvme_tcp_setup_ctrl()
1956 dev_warn(ctrl->device, in nvme_tcp_setup_ctrl()
1957 "queue_size %zu > ctrl sqsize %u, clamping down\n", in nvme_tcp_setup_ctrl()
1958 opts->queue_size, ctrl->sqsize + 1); in nvme_tcp_setup_ctrl()
1960 if (ctrl->sqsize + 1 > ctrl->maxcmd) { in nvme_tcp_setup_ctrl()
1961 dev_warn(ctrl->device, in nvme_tcp_setup_ctrl()
1962 "sqsize %u > ctrl maxcmd %u, clamping down\n", in nvme_tcp_setup_ctrl()
1963 ctrl->sqsize + 1, ctrl->maxcmd); in nvme_tcp_setup_ctrl()
1964 ctrl->sqsize = ctrl->maxcmd - 1; in nvme_tcp_setup_ctrl()
1967 if (ctrl->queue_count > 1) { in nvme_tcp_setup_ctrl()
1968 ret = nvme_tcp_configure_io_queues(ctrl, new); in nvme_tcp_setup_ctrl()
1973 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE)) { in nvme_tcp_setup_ctrl()
1975 * state change failure is ok if we started ctrl delete, in nvme_tcp_setup_ctrl()
1979 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING && in nvme_tcp_setup_ctrl()
1980 ctrl->state != NVME_CTRL_DELETING_NOIO); in nvme_tcp_setup_ctrl()
1982 ret = -EINVAL; in nvme_tcp_setup_ctrl()
1986 nvme_start_ctrl(ctrl); in nvme_tcp_setup_ctrl()
1990 if (ctrl->queue_count > 1) in nvme_tcp_setup_ctrl()
1991 nvme_tcp_destroy_io_queues(ctrl, new); in nvme_tcp_setup_ctrl()
1993 nvme_tcp_stop_queue(ctrl, 0); in nvme_tcp_setup_ctrl()
1994 nvme_tcp_destroy_admin_queue(ctrl, new); in nvme_tcp_setup_ctrl()
2002 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; in nvme_tcp_reconnect_ctrl_work() local
2004 ++ctrl->nr_reconnects; in nvme_tcp_reconnect_ctrl_work()
2006 if (nvme_tcp_setup_ctrl(ctrl, false)) in nvme_tcp_reconnect_ctrl_work()
2009 dev_info(ctrl->device, "Successfully reconnected (%d attempt)\n", in nvme_tcp_reconnect_ctrl_work()
2010 ctrl->nr_reconnects); in nvme_tcp_reconnect_ctrl_work()
2012 ctrl->nr_reconnects = 0; in nvme_tcp_reconnect_ctrl_work()
2017 dev_info(ctrl->device, "Failed reconnect attempt %d\n", in nvme_tcp_reconnect_ctrl_work()
2018 ctrl->nr_reconnects); in nvme_tcp_reconnect_ctrl_work()
2019 nvme_tcp_reconnect_or_remove(ctrl); in nvme_tcp_reconnect_ctrl_work()
2026 struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; in nvme_tcp_error_recovery_work() local
2028 nvme_stop_keep_alive(ctrl); in nvme_tcp_error_recovery_work()
2029 nvme_tcp_teardown_io_queues(ctrl, false); in nvme_tcp_error_recovery_work()
2031 nvme_start_queues(ctrl); in nvme_tcp_error_recovery_work()
2032 nvme_tcp_teardown_admin_queue(ctrl, false); in nvme_tcp_error_recovery_work()
2033 blk_mq_unquiesce_queue(ctrl->admin_q); in nvme_tcp_error_recovery_work()
2035 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { in nvme_tcp_error_recovery_work()
2036 /* state change failure is ok if we started ctrl delete */ in nvme_tcp_error_recovery_work()
2037 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING && in nvme_tcp_error_recovery_work()
2038 ctrl->state != NVME_CTRL_DELETING_NOIO); in nvme_tcp_error_recovery_work()
2042 nvme_tcp_reconnect_or_remove(ctrl); in nvme_tcp_error_recovery_work()
2045 static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown) in nvme_tcp_teardown_ctrl() argument
2047 cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work); in nvme_tcp_teardown_ctrl()
2048 cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); in nvme_tcp_teardown_ctrl()
2050 nvme_tcp_teardown_io_queues(ctrl, shutdown); in nvme_tcp_teardown_ctrl()
2051 blk_mq_quiesce_queue(ctrl->admin_q); in nvme_tcp_teardown_ctrl()
2053 nvme_shutdown_ctrl(ctrl); in nvme_tcp_teardown_ctrl()
2055 nvme_disable_ctrl(ctrl); in nvme_tcp_teardown_ctrl()
2056 nvme_tcp_teardown_admin_queue(ctrl, shutdown); in nvme_tcp_teardown_ctrl()
2059 static void nvme_tcp_delete_ctrl(struct nvme_ctrl *ctrl) in nvme_tcp_delete_ctrl() argument
2061 nvme_tcp_teardown_ctrl(ctrl, true); in nvme_tcp_delete_ctrl()
2066 struct nvme_ctrl *ctrl = in nvme_reset_ctrl_work() local
2069 nvme_stop_ctrl(ctrl); in nvme_reset_ctrl_work()
2070 nvme_tcp_teardown_ctrl(ctrl, false); in nvme_reset_ctrl_work()
2072 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) { in nvme_reset_ctrl_work()
2073 /* state change failure is ok if we started ctrl delete */ in nvme_reset_ctrl_work()
2074 WARN_ON_ONCE(ctrl->state != NVME_CTRL_DELETING && in nvme_reset_ctrl_work()
2075 ctrl->state != NVME_CTRL_DELETING_NOIO); in nvme_reset_ctrl_work()
2079 if (nvme_tcp_setup_ctrl(ctrl, false)) in nvme_reset_ctrl_work()
2085 ++ctrl->nr_reconnects; in nvme_reset_ctrl_work()
2086 nvme_tcp_reconnect_or_remove(ctrl); in nvme_reset_ctrl_work()
2091 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); in nvme_tcp_free_ctrl() local
2093 if (list_empty(&ctrl->list)) in nvme_tcp_free_ctrl()
2097 list_del(&ctrl->list); in nvme_tcp_free_ctrl()
2100 nvmf_free_options(nctrl->opts); in nvme_tcp_free_ctrl()
2102 kfree(ctrl->queues); in nvme_tcp_free_ctrl()
2103 kfree(ctrl); in nvme_tcp_free_ctrl()
2108 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_null()
2110 sg->addr = 0; in nvme_tcp_set_sg_null()
2111 sg->length = 0; in nvme_tcp_set_sg_null()
2112 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | in nvme_tcp_set_sg_null()
2119 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_inline()
2121 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_tcp_set_sg_inline()
2122 sg->length = cpu_to_le32(data_len); in nvme_tcp_set_sg_inline()
2123 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; in nvme_tcp_set_sg_inline()
2129 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_tcp_set_sg_host_data()
2131 sg->addr = 0; in nvme_tcp_set_sg_host_data()
2132 sg->length = cpu_to_le32(data_len); in nvme_tcp_set_sg_host_data()
2133 sg->type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | in nvme_tcp_set_sg_host_data()
2139 struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(arg); in nvme_tcp_submit_async_event() local
2140 struct nvme_tcp_queue *queue = &ctrl->queues[0]; in nvme_tcp_submit_async_event()
2141 struct nvme_tcp_cmd_pdu *pdu = ctrl->async_req.pdu; in nvme_tcp_submit_async_event()
2142 struct nvme_command *cmd = &pdu->cmd; in nvme_tcp_submit_async_event()
2146 pdu->hdr.type = nvme_tcp_cmd; in nvme_tcp_submit_async_event()
2147 if (queue->hdr_digest) in nvme_tcp_submit_async_event()
2148 pdu->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_submit_async_event()
2149 pdu->hdr.hlen = sizeof(*pdu); in nvme_tcp_submit_async_event()
2150 pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst); in nvme_tcp_submit_async_event()
2152 cmd->common.opcode = nvme_admin_async_event; in nvme_tcp_submit_async_event()
2153 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH; in nvme_tcp_submit_async_event()
2154 cmd->common.flags |= NVME_CMD_SGL_METABUF; in nvme_tcp_submit_async_event()
2157 ctrl->async_req.state = NVME_TCP_SEND_CMD_PDU; in nvme_tcp_submit_async_event()
2158 ctrl->async_req.offset = 0; in nvme_tcp_submit_async_event()
2159 ctrl->async_req.curr_bio = NULL; in nvme_tcp_submit_async_event()
2160 ctrl->async_req.data_len = 0; in nvme_tcp_submit_async_event()
2162 nvme_tcp_queue_request(&ctrl->async_req, true, true); in nvme_tcp_submit_async_event()
2168 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_complete_timed_out() local
2170 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue)); in nvme_tcp_complete_timed_out()
2172 nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; in nvme_tcp_complete_timed_out()
2181 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; in nvme_tcp_timeout() local
2182 struct nvme_tcp_cmd_pdu *pdu = req->pdu; in nvme_tcp_timeout()
2184 dev_warn(ctrl->device, in nvme_tcp_timeout()
2186 nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type); in nvme_tcp_timeout()
2188 if (ctrl->state != NVME_CTRL_LIVE) { in nvme_tcp_timeout()
2193 * - ctrl disable/shutdown fabrics requests in nvme_tcp_timeout()
2194 * - connect requests in nvme_tcp_timeout()
2195 * - initialization admin requests in nvme_tcp_timeout()
2196 * - I/O requests that entered after unquiescing and in nvme_tcp_timeout()
2210 nvme_tcp_error_recovery(ctrl); in nvme_tcp_timeout()
2218 struct nvme_tcp_cmd_pdu *pdu = req->pdu; in nvme_tcp_map_data()
2219 struct nvme_command *c = &pdu->cmd; in nvme_tcp_map_data()
2221 c->common.flags |= NVME_CMD_SGL_METABUF; in nvme_tcp_map_data()
2226 req->data_len <= nvme_tcp_inline_data_size(queue)) in nvme_tcp_map_data()
2227 nvme_tcp_set_sg_inline(queue, c, req->data_len); in nvme_tcp_map_data()
2229 nvme_tcp_set_sg_host_data(c, req->data_len); in nvme_tcp_map_data()
2238 struct nvme_tcp_cmd_pdu *pdu = req->pdu; in nvme_tcp_setup_cmd_pdu()
2239 struct nvme_tcp_queue *queue = req->queue; in nvme_tcp_setup_cmd_pdu()
2243 ret = nvme_setup_cmd(ns, rq, &pdu->cmd); in nvme_tcp_setup_cmd_pdu()
2247 req->state = NVME_TCP_SEND_CMD_PDU; in nvme_tcp_setup_cmd_pdu()
2248 req->offset = 0; in nvme_tcp_setup_cmd_pdu()
2249 req->data_sent = 0; in nvme_tcp_setup_cmd_pdu()
2250 req->pdu_len = 0; in nvme_tcp_setup_cmd_pdu()
2251 req->pdu_sent = 0; in nvme_tcp_setup_cmd_pdu()
2252 req->data_len = blk_rq_nr_phys_segments(rq) ? in nvme_tcp_setup_cmd_pdu()
2254 req->curr_bio = rq->bio; in nvme_tcp_setup_cmd_pdu()
2257 req->data_len <= nvme_tcp_inline_data_size(queue)) in nvme_tcp_setup_cmd_pdu()
2258 req->pdu_len = req->data_len; in nvme_tcp_setup_cmd_pdu()
2259 else if (req->curr_bio) in nvme_tcp_setup_cmd_pdu()
2262 pdu->hdr.type = nvme_tcp_cmd; in nvme_tcp_setup_cmd_pdu()
2263 pdu->hdr.flags = 0; in nvme_tcp_setup_cmd_pdu()
2264 if (queue->hdr_digest) in nvme_tcp_setup_cmd_pdu()
2265 pdu->hdr.flags |= NVME_TCP_F_HDGST; in nvme_tcp_setup_cmd_pdu()
2266 if (queue->data_digest && req->pdu_len) { in nvme_tcp_setup_cmd_pdu()
2267 pdu->hdr.flags |= NVME_TCP_F_DDGST; in nvme_tcp_setup_cmd_pdu()
2270 pdu->hdr.hlen = sizeof(*pdu); in nvme_tcp_setup_cmd_pdu()
2271 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0; in nvme_tcp_setup_cmd_pdu()
2272 pdu->hdr.plen = in nvme_tcp_setup_cmd_pdu()
2273 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst); in nvme_tcp_setup_cmd_pdu()
2278 dev_err(queue->ctrl->ctrl.device, in nvme_tcp_setup_cmd_pdu()
2288 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_commit_rqs()
2290 if (!llist_empty(&queue->req_list)) in nvme_tcp_commit_rqs()
2291 queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work); in nvme_tcp_commit_rqs()
2297 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_tcp_queue_rq()
2298 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_queue_rq()
2299 struct request *rq = bd->rq; in nvme_tcp_queue_rq()
2301 bool queue_ready = test_bit(NVME_TCP_Q_LIVE, &queue->flags); in nvme_tcp_queue_rq()
2304 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_tcp_queue_rq()
2305 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_tcp_queue_rq()
2313 nvme_tcp_queue_request(req, true, bd->last); in nvme_tcp_queue_rq()
2320 struct nvme_tcp_ctrl *ctrl = set->driver_data; in nvme_tcp_map_queues() local
2321 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_tcp_map_queues()
2323 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) { in nvme_tcp_map_queues()
2325 set->map[HCTX_TYPE_DEFAULT].nr_queues = in nvme_tcp_map_queues()
2326 ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_map_queues()
2327 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; in nvme_tcp_map_queues()
2328 set->map[HCTX_TYPE_READ].nr_queues = in nvme_tcp_map_queues()
2329 ctrl->io_queues[HCTX_TYPE_READ]; in nvme_tcp_map_queues()
2330 set->map[HCTX_TYPE_READ].queue_offset = in nvme_tcp_map_queues()
2331 ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_map_queues()
2334 set->map[HCTX_TYPE_DEFAULT].nr_queues = in nvme_tcp_map_queues()
2335 ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_map_queues()
2336 set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; in nvme_tcp_map_queues()
2337 set->map[HCTX_TYPE_READ].nr_queues = in nvme_tcp_map_queues()
2338 ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_tcp_map_queues()
2339 set->map[HCTX_TYPE_READ].queue_offset = 0; in nvme_tcp_map_queues()
2341 blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); in nvme_tcp_map_queues()
2342 blk_mq_map_queues(&set->map[HCTX_TYPE_READ]); in nvme_tcp_map_queues()
2344 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) { in nvme_tcp_map_queues()
2346 set->map[HCTX_TYPE_POLL].nr_queues = in nvme_tcp_map_queues()
2347 ctrl->io_queues[HCTX_TYPE_POLL]; in nvme_tcp_map_queues()
2348 set->map[HCTX_TYPE_POLL].queue_offset = in nvme_tcp_map_queues()
2349 ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_tcp_map_queues()
2350 ctrl->io_queues[HCTX_TYPE_READ]; in nvme_tcp_map_queues()
2351 blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); in nvme_tcp_map_queues()
2354 dev_info(ctrl->ctrl.device, in nvme_tcp_map_queues()
2356 ctrl->io_queues[HCTX_TYPE_DEFAULT], in nvme_tcp_map_queues()
2357 ctrl->io_queues[HCTX_TYPE_READ], in nvme_tcp_map_queues()
2358 ctrl->io_queues[HCTX_TYPE_POLL]); in nvme_tcp_map_queues()
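The resulting blk-mq map for the separate-write-queue branch above, with assumed counts (DEFAULT = 2 write queues, READ = 4, POLL = 2):

/*
 * HCTX_TYPE_DEFAULT: nr_queues = 2, queue_offset = 0
 * HCTX_TYPE_READ:    nr_queues = 4, queue_offset = 2
 * HCTX_TYPE_POLL:    nr_queues = 2, queue_offset = 6
 *
 * In the shared branch, READ aliases the DEFAULT range (offset 0, same
 * width), so reads and writes land on the same TCP queues.
 */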
2365 struct nvme_tcp_queue *queue = hctx->driver_data; in nvme_tcp_poll()
2366 struct sock *sk = queue->sock->sk; in nvme_tcp_poll()
2368 if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags)) in nvme_tcp_poll()
2371 set_bit(NVME_TCP_Q_POLLING, &queue->flags); in nvme_tcp_poll()
2372 if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue)) in nvme_tcp_poll()
2375 clear_bit(NVME_TCP_Q_POLLING, &queue->flags); in nvme_tcp_poll()
2376 return queue->nr_cqe; in nvme_tcp_poll()
2416 struct nvme_tcp_ctrl *ctrl; in nvme_tcp_existing_controller() local
2420 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) { in nvme_tcp_existing_controller()
2421 found = nvmf_ip_options_match(&ctrl->ctrl, opts); in nvme_tcp_existing_controller()
2433 struct nvme_tcp_ctrl *ctrl; in nvme_tcp_create_ctrl() local
2436 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); in nvme_tcp_create_ctrl()
2437 if (!ctrl) in nvme_tcp_create_ctrl()
2438 return ERR_PTR(-ENOMEM); in nvme_tcp_create_ctrl()
2440 INIT_LIST_HEAD(&ctrl->list); in nvme_tcp_create_ctrl()
2441 ctrl->ctrl.opts = opts; in nvme_tcp_create_ctrl()
2442 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + in nvme_tcp_create_ctrl()
2443 opts->nr_poll_queues + 1; in nvme_tcp_create_ctrl()
2444 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_tcp_create_ctrl()
2445 ctrl->ctrl.kato = opts->kato; in nvme_tcp_create_ctrl()
2447 INIT_DELAYED_WORK(&ctrl->connect_work, in nvme_tcp_create_ctrl()
2449 INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work); in nvme_tcp_create_ctrl()
2450 INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work); in nvme_tcp_create_ctrl()
2452 if (!(opts->mask & NVMF_OPT_TRSVCID)) { in nvme_tcp_create_ctrl()
2453 opts->trsvcid = in nvme_tcp_create_ctrl()
2455 if (!opts->trsvcid) { in nvme_tcp_create_ctrl()
2456 ret = -ENOMEM; in nvme_tcp_create_ctrl()
2459 opts->mask |= NVMF_OPT_TRSVCID; in nvme_tcp_create_ctrl()
2463 opts->traddr, opts->trsvcid, &ctrl->addr); in nvme_tcp_create_ctrl()
2466 opts->traddr, opts->trsvcid); in nvme_tcp_create_ctrl()
2470 if (opts->mask & NVMF_OPT_HOST_TRADDR) { in nvme_tcp_create_ctrl()
2472 opts->host_traddr, NULL, &ctrl->src_addr); in nvme_tcp_create_ctrl()
2475 opts->host_traddr); in nvme_tcp_create_ctrl()
2480 if (!opts->duplicate_connect && nvme_tcp_existing_controller(opts)) { in nvme_tcp_create_ctrl()
2481 ret = -EALREADY; in nvme_tcp_create_ctrl()
2485 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), in nvme_tcp_create_ctrl()
2487 if (!ctrl->queues) { in nvme_tcp_create_ctrl()
2488 ret = -ENOMEM; in nvme_tcp_create_ctrl()
2492 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_tcp_ctrl_ops, 0); in nvme_tcp_create_ctrl()
2496 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { in nvme_tcp_create_ctrl()
2498 ret = -EINTR; in nvme_tcp_create_ctrl()
2502 ret = nvme_tcp_setup_ctrl(&ctrl->ctrl, true); in nvme_tcp_create_ctrl()
2506 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n", in nvme_tcp_create_ctrl()
2507 ctrl->ctrl.opts->subsysnqn, &ctrl->addr); in nvme_tcp_create_ctrl()
2510 list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list); in nvme_tcp_create_ctrl()
2513 return &ctrl->ctrl; in nvme_tcp_create_ctrl()
2516 nvme_uninit_ctrl(&ctrl->ctrl); in nvme_tcp_create_ctrl()
2517 nvme_put_ctrl(&ctrl->ctrl); in nvme_tcp_create_ctrl()
2519 ret = -EIO; in nvme_tcp_create_ctrl()
2522 kfree(ctrl->queues); in nvme_tcp_create_ctrl()
2524 kfree(ctrl); in nvme_tcp_create_ctrl()
2545 return -ENOMEM; in nvme_tcp_init_module()
2553 struct nvme_tcp_ctrl *ctrl; in nvme_tcp_cleanup_module() local
2558 list_for_each_entry(ctrl, &nvme_tcp_ctrl_list, list) in nvme_tcp_cleanup_module()
2559 nvme_delete_ctrl(&ctrl->ctrl); in nvme_tcp_cleanup_module()