Lines Matching +full:reserved +full:- +full:ipi +full:- +full:vectors
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
14 * - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
142 …le completion vectors. The default value is the minimum of four times the number of online CPU soc…
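
The truncated module-parameter description above appears to define a default channel count as the smaller of two limits. A minimal user-space sketch of that computation, assuming the cut-off text means "the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA" (the function and parameter names below are illustrative, not driver symbols):

#include <stdio.h>

/* Pick a default RDMA channel count from two independent limits. */
static int default_ch_count(int num_sockets, int num_comp_vectors)
{
	int want = 4 * num_sockets;

	return want < num_comp_vectors ? want : num_comp_vectors;
}

int main(void)
{
	/* e.g. 2 CPU sockets and an HCA exposing 16 completion vectors -> 8 */
	printf("default ch_count = %d\n", default_ch_count(2, 16));
	return 0;
}
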
169 int tmo = *(int *)kp->arg; in srp_tmo_get()
185 if (kp->arg == &srp_reconnect_delay) in srp_tmo_set()
188 else if (kp->arg == &srp_fast_io_fail_tmo) in srp_tmo_set()
195 *(int *)kp->arg = tmo; in srp_tmo_set()
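
srp_tmo_get() and srp_tmo_set() above dispatch on which timeout module parameter (reconnect delay, fast I/O fail, device loss) is being read or written. As far as I can tell these parameters accept either a number or the string "off"; that convention is not visible in the matched lines, so the sketch below is an assumption. A user-space version of an "integer or off" timeout value:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Print the timeout, or "off" when the stored value is negative. */
static int tmo_to_str(char *buf, size_t len, int tmo)
{
	return tmo >= 0 ? snprintf(buf, len, "%d\n", tmo)
			: snprintf(buf, len, "off\n");
}

/* Parse either "off" or a non-negative integer into the timeout. */
static int tmo_from_str(const char *val, int *tmo)
{
	char *end;
	long v;

	if (!strncmp(val, "off", 3)) {
		*tmo = -1;			/* sentinel: timer disabled */
		return 0;
	}
	v = strtol(val, &end, 10);
	if (end == val || v < 0 || v > INT_MAX)
		return -1;
	*tmo = (int)v;
	return 0;
}
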
208 return (struct srp_target_port *) host->hostdata; in host_to_target()
213 return host_to_target(host)->target_name; in srp_target_info()
222 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) || in srp_target_is_topspin()
223 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui)); in srp_target_is_topspin()
236 iu->buf = kzalloc(size, gfp_mask); in srp_alloc_iu()
237 if (!iu->buf) in srp_alloc_iu()
240 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size, in srp_alloc_iu()
242 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma)) in srp_alloc_iu()
245 iu->size = size; in srp_alloc_iu()
246 iu->direction = direction; in srp_alloc_iu()
251 kfree(iu->buf); in srp_alloc_iu()
263 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size, in srp_free_iu()
264 iu->direction); in srp_free_iu()
265 kfree(iu->buf); in srp_free_iu()
272 ib_event_msg(event->event), event->event); in srp_qp_event()
283 return -ENOMEM; in srp_init_ib_qp()
285 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev, in srp_init_ib_qp()
286 target->srp_host->port, in srp_init_ib_qp()
287 be16_to_cpu(target->ib_cm.pkey), in srp_init_ib_qp()
288 &attr->pkey_index); in srp_init_ib_qp()
292 attr->qp_state = IB_QPS_INIT; in srp_init_ib_qp()
293 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ | in srp_init_ib_qp()
295 attr->port_num = target->srp_host->port; in srp_init_ib_qp()
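
srp_init_ib_qp() looks up a P_Key index and moves the QP into the INIT state with remote read/write access. A user-space libibverbs analogue of that state transition, offered only as a sketch (port and pkey_index values are illustrative inputs, not the driver's code path):

#include <infiniband/verbs.h>
#include <stdint.h>

static int qp_to_init(struct ibv_qp *qp, uint8_t port, uint16_t pkey_index)
{
	struct ibv_qp_attr attr = {
		.qp_state	 = IBV_QPS_INIT,
		.pkey_index	 = pkey_index,
		.port_num	 = port,
		.qp_access_flags = IBV_ACCESS_REMOTE_READ |
				   IBV_ACCESS_REMOTE_WRITE,
	};

	return ibv_modify_qp(qp, &attr,
			     IBV_QP_STATE | IBV_QP_PKEY_INDEX |
			     IBV_QP_PORT | IBV_QP_ACCESS_FLAGS);
}
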
310 struct srp_target_port *target = ch->target; in srp_new_ib_cm_id()
313 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev, in srp_new_ib_cm_id()
318 if (ch->ib_cm.cm_id) in srp_new_ib_cm_id()
319 ib_destroy_cm_id(ch->ib_cm.cm_id); in srp_new_ib_cm_id()
320 ch->ib_cm.cm_id = new_cm_id; in srp_new_ib_cm_id()
321 if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev, in srp_new_ib_cm_id()
322 target->srp_host->port)) in srp_new_ib_cm_id()
323 ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA; in srp_new_ib_cm_id()
325 ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB; in srp_new_ib_cm_id()
326 ch->ib_cm.path.sgid = target->sgid; in srp_new_ib_cm_id()
327 ch->ib_cm.path.dgid = target->ib_cm.orig_dgid; in srp_new_ib_cm_id()
328 ch->ib_cm.path.pkey = target->ib_cm.pkey; in srp_new_ib_cm_id()
329 ch->ib_cm.path.service_id = target->ib_cm.service_id; in srp_new_ib_cm_id()
336 struct srp_target_port *target = ch->target; in srp_new_rdma_cm_id()
340 new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch, in srp_new_rdma_cm_id()
348 init_completion(&ch->done); in srp_new_rdma_cm_id()
349 ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ? in srp_new_rdma_cm_id()
350 &target->rdma_cm.src.sa : NULL, in srp_new_rdma_cm_id()
351 &target->rdma_cm.dst.sa, in srp_new_rdma_cm_id()
355 &target->rdma_cm.src, &target->rdma_cm.dst, ret); in srp_new_rdma_cm_id()
358 ret = wait_for_completion_interruptible(&ch->done); in srp_new_rdma_cm_id()
362 ret = ch->status; in srp_new_rdma_cm_id()
365 &target->rdma_cm.dst, ret); in srp_new_rdma_cm_id()
369 swap(ch->rdma_cm.cm_id, new_cm_id); in srp_new_rdma_cm_id()
380 struct srp_target_port *target = ch->target; in srp_new_cm_id()
382 return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) : in srp_new_cm_id()
387 * srp_destroy_fr_pool() - free the resources owned by a pool
398 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { in srp_destroy_fr_pool()
399 if (d->mr) in srp_destroy_fr_pool()
400 ib_dereg_mr(d->mr); in srp_destroy_fr_pool()
406 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
419 int i, ret = -EINVAL; in srp_create_fr_pool()
424 ret = -ENOMEM; in srp_create_fr_pool()
428 pool->size = pool_size; in srp_create_fr_pool()
429 pool->max_page_list_len = max_page_list_len; in srp_create_fr_pool()
430 spin_lock_init(&pool->lock); in srp_create_fr_pool()
431 INIT_LIST_HEAD(&pool->free_list); in srp_create_fr_pool()
433 if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG) in srp_create_fr_pool()
438 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { in srp_create_fr_pool()
442 if (ret == -ENOMEM) in srp_create_fr_pool()
444 dev_name(&device->dev)); in srp_create_fr_pool()
447 d->mr = mr; in srp_create_fr_pool()
448 list_add_tail(&d->entry, &pool->free_list); in srp_create_fr_pool()
463 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
471 spin_lock_irqsave(&pool->lock, flags); in srp_fr_pool_get()
472 if (!list_empty(&pool->free_list)) { in srp_fr_pool_get()
473 d = list_first_entry(&pool->free_list, typeof(*d), entry); in srp_fr_pool_get()
474 list_del(&d->entry); in srp_fr_pool_get()
476 spin_unlock_irqrestore(&pool->lock, flags); in srp_fr_pool_get()
482 * srp_fr_pool_put() - put an FR descriptor back in the free list
488 * desc->mr->rkey before calling this function.
496 spin_lock_irqsave(&pool->lock, flags); in srp_fr_pool_put()
498 list_add(&desc[i]->entry, &pool->free_list); in srp_fr_pool_put()
499 spin_unlock_irqrestore(&pool->lock, flags); in srp_fr_pool_put()
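
srp_fr_pool_get() and srp_fr_pool_put() above manage fast-registration descriptors as a spinlock-protected free list: get takes the first entry, put pushes descriptors back onto the head. A user-space analogue using a pthread mutex, with illustrative type and function names:

#include <pthread.h>
#include <stdlib.h>

struct fr_desc {
	struct fr_desc *next;		/* free-list linkage */
};

struct fr_pool {
	pthread_mutex_t lock;
	struct fr_desc *free_list;
	struct fr_desc *storage;
};

static int fr_pool_init(struct fr_pool *p, int size)
{
	int i;

	p->storage = calloc(size, sizeof(*p->storage));
	if (!p->storage)
		return -1;
	pthread_mutex_init(&p->lock, NULL);
	p->free_list = NULL;
	for (i = 0; i < size; i++) {
		p->storage[i].next = p->free_list;
		p->free_list = &p->storage[i];
	}
	return 0;
}

/* Take the first free descriptor, or NULL if the pool is exhausted. */
static struct fr_desc *fr_pool_get(struct fr_pool *p)
{
	struct fr_desc *d;

	pthread_mutex_lock(&p->lock);
	d = p->free_list;
	if (d)
		p->free_list = d->next;
	pthread_mutex_unlock(&p->lock);
	return d;
}

/* Put n descriptors back at the head of the free list. */
static void fr_pool_put(struct fr_pool *p, struct fr_desc **d, int n)
{
	int i;

	pthread_mutex_lock(&p->lock);
	for (i = 0; i < n; i++) {
		d[i]->next = p->free_list;
		p->free_list = d[i];
	}
	pthread_mutex_unlock(&p->lock);
}
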
504 struct srp_device *dev = target->srp_host->srp_dev; in srp_alloc_fr_pool()
506 return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size, in srp_alloc_fr_pool()
507 dev->max_pages_per_mr); in srp_alloc_fr_pool()
511 * srp_destroy_qp() - destroy an RDMA queue pair
520 spin_lock_irq(&ch->lock); in srp_destroy_qp()
521 ib_process_cq_direct(ch->send_cq, -1); in srp_destroy_qp()
522 spin_unlock_irq(&ch->lock); in srp_destroy_qp()
524 ib_drain_qp(ch->qp); in srp_destroy_qp()
525 ib_destroy_qp(ch->qp); in srp_destroy_qp()
530 struct srp_target_port *target = ch->target; in srp_create_ch_ib()
531 struct srp_device *dev = target->srp_host->srp_dev; in srp_create_ch_ib()
532 const struct ib_device_attr *attr = &dev->dev->attrs; in srp_create_ch_ib()
537 const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2; in srp_create_ch_ib()
542 return -ENOMEM; in srp_create_ch_ib()
545 recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1, in srp_create_ch_ib()
546 ch->comp_vector, IB_POLL_SOFTIRQ); in srp_create_ch_ib()
552 send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size, in srp_create_ch_ib()
553 ch->comp_vector, IB_POLL_DIRECT); in srp_create_ch_ib()
559 init_attr->event_handler = srp_qp_event; in srp_create_ch_ib()
560 init_attr->cap.max_send_wr = m * target->queue_size; in srp_create_ch_ib()
561 init_attr->cap.max_recv_wr = target->queue_size + 1; in srp_create_ch_ib()
562 init_attr->cap.max_recv_sge = 1; in srp_create_ch_ib()
563 init_attr->cap.max_send_sge = min(SRP_MAX_SGE, attr->max_send_sge); in srp_create_ch_ib()
564 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; in srp_create_ch_ib()
565 init_attr->qp_type = IB_QPT_RC; in srp_create_ch_ib()
566 init_attr->send_cq = send_cq; in srp_create_ch_ib()
567 init_attr->recv_cq = recv_cq; in srp_create_ch_ib()
569 ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U); in srp_create_ch_ib()
571 if (target->using_rdma_cm) { in srp_create_ch_ib()
572 ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr); in srp_create_ch_ib()
573 qp = ch->rdma_cm.cm_id->qp; in srp_create_ch_ib()
575 qp = ib_create_qp(dev->pd, init_attr); in srp_create_ch_ib()
586 dev_name(&dev->dev->dev), ret); in srp_create_ch_ib()
590 if (dev->use_fast_reg) { in srp_create_ch_ib()
594 shost_printk(KERN_WARNING, target->scsi_host, PFX in srp_create_ch_ib()
600 if (ch->qp) in srp_create_ch_ib()
602 if (ch->recv_cq) in srp_create_ch_ib()
603 ib_free_cq(ch->recv_cq); in srp_create_ch_ib()
604 if (ch->send_cq) in srp_create_ch_ib()
605 ib_free_cq(ch->send_cq); in srp_create_ch_ib()
607 ch->qp = qp; in srp_create_ch_ib()
608 ch->recv_cq = recv_cq; in srp_create_ch_ib()
609 ch->send_cq = send_cq; in srp_create_ch_ib()
611 if (dev->use_fast_reg) { in srp_create_ch_ib()
612 if (ch->fr_pool) in srp_create_ch_ib()
613 srp_destroy_fr_pool(ch->fr_pool); in srp_create_ch_ib()
614 ch->fr_pool = fr_pool; in srp_create_ch_ib()
621 if (target->using_rdma_cm) in srp_create_ch_ib()
622 rdma_destroy_qp(ch->rdma_cm.cm_id); in srp_create_ch_ib()
639 * invoked. Hence the ch->[rt]x_ring checks.
644 struct srp_device *dev = target->srp_host->srp_dev; in srp_free_ch_ib()
647 if (!ch->target) in srp_free_ch_ib()
650 if (target->using_rdma_cm) { in srp_free_ch_ib()
651 if (ch->rdma_cm.cm_id) { in srp_free_ch_ib()
652 rdma_destroy_id(ch->rdma_cm.cm_id); in srp_free_ch_ib()
653 ch->rdma_cm.cm_id = NULL; in srp_free_ch_ib()
656 if (ch->ib_cm.cm_id) { in srp_free_ch_ib()
657 ib_destroy_cm_id(ch->ib_cm.cm_id); in srp_free_ch_ib()
658 ch->ib_cm.cm_id = NULL; in srp_free_ch_ib()
663 if (!ch->qp) in srp_free_ch_ib()
666 if (dev->use_fast_reg) { in srp_free_ch_ib()
667 if (ch->fr_pool) in srp_free_ch_ib()
668 srp_destroy_fr_pool(ch->fr_pool); in srp_free_ch_ib()
672 ib_free_cq(ch->send_cq); in srp_free_ch_ib()
673 ib_free_cq(ch->recv_cq); in srp_free_ch_ib()
681 ch->target = NULL; in srp_free_ch_ib()
683 ch->qp = NULL; in srp_free_ch_ib()
684 ch->send_cq = ch->recv_cq = NULL; in srp_free_ch_ib()
686 if (ch->rx_ring) { in srp_free_ch_ib()
687 for (i = 0; i < target->queue_size; ++i) in srp_free_ch_ib()
688 srp_free_iu(target->srp_host, ch->rx_ring[i]); in srp_free_ch_ib()
689 kfree(ch->rx_ring); in srp_free_ch_ib()
690 ch->rx_ring = NULL; in srp_free_ch_ib()
692 if (ch->tx_ring) { in srp_free_ch_ib()
693 for (i = 0; i < target->queue_size; ++i) in srp_free_ch_ib()
694 srp_free_iu(target->srp_host, ch->tx_ring[i]); in srp_free_ch_ib()
695 kfree(ch->tx_ring); in srp_free_ch_ib()
696 ch->tx_ring = NULL; in srp_free_ch_ib()
705 struct srp_target_port *target = ch->target; in srp_path_rec_completion()
707 ch->status = status; in srp_path_rec_completion()
709 shost_printk(KERN_ERR, target->scsi_host, in srp_path_rec_completion()
712 ch->ib_cm.path = *pathrec; in srp_path_rec_completion()
713 complete(&ch->done); in srp_path_rec_completion()
718 struct srp_target_port *target = ch->target; in srp_ib_lookup_path()
721 ch->ib_cm.path.numb_path = 1; in srp_ib_lookup_path()
723 init_completion(&ch->done); in srp_ib_lookup_path()
725 ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client, in srp_ib_lookup_path()
726 target->srp_host->srp_dev->dev, in srp_ib_lookup_path()
727 target->srp_host->port, in srp_ib_lookup_path()
728 &ch->ib_cm.path, in srp_ib_lookup_path()
737 ch, &ch->ib_cm.path_query); in srp_ib_lookup_path()
738 if (ch->ib_cm.path_query_id < 0) in srp_ib_lookup_path()
739 return ch->ib_cm.path_query_id; in srp_ib_lookup_path()
741 ret = wait_for_completion_interruptible(&ch->done); in srp_ib_lookup_path()
745 if (ch->status < 0) in srp_ib_lookup_path()
746 shost_printk(KERN_WARNING, target->scsi_host, in srp_ib_lookup_path()
748 ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw, in srp_ib_lookup_path()
749 be16_to_cpu(target->ib_cm.pkey), in srp_ib_lookup_path()
750 be64_to_cpu(target->ib_cm.service_id)); in srp_ib_lookup_path()
752 return ch->status; in srp_ib_lookup_path()
757 struct srp_target_port *target = ch->target; in srp_rdma_lookup_path()
760 init_completion(&ch->done); in srp_rdma_lookup_path()
762 ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS); in srp_rdma_lookup_path()
766 wait_for_completion_interruptible(&ch->done); in srp_rdma_lookup_path()
768 if (ch->status != 0) in srp_rdma_lookup_path()
769 shost_printk(KERN_WARNING, target->scsi_host, in srp_rdma_lookup_path()
772 return ch->status; in srp_rdma_lookup_path()
777 struct srp_target_port *target = ch->target; in srp_lookup_path()
779 return target->using_rdma_cm ? srp_rdma_lookup_path(ch) : in srp_lookup_path()
789 ret = ib_query_port(host->srp_dev->dev, host->port, &attr); in srp_get_subnet_timeout()
795 dev_name(&host->srp_dev->dev->dev), subnet_timeout); in srp_get_subnet_timeout()
803 struct srp_target_port *target = ch->target; in srp_send_req()
810 char *ipi, *tpi; in srp_send_req() local
815 return -ENOMEM; in srp_send_req()
817 req->ib_param.flow_control = 1; in srp_send_req()
818 req->ib_param.retry_count = target->tl_retry_count; in srp_send_req()
824 req->ib_param.responder_resources = 4; in srp_send_req()
825 req->ib_param.rnr_retry_count = 7; in srp_send_req()
826 req->ib_param.max_cm_retries = 15; in srp_send_req()
828 req->ib_req.opcode = SRP_LOGIN_REQ; in srp_send_req()
829 req->ib_req.tag = 0; in srp_send_req()
830 req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len); in srp_send_req()
831 req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | in srp_send_req()
833 req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI : in srp_send_req()
836 req->ib_req.req_flags |= SRP_IMMED_REQUESTED; in srp_send_req()
837 req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET); in srp_send_req()
840 if (target->using_rdma_cm) { in srp_send_req()
841 req->rdma_param.flow_control = req->ib_param.flow_control; in srp_send_req()
842 req->rdma_param.responder_resources = in srp_send_req()
843 req->ib_param.responder_resources; in srp_send_req()
844 req->rdma_param.initiator_depth = req->ib_param.initiator_depth; in srp_send_req()
845 req->rdma_param.retry_count = req->ib_param.retry_count; in srp_send_req()
846 req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count; in srp_send_req()
847 req->rdma_param.private_data = &req->rdma_req; in srp_send_req()
848 req->rdma_param.private_data_len = sizeof(req->rdma_req); in srp_send_req()
850 req->rdma_req.opcode = req->ib_req.opcode; in srp_send_req()
851 req->rdma_req.tag = req->ib_req.tag; in srp_send_req()
852 req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len; in srp_send_req()
853 req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt; in srp_send_req()
854 req->rdma_req.req_flags = req->ib_req.req_flags; in srp_send_req()
855 req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset; in srp_send_req()
857 ipi = req->rdma_req.initiator_port_id; in srp_send_req()
858 tpi = req->rdma_req.target_port_id; in srp_send_req()
862 subnet_timeout = srp_get_subnet_timeout(target->srp_host); in srp_send_req()
864 req->ib_param.primary_path = &ch->ib_cm.path; in srp_send_req()
865 req->ib_param.alternate_path = NULL; in srp_send_req()
866 req->ib_param.service_id = target->ib_cm.service_id; in srp_send_req()
867 get_random_bytes(&req->ib_param.starting_psn, 4); in srp_send_req()
868 req->ib_param.starting_psn &= 0xffffff; in srp_send_req()
869 req->ib_param.qp_num = ch->qp->qp_num; in srp_send_req()
870 req->ib_param.qp_type = ch->qp->qp_type; in srp_send_req()
871 req->ib_param.local_cm_response_timeout = subnet_timeout + 2; in srp_send_req()
872 req->ib_param.remote_cm_response_timeout = subnet_timeout + 2; in srp_send_req()
873 req->ib_param.private_data = &req->ib_req; in srp_send_req()
874 req->ib_param.private_data_len = sizeof(req->ib_req); in srp_send_req()
876 ipi = req->ib_req.initiator_port_id; in srp_send_req()
877 tpi = req->ib_req.target_port_id; in srp_send_req()
889 if (target->io_class == SRP_REV10_IB_IO_CLASS) { in srp_send_req()
890 memcpy(ipi, &target->sgid.global.interface_id, 8); in srp_send_req()
891 memcpy(ipi + 8, &target->initiator_ext, 8); in srp_send_req()
892 memcpy(tpi, &target->ioc_guid, 8); in srp_send_req()
893 memcpy(tpi + 8, &target->id_ext, 8); in srp_send_req()
895 memcpy(ipi, &target->initiator_ext, 8); in srp_send_req()
896 memcpy(ipi + 8, &target->sgid.global.interface_id, 8); in srp_send_req()
897 memcpy(tpi, &target->id_ext, 8); in srp_send_req()
898 memcpy(tpi + 8, &target->ioc_guid, 8); in srp_send_req()
907 shost_printk(KERN_DEBUG, target->scsi_host, in srp_send_req()
910 be64_to_cpu(target->ioc_guid)); in srp_send_req()
911 memset(ipi, 0, 8); in srp_send_req()
912 memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8); in srp_send_req()
915 if (target->using_rdma_cm) in srp_send_req()
916 status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param); in srp_send_req()
918 status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param); in srp_send_req()
929 spin_lock_irq(&target->lock); in srp_queue_remove_work()
930 if (target->state != SRP_TARGET_REMOVED) { in srp_queue_remove_work()
931 target->state = SRP_TARGET_REMOVED; in srp_queue_remove_work()
934 spin_unlock_irq(&target->lock); in srp_queue_remove_work()
937 queue_work(srp_remove_wq, &target->remove_work); in srp_queue_remove_work()
949 for (i = 0; i < target->ch_count; i++) { in srp_disconnect_target()
950 ch = &target->ch[i]; in srp_disconnect_target()
951 ch->connected = false; in srp_disconnect_target()
953 if (target->using_rdma_cm) { in srp_disconnect_target()
954 if (ch->rdma_cm.cm_id) in srp_disconnect_target()
955 rdma_disconnect(ch->rdma_cm.cm_id); in srp_disconnect_target()
957 if (ch->ib_cm.cm_id) in srp_disconnect_target()
958 ret = ib_send_cm_dreq(ch->ib_cm.cm_id, in srp_disconnect_target()
962 shost_printk(KERN_DEBUG, target->scsi_host, in srp_disconnect_target()
971 struct srp_device *dev = target->srp_host->srp_dev; in srp_free_req_data()
972 struct ib_device *ibdev = dev->dev; in srp_free_req_data()
976 if (!ch->req_ring) in srp_free_req_data()
979 for (i = 0; i < target->req_ring_size; ++i) { in srp_free_req_data()
980 req = &ch->req_ring[i]; in srp_free_req_data()
981 if (dev->use_fast_reg) in srp_free_req_data()
982 kfree(req->fr_list); in srp_free_req_data()
983 if (req->indirect_dma_addr) { in srp_free_req_data()
984 ib_dma_unmap_single(ibdev, req->indirect_dma_addr, in srp_free_req_data()
985 target->indirect_size, in srp_free_req_data()
988 kfree(req->indirect_desc); in srp_free_req_data()
991 kfree(ch->req_ring); in srp_free_req_data()
992 ch->req_ring = NULL; in srp_free_req_data()
997 struct srp_target_port *target = ch->target; in srp_alloc_req_data()
998 struct srp_device *srp_dev = target->srp_host->srp_dev; in srp_alloc_req_data()
999 struct ib_device *ibdev = srp_dev->dev; in srp_alloc_req_data()
1003 int i, ret = -ENOMEM; in srp_alloc_req_data()
1005 ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring), in srp_alloc_req_data()
1007 if (!ch->req_ring) in srp_alloc_req_data()
1010 for (i = 0; i < target->req_ring_size; ++i) { in srp_alloc_req_data()
1011 req = &ch->req_ring[i]; in srp_alloc_req_data()
1012 mr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *), in srp_alloc_req_data()
1016 if (srp_dev->use_fast_reg) in srp_alloc_req_data()
1017 req->fr_list = mr_list; in srp_alloc_req_data()
1018 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL); in srp_alloc_req_data()
1019 if (!req->indirect_desc) in srp_alloc_req_data()
1022 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc, in srp_alloc_req_data()
1023 target->indirect_size, in srp_alloc_req_data()
1028 req->indirect_dma_addr = dma_addr; in srp_alloc_req_data()
1037 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
1047 for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr) in srp_del_scsi_host_attr()
1048 device_remove_file(&shost->shost_dev, *attr); in srp_del_scsi_host_attr()
1056 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); in srp_remove_target()
1058 srp_del_scsi_host_attr(target->scsi_host); in srp_remove_target()
1059 srp_rport_get(target->rport); in srp_remove_target()
1060 srp_remove_host(target->scsi_host); in srp_remove_target()
1061 scsi_remove_host(target->scsi_host); in srp_remove_target()
1062 srp_stop_rport_timers(target->rport); in srp_remove_target()
1064 kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net); in srp_remove_target()
1065 for (i = 0; i < target->ch_count; i++) { in srp_remove_target()
1066 ch = &target->ch[i]; in srp_remove_target()
1069 cancel_work_sync(&target->tl_err_work); in srp_remove_target()
1070 srp_rport_put(target->rport); in srp_remove_target()
1071 for (i = 0; i < target->ch_count; i++) { in srp_remove_target()
1072 ch = &target->ch[i]; in srp_remove_target()
1075 kfree(target->ch); in srp_remove_target()
1076 target->ch = NULL; in srp_remove_target()
1078 spin_lock(&target->srp_host->target_lock); in srp_remove_target()
1079 list_del(&target->list); in srp_remove_target()
1080 spin_unlock(&target->srp_host->target_lock); in srp_remove_target()
1082 scsi_host_put(target->scsi_host); in srp_remove_target()
1090 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); in srp_remove_work()
1097 struct srp_target_port *target = rport->lld_data; in srp_rport_delete()
1103 * srp_connected_ch() - number of connected channels
1110 for (i = 0; i < target->ch_count; i++) in srp_connected_ch()
1111 c += target->ch[i].connected; in srp_connected_ch()
1119 struct srp_target_port *target = ch->target; in srp_connect_ch()
1129 init_completion(&ch->done); in srp_connect_ch()
1133 ret = wait_for_completion_interruptible(&ch->done); in srp_connect_ch()
1143 ret = ch->status; in srp_connect_ch()
1146 ch->connected = true; in srp_connect_ch()
1159 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_connect_ch()
1161 ret = -ECONNRESET; in srp_connect_ch()
1170 return ret <= 0 ? ret : -ENODEV; in srp_connect_ch()
1189 wr.wr_cqe = &req->reg_cqe; in srp_inv_rkey()
1190 req->reg_cqe.done = srp_inv_rkey_err_done; in srp_inv_rkey()
1191 return ib_post_send(ch->qp, &wr, NULL); in srp_inv_rkey()
1198 struct srp_target_port *target = ch->target; in srp_unmap_data()
1199 struct srp_device *dev = target->srp_host->srp_dev; in srp_unmap_data()
1200 struct ib_device *ibdev = dev->dev; in srp_unmap_data()
1204 (scmnd->sc_data_direction != DMA_TO_DEVICE && in srp_unmap_data()
1205 scmnd->sc_data_direction != DMA_FROM_DEVICE)) in srp_unmap_data()
1208 if (dev->use_fast_reg) { in srp_unmap_data()
1211 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) { in srp_unmap_data()
1212 res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey); in srp_unmap_data()
1214 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_unmap_data()
1216 (*pfr)->mr->rkey, res); in srp_unmap_data()
1218 &target->tl_err_work); in srp_unmap_data()
1221 if (req->nmdesc) in srp_unmap_data()
1222 srp_fr_pool_put(ch->fr_pool, req->fr_list, in srp_unmap_data()
1223 req->nmdesc); in srp_unmap_data()
1227 scmnd->sc_data_direction); in srp_unmap_data()
1231 * srp_claim_req - Take ownership of the scmnd associated with a request.
1235 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1236 * ownership of @req->scmnd if it equals @scmnd.
1248 spin_lock_irqsave(&ch->lock, flags); in srp_claim_req()
1249 if (req->scmnd && in srp_claim_req()
1250 (!sdev || req->scmnd->device == sdev) && in srp_claim_req()
1251 (!scmnd || req->scmnd == scmnd)) { in srp_claim_req()
1252 scmnd = req->scmnd; in srp_claim_req()
1253 req->scmnd = NULL; in srp_claim_req()
1257 spin_unlock_irqrestore(&ch->lock, flags); in srp_claim_req()
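
The srp_claim_req() comment above describes a conditional ownership transfer: under the channel lock, the command attached to a request is claimed only if it matches the optional scmnd (and sdev) filter. A small user-space sketch of the same pattern, with illustrative names:

#include <pthread.h>
#include <stddef.h>

struct request {
	void *cmd;		/* owning command, NULL once claimed */
};

/* Claim req->cmd under the lock. If want is non-NULL, only claim it when
 * it is the command currently owned by the request; otherwise claim
 * whatever the request owns. Returns the claimed command or NULL. */
static void *claim_req(pthread_mutex_t *lock, struct request *req, void *want)
{
	void *cmd = NULL;

	pthread_mutex_lock(lock);
	if (req->cmd && (!want || req->cmd == want)) {
		cmd = req->cmd;
		req->cmd = NULL;
	}
	pthread_mutex_unlock(lock);
	return cmd;
}
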
1263 * srp_free_req() - Unmap data and adjust ch->req_lim.
1267 * @req_lim_delta: Amount to be added to @target->req_lim.
1276 spin_lock_irqsave(&ch->lock, flags); in srp_free_req()
1277 ch->req_lim += req_lim_delta; in srp_free_req()
1278 spin_unlock_irqrestore(&ch->lock, flags); in srp_free_req()
1288 scmnd->result = result; in srp_finish_req()
1289 scmnd->scsi_done(scmnd); in srp_finish_req()
1295 struct srp_target_port *target = rport->lld_data; in srp_terminate_io()
1299 for (i = 0; i < target->ch_count; i++) { in srp_terminate_io()
1300 ch = &target->ch[i]; in srp_terminate_io()
1302 for (j = 0; j < target->req_ring_size; ++j) { in srp_terminate_io()
1303 struct srp_request *req = &ch->req_ring[j]; in srp_terminate_io()
1337 * serializes calls of this function via rport->mutex and also blocks
1342 struct srp_target_port *target = rport->lld_data; in srp_rport_reconnect()
1344 uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, in srp_rport_reconnect()
1346 target->max_it_iu_size); in srp_rport_reconnect()
1352 if (target->state == SRP_TARGET_SCANNING) in srp_rport_reconnect()
1353 return -ENODEV; in srp_rport_reconnect()
1360 for (i = 0; i < target->ch_count; i++) { in srp_rport_reconnect()
1361 ch = &target->ch[i]; in srp_rport_reconnect()
1364 for (i = 0; i < target->ch_count; i++) { in srp_rport_reconnect()
1365 ch = &target->ch[i]; in srp_rport_reconnect()
1366 for (j = 0; j < target->req_ring_size; ++j) { in srp_rport_reconnect()
1367 struct srp_request *req = &ch->req_ring[j]; in srp_rport_reconnect()
1372 for (i = 0; i < target->ch_count; i++) { in srp_rport_reconnect()
1373 ch = &target->ch[i]; in srp_rport_reconnect()
1381 INIT_LIST_HEAD(&ch->free_tx); in srp_rport_reconnect()
1382 for (j = 0; j < target->queue_size; ++j) in srp_rport_reconnect()
1383 list_add(&ch->tx_ring[j]->list, &ch->free_tx); in srp_rport_reconnect()
1386 target->qp_in_error = false; in srp_rport_reconnect()
1388 for (i = 0; i < target->ch_count; i++) { in srp_rport_reconnect()
1389 ch = &target->ch[i]; in srp_rport_reconnect()
1397 shost_printk(KERN_INFO, target->scsi_host, in srp_rport_reconnect()
1406 struct srp_direct_buf *desc = state->desc; in srp_map_desc()
1410 desc->va = cpu_to_be64(dma_addr); in srp_map_desc()
1411 desc->key = cpu_to_be32(rkey); in srp_map_desc()
1412 desc->len = cpu_to_be32(dma_len); in srp_map_desc()
1414 state->total_len += dma_len; in srp_map_desc()
1415 state->desc++; in srp_map_desc()
1416 state->ndesc++; in srp_map_desc()
1425 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
1427 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
1435 struct srp_target_port *target = ch->target; in srp_map_finish_fr()
1436 struct srp_device *dev = target->srp_host->srp_dev; in srp_map_finish_fr()
1442 if (state->fr.next >= state->fr.end) { in srp_map_finish_fr()
1443 shost_printk(KERN_ERR, ch->target->scsi_host, in srp_map_finish_fr()
1445 ch->target->mr_per_cmd); in srp_map_finish_fr()
1446 return -ENOMEM; in srp_map_finish_fr()
1449 WARN_ON_ONCE(!dev->use_fast_reg); in srp_map_finish_fr()
1451 if (sg_nents == 1 && target->global_rkey) { in srp_map_finish_fr()
1454 srp_map_desc(state, sg_dma_address(state->sg) + sg_offset, in srp_map_finish_fr()
1455 sg_dma_len(state->sg) - sg_offset, in srp_map_finish_fr()
1456 target->global_rkey); in srp_map_finish_fr()
1462 desc = srp_fr_pool_get(ch->fr_pool); in srp_map_finish_fr()
1464 return -ENOMEM; in srp_map_finish_fr()
1466 rkey = ib_inc_rkey(desc->mr->rkey); in srp_map_finish_fr()
1467 ib_update_fast_reg_key(desc->mr, rkey); in srp_map_finish_fr()
1469 n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p, in srp_map_finish_fr()
1470 dev->mr_page_size); in srp_map_finish_fr()
1472 srp_fr_pool_put(ch->fr_pool, &desc, 1); in srp_map_finish_fr()
1474 dev_name(&req->scmnd->device->sdev_gendev), sg_nents, in srp_map_finish_fr()
1475 sg_offset_p ? *sg_offset_p : -1, n); in srp_map_finish_fr()
1479 WARN_ON_ONCE(desc->mr->length == 0); in srp_map_finish_fr()
1481 req->reg_cqe.done = srp_reg_mr_err_done; in srp_map_finish_fr()
1485 wr.wr.wr_cqe = &req->reg_cqe; in srp_map_finish_fr()
1488 wr.mr = desc->mr; in srp_map_finish_fr()
1489 wr.key = desc->mr->rkey; in srp_map_finish_fr()
1494 *state->fr.next++ = desc; in srp_map_finish_fr()
1495 state->nmdesc++; in srp_map_finish_fr()
1497 srp_map_desc(state, desc->mr->iova, in srp_map_finish_fr()
1498 desc->mr->length, desc->mr->rkey); in srp_map_finish_fr()
1500 err = ib_post_send(ch->qp, &wr.wr, NULL); in srp_map_finish_fr()
1502 WARN_ON_ONCE(err == -ENOMEM); in srp_map_finish_fr()
1515 state->fr.next = req->fr_list; in srp_map_sg_fr()
1516 state->fr.end = req->fr_list + ch->target->mr_per_cmd; in srp_map_sg_fr()
1517 state->sg = scat; in srp_map_sg_fr()
1529 count -= n; in srp_map_sg_fr()
1531 state->sg = sg_next(state->sg); in srp_map_sg_fr()
1541 struct srp_target_port *target = ch->target; in srp_map_sg_dma()
1547 target->global_rkey); in srp_map_sg_dma()
1564 struct srp_target_port *target = ch->target; in srp_map_idb()
1565 struct srp_device *dev = target->srp_host->srp_dev; in srp_map_idb()
1576 state.base_dma_addr = req->indirect_dma_addr; in srp_map_idb()
1579 if (dev->use_fast_reg) { in srp_map_idb()
1581 sg_init_one(idb_sg, req->indirect_desc, idb_len); in srp_map_idb()
1582 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */ in srp_map_idb()
1584 idb_sg->dma_length = idb_sg->length; /* hack^2 */ in srp_map_idb()
1591 return -EINVAL; in srp_map_idb()
1603 struct srp_device *dev = ch->target->srp_host->srp_dev; in srp_check_mapping()
1608 for (i = 0; i < state->ndesc; i++) in srp_check_mapping()
1609 desc_len += be32_to_cpu(req->indirect_desc[i].len); in srp_check_mapping()
1610 if (dev->use_fast_reg) in srp_check_mapping()
1611 for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++) in srp_check_mapping()
1612 mr_len += (*pfr)->mr->length; in srp_check_mapping()
1613 if (desc_len != scsi_bufflen(req->scmnd) || in srp_check_mapping()
1614 mr_len > scsi_bufflen(req->scmnd)) in srp_check_mapping()
1616 scsi_bufflen(req->scmnd), desc_len, mr_len, in srp_check_mapping()
1617 state->ndesc, state->nmdesc); in srp_check_mapping()
1621 * srp_map_data() - map SCSI data buffer onto an SRP request
1633 struct srp_target_port *target = ch->target; in srp_map_data()
1635 struct srp_cmd *cmd = req->cmd->buf; in srp_map_data()
1646 req->cmd->num_sge = 1; in srp_map_data()
1648 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE) in srp_map_data()
1649 return sizeof(struct srp_cmd) + cmd->add_cdb_len; in srp_map_data()
1651 if (scmnd->sc_data_direction != DMA_FROM_DEVICE && in srp_map_data()
1652 scmnd->sc_data_direction != DMA_TO_DEVICE) { in srp_map_data()
1653 shost_printk(KERN_WARNING, target->scsi_host, in srp_map_data()
1655 scmnd->sc_data_direction); in srp_map_data()
1656 return -EINVAL; in srp_map_data()
1663 dev = target->srp_host->srp_dev; in srp_map_data()
1664 ibdev = dev->dev; in srp_map_data()
1666 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction); in srp_map_data()
1668 return -EIO; in srp_map_data()
1670 if (ch->use_imm_data && in srp_map_data()
1671 count <= ch->max_imm_sge && in srp_map_data()
1672 SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len && in srp_map_data()
1673 scmnd->sc_data_direction == DMA_TO_DEVICE) { in srp_map_data()
1675 struct ib_sge *sge = &req->cmd->sge[1]; in srp_map_data()
1679 req->nmdesc = 0; in srp_map_data()
1680 buf = (void *)cmd->add_data + cmd->add_cdb_len; in srp_map_data()
1681 buf->len = cpu_to_be32(data_len); in srp_map_data()
1686 sge[i].lkey = target->lkey; in srp_map_data()
1688 req->cmd->num_sge += count; in srp_map_data()
1693 len = sizeof(struct srp_cmd) + cmd->add_cdb_len + in srp_map_data()
1696 if (count == 1 && target->global_rkey) { in srp_map_data()
1705 buf = (void *)cmd->add_data + cmd->add_cdb_len; in srp_map_data()
1706 buf->va = cpu_to_be64(sg_dma_address(scat)); in srp_map_data()
1707 buf->key = cpu_to_be32(target->global_rkey); in srp_map_data()
1708 buf->len = cpu_to_be32(sg_dma_len(scat)); in srp_map_data()
1710 req->nmdesc = 0; in srp_map_data()
1718 indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len; in srp_map_data()
1720 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr, in srp_map_data()
1721 target->indirect_size, DMA_TO_DEVICE); in srp_map_data()
1724 state.desc = req->indirect_desc; in srp_map_data()
1725 if (dev->use_fast_reg) in srp_map_data()
1729 req->nmdesc = state.nmdesc; in srp_map_data()
1748 * Memory registration collapsed the sg-list into one entry, in srp_map_data()
1753 buf = (void *)cmd->add_data + cmd->add_cdb_len; in srp_map_data()
1754 *buf = req->indirect_desc[0]; in srp_map_data()
1758 if (unlikely(target->cmd_sg_cnt < state.ndesc && in srp_map_data()
1759 !target->allow_ext_sg)) { in srp_map_data()
1760 shost_printk(KERN_ERR, target->scsi_host, in srp_map_data()
1762 ret = -EIO; in srp_map_data()
1766 count = min(state.ndesc, target->cmd_sg_cnt); in srp_map_data()
1771 len = sizeof(struct srp_cmd) + cmd->add_cdb_len + in srp_map_data()
1775 memcpy(indirect_hdr->desc_list, req->indirect_desc, in srp_map_data()
1778 if (!target->global_rkey) { in srp_map_data()
1783 req->nmdesc++; in srp_map_data()
1785 idb_rkey = cpu_to_be32(target->global_rkey); in srp_map_data()
1788 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr); in srp_map_data()
1789 indirect_hdr->table_desc.key = idb_rkey; in srp_map_data()
1790 indirect_hdr->table_desc.len = cpu_to_be32(table_len); in srp_map_data()
1791 indirect_hdr->len = cpu_to_be32(state.total_len); in srp_map_data()
1793 if (scmnd->sc_data_direction == DMA_TO_DEVICE) in srp_map_data()
1794 cmd->data_out_desc_cnt = count; in srp_map_data()
1796 cmd->data_in_desc_cnt = count; in srp_map_data()
1798 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len, in srp_map_data()
1802 if (scmnd->sc_data_direction == DMA_TO_DEVICE) in srp_map_data()
1803 cmd->buf_fmt = fmt << 4; in srp_map_data()
1805 cmd->buf_fmt = fmt; in srp_map_data()
1811 if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size) in srp_map_data()
1812 ret = -E2BIG; in srp_map_data()
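
srp_map_data() above picks one of three SRP buffer formats: immediate data for writes that fit when the target supports it, a single direct descriptor when the mapped S/G list is one entry and a global rkey is available, and an indirect descriptor table otherwise. A sketch of that decision with illustrative parameter names, making no claim to match the driver's exact thresholds:

#include <stdbool.h>

enum buf_fmt { FMT_IMMEDIATE, FMT_DIRECT, FMT_INDIRECT };

static enum buf_fmt choose_buf_fmt(int sg_count, int data_len, bool is_write,
				   bool use_imm_data, int max_imm_sge,
				   int max_imm_len, bool have_global_rkey)
{
	if (use_imm_data && is_write && sg_count <= max_imm_sge &&
	    data_len <= max_imm_len)
		return FMT_IMMEDIATE;		/* data travels in the IU */
	if (sg_count == 1 && have_global_rkey)
		return FMT_DIRECT;		/* one direct descriptor */
	return FMT_INDIRECT;			/* descriptor table needed */
}
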
1824 spin_lock_irqsave(&ch->lock, flags); in srp_put_tx_iu()
1825 list_add(&iu->list, &ch->free_tx); in srp_put_tx_iu()
1827 ++ch->req_lim; in srp_put_tx_iu()
1828 spin_unlock_irqrestore(&ch->lock, flags); in srp_put_tx_iu()
1832 * Must be called with ch->lock held to protect req_lim and free_tx.
1838 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1840 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1841 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1847 struct srp_target_port *target = ch->target; in __srp_get_tx_iu()
1851 lockdep_assert_held(&ch->lock); in __srp_get_tx_iu()
1853 ib_process_cq_direct(ch->send_cq, -1); in __srp_get_tx_iu()
1855 if (list_empty(&ch->free_tx)) in __srp_get_tx_iu()
1860 if (ch->req_lim <= rsv) { in __srp_get_tx_iu()
1861 ++target->zero_req_lim; in __srp_get_tx_iu()
1865 --ch->req_lim; in __srp_get_tx_iu()
1868 iu = list_first_entry(&ch->free_tx, struct srp_iu, list); in __srp_get_tx_iu()
1869 list_del(&iu->list); in __srp_get_tx_iu()
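
The __srp_get_tx_iu() comment and code above reserve request-limit credits per IU type: responses consume no credit, commands must leave room for task-management IUs, and task-management IUs may dip into that reserve. A user-space sketch of the reservation rule (constants and names are illustrative stand-ins for the driver's SRP_* values):

enum iu_type { IU_CMD, IU_TSK_MGMT, IU_RSP };

#define TSK_MGMT_RESERVE 1	/* credits held back for task management */

/* Returns 0 if a credit was taken (or none was needed), -1 otherwise. */
static int take_credit(int *req_lim, int *zero_req_lim, enum iu_type type)
{
	int rsv = (type == IU_CMD) ? TSK_MGMT_RESERVE : 0;

	if (type == IU_RSP)
		return 0;		/* responses to target requests are free */
	if (*req_lim <= rsv) {
		++(*zero_req_lim);	/* statistic: ran out of credits */
		return -1;
	}
	--(*req_lim);
	return 0;
}
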
1875 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
1880 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe); in srp_send_done()
1881 struct srp_rdma_ch *ch = cq->cq_context; in srp_send_done()
1883 if (unlikely(wc->status != IB_WC_SUCCESS)) { in srp_send_done()
1888 lockdep_assert_held(&ch->lock); in srp_send_done()
1890 list_add(&iu->list, &ch->free_tx); in srp_send_done()
1894 * srp_post_send() - send an SRP information unit
1901 struct srp_target_port *target = ch->target; in srp_post_send()
1904 if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE)) in srp_post_send()
1905 return -EINVAL; in srp_post_send()
1907 iu->sge[0].addr = iu->dma; in srp_post_send()
1908 iu->sge[0].length = len; in srp_post_send()
1909 iu->sge[0].lkey = target->lkey; in srp_post_send()
1911 iu->cqe.done = srp_send_done; in srp_post_send()
1914 wr.wr_cqe = &iu->cqe; in srp_post_send()
1915 wr.sg_list = &iu->sge[0]; in srp_post_send()
1916 wr.num_sge = iu->num_sge; in srp_post_send()
1920 return ib_post_send(ch->qp, &wr, NULL); in srp_post_send()
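
srp_post_send() builds a single-SGE send work request and posts it to the channel's QP. A user-space libibverbs analogue, assuming a connected QP and an already-registered buffer; this is a sketch, not the driver's kernel-verbs code:

#include <infiniband/verbs.h>
#include <stdint.h>

static int post_single_sge_send(struct ibv_qp *qp, struct ibv_mr *mr,
				void *buf, uint32_t len, uint64_t wr_id)
{
	struct ibv_sge sge = {
		.addr	= (uintptr_t)buf,
		.length	= len,
		.lkey	= mr->lkey,
	};
	struct ibv_send_wr wr = {
		.wr_id		= wr_id,
		.sg_list	= &sge,
		.num_sge	= 1,
		.opcode		= IBV_WR_SEND,
		.send_flags	= IBV_SEND_SIGNALED,
	}, *bad_wr;

	return ibv_post_send(qp, &wr, &bad_wr);
}
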
1925 struct srp_target_port *target = ch->target; in srp_post_recv()
1929 list.addr = iu->dma; in srp_post_recv()
1930 list.length = iu->size; in srp_post_recv()
1931 list.lkey = target->lkey; in srp_post_recv()
1933 iu->cqe.done = srp_recv_done; in srp_post_recv()
1936 wr.wr_cqe = &iu->cqe; in srp_post_recv()
1940 return ib_post_recv(ch->qp, &wr, NULL); in srp_post_recv()
1945 struct srp_target_port *target = ch->target; in srp_process_rsp()
1950 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { in srp_process_rsp()
1951 spin_lock_irqsave(&ch->lock, flags); in srp_process_rsp()
1952 ch->req_lim += be32_to_cpu(rsp->req_lim_delta); in srp_process_rsp()
1953 if (rsp->tag == ch->tsk_mgmt_tag) { in srp_process_rsp()
1954 ch->tsk_mgmt_status = -1; in srp_process_rsp()
1955 if (be32_to_cpu(rsp->resp_data_len) >= 4) in srp_process_rsp()
1956 ch->tsk_mgmt_status = rsp->data[3]; in srp_process_rsp()
1957 complete(&ch->tsk_mgmt_done); in srp_process_rsp()
1959 shost_printk(KERN_ERR, target->scsi_host, in srp_process_rsp()
1961 rsp->tag); in srp_process_rsp()
1963 spin_unlock_irqrestore(&ch->lock, flags); in srp_process_rsp()
1965 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag); in srp_process_rsp()
1966 if (scmnd && scmnd->host_scribble) { in srp_process_rsp()
1967 req = (void *)scmnd->host_scribble; in srp_process_rsp()
1973 shost_printk(KERN_ERR, target->scsi_host, in srp_process_rsp()
1975 rsp->tag, ch - target->ch, ch->qp->qp_num); in srp_process_rsp()
1977 spin_lock_irqsave(&ch->lock, flags); in srp_process_rsp()
1978 ch->req_lim += be32_to_cpu(rsp->req_lim_delta); in srp_process_rsp()
1979 spin_unlock_irqrestore(&ch->lock, flags); in srp_process_rsp()
1983 scmnd->result = rsp->status; in srp_process_rsp()
1985 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) { in srp_process_rsp()
1986 memcpy(scmnd->sense_buffer, rsp->data + in srp_process_rsp()
1987 be32_to_cpu(rsp->resp_data_len), in srp_process_rsp()
1988 min_t(int, be32_to_cpu(rsp->sense_data_len), in srp_process_rsp()
1992 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER)) in srp_process_rsp()
1993 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); in srp_process_rsp()
1994 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER)) in srp_process_rsp()
1995 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt)); in srp_process_rsp()
1996 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER)) in srp_process_rsp()
1997 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt)); in srp_process_rsp()
1998 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER)) in srp_process_rsp()
1999 scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt)); in srp_process_rsp()
2002 be32_to_cpu(rsp->req_lim_delta)); in srp_process_rsp()
2004 scmnd->host_scribble = NULL; in srp_process_rsp()
2005 scmnd->scsi_done(scmnd); in srp_process_rsp()
2012 struct srp_target_port *target = ch->target; in srp_response_common()
2013 struct ib_device *dev = target->srp_host->srp_dev->dev; in srp_response_common()
2018 spin_lock_irqsave(&ch->lock, flags); in srp_response_common()
2019 ch->req_lim += req_delta; in srp_response_common()
2021 spin_unlock_irqrestore(&ch->lock, flags); in srp_response_common()
2024 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_response_common()
2029 iu->num_sge = 1; in srp_response_common()
2030 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE); in srp_response_common()
2031 memcpy(iu->buf, rsp, len); in srp_response_common()
2032 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE); in srp_response_common()
2036 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_response_common()
2049 .tag = req->tag, in srp_process_cred_req()
2051 s32 delta = be32_to_cpu(req->req_lim_delta); in srp_process_cred_req()
2054 shost_printk(KERN_ERR, ch->target->scsi_host, PFX in srp_process_cred_req()
2061 struct srp_target_port *target = ch->target; in srp_process_aer_req()
2064 .tag = req->tag, in srp_process_aer_req()
2066 s32 delta = be32_to_cpu(req->req_lim_delta); in srp_process_aer_req()
2068 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_process_aer_req()
2069 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun)); in srp_process_aer_req()
2072 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_process_aer_req()
2078 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe); in srp_recv_done()
2079 struct srp_rdma_ch *ch = cq->cq_context; in srp_recv_done()
2080 struct srp_target_port *target = ch->target; in srp_recv_done()
2081 struct ib_device *dev = target->srp_host->srp_dev->dev; in srp_recv_done()
2085 if (unlikely(wc->status != IB_WC_SUCCESS)) { in srp_recv_done()
2090 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len, in srp_recv_done()
2093 opcode = *(u8 *) iu->buf; in srp_recv_done()
2096 shost_printk(KERN_ERR, target->scsi_host, in srp_recv_done()
2099 iu->buf, wc->byte_len, true); in srp_recv_done()
2104 srp_process_rsp(ch, iu->buf); in srp_recv_done()
2108 srp_process_cred_req(ch, iu->buf); in srp_recv_done()
2112 srp_process_aer_req(ch, iu->buf); in srp_recv_done()
2117 shost_printk(KERN_WARNING, target->scsi_host, in srp_recv_done()
2122 shost_printk(KERN_WARNING, target->scsi_host, in srp_recv_done()
2127 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len, in srp_recv_done()
2132 shost_printk(KERN_ERR, target->scsi_host, in srp_recv_done()
2137 * srp_tl_err_work() - handle a transport layer error
2141 * hence the target->rport test.
2148 if (target->rport) in srp_tl_err_work()
2149 srp_start_tl_fail_timers(target->rport); in srp_tl_err_work()
2155 struct srp_rdma_ch *ch = cq->cq_context; in srp_handle_qp_err()
2156 struct srp_target_port *target = ch->target; in srp_handle_qp_err()
2158 if (ch->connected && !target->qp_in_error) { in srp_handle_qp_err()
2159 shost_printk(KERN_ERR, target->scsi_host, in srp_handle_qp_err()
2161 opname, ib_wc_status_msg(wc->status), wc->status, in srp_handle_qp_err()
2162 wc->wr_cqe); in srp_handle_qp_err()
2163 queue_work(system_long_wq, &target->tl_err_work); in srp_handle_qp_err()
2165 target->qp_in_error = true; in srp_handle_qp_err()
2181 scmnd->result = srp_chkready(target->rport); in srp_queuecommand()
2182 if (unlikely(scmnd->result)) in srp_queuecommand()
2185 WARN_ON_ONCE(scmnd->request->tag < 0); in srp_queuecommand()
2186 tag = blk_mq_unique_tag(scmnd->request); in srp_queuecommand()
2187 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; in srp_queuecommand()
2189 WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n", in srp_queuecommand()
2190 dev_name(&shost->shost_gendev), tag, idx, in srp_queuecommand()
2191 target->req_ring_size); in srp_queuecommand()
2193 spin_lock_irqsave(&ch->lock, flags); in srp_queuecommand()
2195 spin_unlock_irqrestore(&ch->lock, flags); in srp_queuecommand()
2200 req = &ch->req_ring[idx]; in srp_queuecommand()
2201 dev = target->srp_host->srp_dev->dev; in srp_queuecommand()
2202 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len, in srp_queuecommand()
2205 scmnd->host_scribble = (void *) req; in srp_queuecommand()
2207 cmd = iu->buf; in srp_queuecommand()
2210 cmd->opcode = SRP_CMD; in srp_queuecommand()
2211 int_to_scsilun(scmnd->device->lun, &cmd->lun); in srp_queuecommand()
2212 cmd->tag = tag; in srp_queuecommand()
2213 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); in srp_queuecommand()
2214 if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) { in srp_queuecommand()
2215 cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb), in srp_queuecommand()
2217 if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN)) in srp_queuecommand()
2221 req->scmnd = scmnd; in srp_queuecommand()
2222 req->cmd = iu; in srp_queuecommand()
2226 shost_printk(KERN_ERR, target->scsi_host, in srp_queuecommand()
2229 * If we ran out of memory descriptors (-ENOMEM) because an in srp_queuecommand()
2231 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer in srp_queuecommand()
2234 scmnd->result = len == -ENOMEM ? in srp_queuecommand()
2239 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len, in srp_queuecommand()
2243 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); in srp_queuecommand()
2244 scmnd->result = DID_ERROR << 16; in srp_queuecommand()
2260 req->scmnd = NULL; in srp_queuecommand()
2263 if (scmnd->result) { in srp_queuecommand()
2264 scmnd->scsi_done(scmnd); in srp_queuecommand()
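
srp_queuecommand() derives both the RDMA channel (blk_mq_unique_tag_to_hwq()) and the per-channel request slot from a single block-layer unique tag. A sketch of packing and unpacking such a tag, assuming the common encoding of the hardware-queue number in the upper 16 bits and the per-queue tag in the lower 16 bits (an assumption, not taken from these lines):

#include <stdint.h>

#define UNIQUE_TAG_BITS 16
#define UNIQUE_TAG_MASK ((1U << UNIQUE_TAG_BITS) - 1)

/* Pack a (hardware queue, per-queue tag) pair into one 32-bit value. */
static inline uint32_t make_unique_tag(uint16_t hwq, uint16_t tag)
{
	return ((uint32_t)hwq << UNIQUE_TAG_BITS) | tag;
}

static inline uint16_t unique_tag_to_hwq(uint32_t unique)
{
	return unique >> UNIQUE_TAG_BITS;
}

static inline uint16_t unique_tag_to_tag(uint32_t unique)
{
	return unique & UNIQUE_TAG_MASK;
}
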
2279 struct srp_target_port *target = ch->target; in srp_alloc_iu_bufs()
2282 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring), in srp_alloc_iu_bufs()
2284 if (!ch->rx_ring) in srp_alloc_iu_bufs()
2286 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring), in srp_alloc_iu_bufs()
2288 if (!ch->tx_ring) in srp_alloc_iu_bufs()
2291 for (i = 0; i < target->queue_size; ++i) { in srp_alloc_iu_bufs()
2292 ch->rx_ring[i] = srp_alloc_iu(target->srp_host, in srp_alloc_iu_bufs()
2293 ch->max_ti_iu_len, in srp_alloc_iu_bufs()
2295 if (!ch->rx_ring[i]) in srp_alloc_iu_bufs()
2299 for (i = 0; i < target->queue_size; ++i) { in srp_alloc_iu_bufs()
2300 ch->tx_ring[i] = srp_alloc_iu(target->srp_host, in srp_alloc_iu_bufs()
2301 ch->max_it_iu_len, in srp_alloc_iu_bufs()
2303 if (!ch->tx_ring[i]) in srp_alloc_iu_bufs()
2306 list_add(&ch->tx_ring[i]->list, &ch->free_tx); in srp_alloc_iu_bufs()
2312 for (i = 0; i < target->queue_size; ++i) { in srp_alloc_iu_bufs()
2313 srp_free_iu(target->srp_host, ch->rx_ring[i]); in srp_alloc_iu_bufs()
2314 srp_free_iu(target->srp_host, ch->tx_ring[i]); in srp_alloc_iu_bufs()
2319 kfree(ch->tx_ring); in srp_alloc_iu_bufs()
2320 ch->tx_ring = NULL; in srp_alloc_iu_bufs()
2321 kfree(ch->rx_ring); in srp_alloc_iu_bufs()
2322 ch->rx_ring = NULL; in srp_alloc_iu_bufs()
2324 return -ENOMEM; in srp_alloc_iu_bufs()
2341 * Set target->rq_tmo_jiffies to one second more than the largest time in srp_compute_rq_tmo()
2343 * C9-140..142 in the IBTA spec for more information about how to in srp_compute_rq_tmo()
2346 T_tr_ns = 4096 * (1ULL << qp_attr->timeout); in srp_compute_rq_tmo()
2347 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns; in srp_compute_rq_tmo()
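
The two lines above compute the worst-case completion time from the QP's local ACK timeout exponent and retry count, and the surrounding comment says the resulting rq_tmo is one second more than that. A runnable user-space version of the same arithmetic (the input values in main() are examples only):

#include <stdio.h>
#include <stdint.h>

/* Worst-case time before an error completion, per the IBTA local ACK
 * timeout formula: T_tr = 4.096 us * 2^timeout, and each of retry_cnt
 * retries can take up to 4 * T_tr. One extra second of slack is added,
 * mirroring the intent of the comment above. */
static unsigned long rq_tmo_ms(uint8_t timeout, uint8_t retry_cnt)
{
	uint64_t T_tr_ns = 4096ULL << timeout;	/* 4.096 us * 2^timeout */
	uint64_t max_compl_ns = (uint64_t)retry_cnt * 4 * T_tr_ns;

	return max_compl_ns / 1000000 + 1000;	/* ns -> ms, plus 1 s */
}

int main(void)
{
	/* e.g. local ACK timeout 14 (~67 ms) and 7 retries -> ~2.9 s */
	printf("%lu ms\n", rq_tmo_ms(14, 7));
	return 0;
}
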
2358 struct srp_target_port *target = ch->target; in srp_cm_rep_handler()
2364 if (lrsp->opcode == SRP_LOGIN_RSP) { in srp_cm_rep_handler()
2365 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len); in srp_cm_rep_handler()
2366 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta); in srp_cm_rep_handler()
2367 ch->use_imm_data = srp_use_imm_data && in srp_cm_rep_handler()
2368 (lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP); in srp_cm_rep_handler()
2369 ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, in srp_cm_rep_handler()
2370 ch->use_imm_data, in srp_cm_rep_handler()
2371 target->max_it_iu_size); in srp_cm_rep_handler()
2372 WARN_ON_ONCE(ch->max_it_iu_len > in srp_cm_rep_handler()
2373 be32_to_cpu(lrsp->max_it_iu_len)); in srp_cm_rep_handler()
2375 if (ch->use_imm_data) in srp_cm_rep_handler()
2376 shost_printk(KERN_DEBUG, target->scsi_host, in srp_cm_rep_handler()
2381 * bounce requests back to the SCSI mid-layer. in srp_cm_rep_handler()
2383 target->scsi_host->can_queue in srp_cm_rep_handler()
2384 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE, in srp_cm_rep_handler()
2385 target->scsi_host->can_queue); in srp_cm_rep_handler()
2386 target->scsi_host->cmd_per_lun in srp_cm_rep_handler()
2387 = min_t(int, target->scsi_host->can_queue, in srp_cm_rep_handler()
2388 target->scsi_host->cmd_per_lun); in srp_cm_rep_handler()
2390 shost_printk(KERN_WARNING, target->scsi_host, in srp_cm_rep_handler()
2391 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode); in srp_cm_rep_handler()
2392 ret = -ECONNRESET; in srp_cm_rep_handler()
2396 if (!ch->rx_ring) { in srp_cm_rep_handler()
2402 for (i = 0; i < target->queue_size; i++) { in srp_cm_rep_handler()
2403 struct srp_iu *iu = ch->rx_ring[i]; in srp_cm_rep_handler()
2410 if (!target->using_rdma_cm) { in srp_cm_rep_handler()
2411 ret = -ENOMEM; in srp_cm_rep_handler()
2416 qp_attr->qp_state = IB_QPS_RTR; in srp_cm_rep_handler()
2421 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); in srp_cm_rep_handler()
2425 qp_attr->qp_state = IB_QPS_RTS; in srp_cm_rep_handler()
2430 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask); in srp_cm_rep_handler()
2432 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); in srp_cm_rep_handler()
2443 ch->status = ret; in srp_cm_rep_handler()
2450 struct srp_target_port *target = ch->target; in srp_ib_cm_rej_handler()
2451 struct Scsi_Host *shost = target->scsi_host; in srp_ib_cm_rej_handler()
2456 switch (event->param.rej_rcvd.reason) { in srp_ib_cm_rej_handler()
2458 cpi = event->param.rej_rcvd.ari; in srp_ib_cm_rej_handler()
2459 dlid = be16_to_cpu(cpi->redirect_lid); in srp_ib_cm_rej_handler()
2460 sa_path_set_dlid(&ch->ib_cm.path, dlid); in srp_ib_cm_rej_handler()
2461 ch->ib_cm.path.pkey = cpi->redirect_pkey; in srp_ib_cm_rej_handler()
2462 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff; in srp_ib_cm_rej_handler()
2463 memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16); in srp_ib_cm_rej_handler()
2465 ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT; in srp_ib_cm_rej_handler()
2470 union ib_gid *dgid = &ch->ib_cm.path.dgid; in srp_ib_cm_rej_handler()
2477 memcpy(dgid->raw, event->param.rej_rcvd.ari, 16); in srp_ib_cm_rej_handler()
2481 be64_to_cpu(dgid->global.subnet_prefix), in srp_ib_cm_rej_handler()
2482 be64_to_cpu(dgid->global.interface_id)); in srp_ib_cm_rej_handler()
2484 ch->status = SRP_PORT_REDIRECT; in srp_ib_cm_rej_handler()
2488 ch->status = -ECONNRESET; in srp_ib_cm_rej_handler()
2495 ch->status = -ECONNRESET; in srp_ib_cm_rej_handler()
2499 opcode = *(u8 *) event->private_data; in srp_ib_cm_rej_handler()
2501 struct srp_login_rej *rej = event->private_data; in srp_ib_cm_rej_handler()
2502 u32 reason = be32_to_cpu(rej->reason); in srp_ib_cm_rej_handler()
2510 target->sgid.raw, in srp_ib_cm_rej_handler()
2511 target->ib_cm.orig_dgid.raw, in srp_ib_cm_rej_handler()
2517 ch->status = -ECONNRESET; in srp_ib_cm_rej_handler()
2522 ch->status = SRP_STALE_CONN; in srp_ib_cm_rej_handler()
2527 event->param.rej_rcvd.reason); in srp_ib_cm_rej_handler()
2528 ch->status = -ECONNRESET; in srp_ib_cm_rej_handler()
2535 struct srp_rdma_ch *ch = cm_id->context; in srp_ib_cm_handler()
2536 struct srp_target_port *target = ch->target; in srp_ib_cm_handler()
2539 switch (event->event) { in srp_ib_cm_handler()
2541 shost_printk(KERN_DEBUG, target->scsi_host, in srp_ib_cm_handler()
2544 ch->status = -ECONNRESET; in srp_ib_cm_handler()
2549 srp_cm_rep_handler(cm_id, event->private_data, ch); in srp_ib_cm_handler()
2553 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); in srp_ib_cm_handler()
2560 shost_printk(KERN_WARNING, target->scsi_host, in srp_ib_cm_handler()
2561 PFX "DREQ received - connection closed\n"); in srp_ib_cm_handler()
2562 ch->connected = false; in srp_ib_cm_handler()
2564 shost_printk(KERN_ERR, target->scsi_host, in srp_ib_cm_handler()
2566 queue_work(system_long_wq, &target->tl_err_work); in srp_ib_cm_handler()
2570 shost_printk(KERN_ERR, target->scsi_host, in srp_ib_cm_handler()
2574 ch->status = 0; in srp_ib_cm_handler()
2583 shost_printk(KERN_WARNING, target->scsi_host, in srp_ib_cm_handler()
2584 PFX "Unhandled CM event %d\n", event->event); in srp_ib_cm_handler()
2589 complete(&ch->done); in srp_ib_cm_handler()
2597 struct srp_target_port *target = ch->target; in srp_rdma_cm_rej_handler()
2598 struct Scsi_Host *shost = target->scsi_host; in srp_rdma_cm_rej_handler()
2601 switch (event->status) { in srp_rdma_cm_rej_handler()
2605 ch->status = -ECONNRESET; in srp_rdma_cm_rej_handler()
2609 opcode = *(u8 *) event->param.conn.private_data; in srp_rdma_cm_rej_handler()
2613 event->param.conn.private_data; in srp_rdma_cm_rej_handler()
2614 u32 reason = be32_to_cpu(rej->reason); in srp_rdma_cm_rej_handler()
2627 ch->status = -ECONNRESET; in srp_rdma_cm_rej_handler()
2633 ch->status = SRP_STALE_CONN; in srp_rdma_cm_rej_handler()
2638 event->status); in srp_rdma_cm_rej_handler()
2639 ch->status = -ECONNRESET; in srp_rdma_cm_rej_handler()
2647 struct srp_rdma_ch *ch = cm_id->context; in srp_rdma_cm_handler()
2648 struct srp_target_port *target = ch->target; in srp_rdma_cm_handler()
2651 switch (event->event) { in srp_rdma_cm_handler()
2653 ch->status = 0; in srp_rdma_cm_handler()
2658 ch->status = -ENXIO; in srp_rdma_cm_handler()
2663 ch->status = 0; in srp_rdma_cm_handler()
2669 ch->status = -EHOSTUNREACH; in srp_rdma_cm_handler()
2674 shost_printk(KERN_DEBUG, target->scsi_host, in srp_rdma_cm_handler()
2677 ch->status = -ECONNRESET; in srp_rdma_cm_handler()
2682 srp_cm_rep_handler(NULL, event->param.conn.private_data, ch); in srp_rdma_cm_handler()
2686 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); in srp_rdma_cm_handler()
2693 if (ch->connected) { in srp_rdma_cm_handler()
2694 shost_printk(KERN_WARNING, target->scsi_host, in srp_rdma_cm_handler()
2696 rdma_disconnect(ch->rdma_cm.cm_id); in srp_rdma_cm_handler()
2698 ch->status = 0; in srp_rdma_cm_handler()
2699 queue_work(system_long_wq, &target->tl_err_work); in srp_rdma_cm_handler()
2704 shost_printk(KERN_ERR, target->scsi_host, in srp_rdma_cm_handler()
2708 ch->status = 0; in srp_rdma_cm_handler()
2712 shost_printk(KERN_WARNING, target->scsi_host, in srp_rdma_cm_handler()
2713 PFX "Unhandled CM event %d\n", event->event); in srp_rdma_cm_handler()
2718 complete(&ch->done); in srp_rdma_cm_handler()
2724 * srp_change_queue_depth - setting device queue depth
2733 if (!sdev->tagged_supported) in srp_change_queue_depth()
2741 struct srp_target_port *target = ch->target; in srp_send_tsk_mgmt()
2742 struct srp_rport *rport = target->rport; in srp_send_tsk_mgmt()
2743 struct ib_device *dev = target->srp_host->srp_dev->dev; in srp_send_tsk_mgmt()
2748 if (!ch->connected || target->qp_in_error) in srp_send_tsk_mgmt()
2749 return -1; in srp_send_tsk_mgmt()
2755 mutex_lock(&rport->mutex); in srp_send_tsk_mgmt()
2756 spin_lock_irq(&ch->lock); in srp_send_tsk_mgmt()
2758 spin_unlock_irq(&ch->lock); in srp_send_tsk_mgmt()
2761 mutex_unlock(&rport->mutex); in srp_send_tsk_mgmt()
2763 return -1; in srp_send_tsk_mgmt()
2766 iu->num_sge = 1; in srp_send_tsk_mgmt()
2768 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt, in srp_send_tsk_mgmt()
2770 tsk_mgmt = iu->buf; in srp_send_tsk_mgmt()
2773 tsk_mgmt->opcode = SRP_TSK_MGMT; in srp_send_tsk_mgmt()
2774 int_to_scsilun(lun, &tsk_mgmt->lun); in srp_send_tsk_mgmt()
2775 tsk_mgmt->tsk_mgmt_func = func; in srp_send_tsk_mgmt()
2776 tsk_mgmt->task_tag = req_tag; in srp_send_tsk_mgmt()
2778 spin_lock_irq(&ch->lock); in srp_send_tsk_mgmt()
2779 ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT; in srp_send_tsk_mgmt()
2780 tsk_mgmt->tag = ch->tsk_mgmt_tag; in srp_send_tsk_mgmt()
2781 spin_unlock_irq(&ch->lock); in srp_send_tsk_mgmt()
2783 init_completion(&ch->tsk_mgmt_done); in srp_send_tsk_mgmt()
2785 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, in srp_send_tsk_mgmt()
2789 mutex_unlock(&rport->mutex); in srp_send_tsk_mgmt()
2791 return -1; in srp_send_tsk_mgmt()
2793 res = wait_for_completion_timeout(&ch->tsk_mgmt_done, in srp_send_tsk_mgmt()
2796 *status = ch->tsk_mgmt_status; in srp_send_tsk_mgmt()
2797 mutex_unlock(&rport->mutex); in srp_send_tsk_mgmt()
2801 return res > 0 ? 0 : -1; in srp_send_tsk_mgmt()
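
srp_send_tsk_mgmt() generates task-management tags with the SRP_TAG_TSK_MGMT bit set, and srp_process_rsp() uses that bit to route responses away from the normal SCSI command path. A sketch of the one-reserved-bit tag scheme (the bit position below is illustrative, not the driver's actual constant):

#include <stdint.h>
#include <stdbool.h>

/* Reserve one high tag bit for task-management responses so that SCSI
 * command tags and task-management tags can never collide. */
#define TAG_TSK_MGMT (1ULL << 63)

static inline uint64_t next_tsk_mgmt_tag(uint64_t *counter)
{
	return (++(*counter)) | TAG_TSK_MGMT;
}

static inline bool tag_is_tsk_mgmt(uint64_t tag)
{
	return tag & TAG_TSK_MGMT;
}
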
2806 struct srp_target_port *target = host_to_target(scmnd->device->host); in srp_abort()
2807 struct srp_request *req = (struct srp_request *) scmnd->host_scribble; in srp_abort()
2813 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); in srp_abort()
2817 tag = blk_mq_unique_tag(scmnd->request); in srp_abort()
2819 if (WARN_ON_ONCE(ch_idx >= target->ch_count)) in srp_abort()
2821 ch = &target->ch[ch_idx]; in srp_abort()
2824 shost_printk(KERN_ERR, target->scsi_host, in srp_abort()
2826 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, in srp_abort()
2829 else if (target->rport->state == SRP_RPORT_LOST) in srp_abort()
2835 scmnd->result = DID_ABORT << 16; in srp_abort()
2836 scmnd->scsi_done(scmnd); in srp_abort()
2844 struct srp_target_port *target = host_to_target(scmnd->device->host); in srp_reset_device()
2848 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); in srp_reset_device()
2850 ch = &target->ch[0]; in srp_reset_device()
2851 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun, in srp_reset_device()
2862 struct srp_target_port *target = host_to_target(scmnd->device->host); in srp_reset_host()
2864 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n"); in srp_reset_host()
2866 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED; in srp_reset_host()
2871 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); in srp_target_alloc()
2874 if (target->target_can_queue) in srp_target_alloc()
2875 starget->can_queue = target->target_can_queue; in srp_target_alloc()
2881 struct Scsi_Host *shost = sdev->host; in srp_slave_configure()
2883 struct request_queue *q = sdev->request_queue; in srp_slave_configure()
2886 if (sdev->type == TYPE_DISK) { in srp_slave_configure()
2887 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies); in srp_slave_configure()
2899 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext)); in show_id_ext()
2907 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid)); in show_ioc_guid()
2915 if (target->using_rdma_cm) in show_service_id()
2916 return -ENOENT; in show_service_id()
2918 be64_to_cpu(target->ib_cm.service_id)); in show_service_id()
2926 if (target->using_rdma_cm) in show_pkey()
2927 return -ENOENT; in show_pkey()
2928 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey)); in show_pkey()
2936 return sprintf(buf, "%pI6\n", target->sgid.raw); in show_sgid()
2943 struct srp_rdma_ch *ch = &target->ch[0]; in show_dgid()
2945 if (target->using_rdma_cm) in show_dgid()
2946 return -ENOENT; in show_dgid()
2947 return sprintf(buf, "%pI6\n", ch->ib_cm.path.dgid.raw); in show_dgid()
2955 if (target->using_rdma_cm) in show_orig_dgid()
2956 return -ENOENT; in show_orig_dgid()
2957 return sprintf(buf, "%pI6\n", target->ib_cm.orig_dgid.raw); in show_orig_dgid()
2967 for (i = 0; i < target->ch_count; i++) { in show_req_lim()
2968 ch = &target->ch[i]; in show_req_lim()
2969 req_lim = min(req_lim, ch->req_lim); in show_req_lim()
2979 return sprintf(buf, "%d\n", target->zero_req_lim); in show_zero_req_lim()
2987 return sprintf(buf, "%d\n", target->srp_host->port); in show_local_ib_port()
2996 dev_name(&target->srp_host->srp_dev->dev->dev)); in show_local_ib_device()
3004 return sprintf(buf, "%d\n", target->ch_count); in show_ch_count()
3012 return sprintf(buf, "%d\n", target->comp_vector); in show_comp_vector()
3020 return sprintf(buf, "%d\n", target->tl_retry_count); in show_tl_retry_count()
3028 return sprintf(buf, "%u\n", target->cmd_sg_cnt); in show_cmd_sg_entries()
3036 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false"); in show_allow_ext_sg()
3092 .this_id = -1,
3112  * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port in srp_add_target()
3113  *    removal has been scheduled. in srp_add_target()
3114  * 0 and target->state != SRP_TARGET_REMOVED upon success. in srp_add_target()
3121 target->state = SRP_TARGET_SCANNING; in srp_add_target()
3122 sprintf(target->target_name, "SRP.T10:%016llX", in srp_add_target()
3123 be64_to_cpu(target->id_ext)); in srp_add_target()
3125 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent)) in srp_add_target()
3126 return -ENODEV; in srp_add_target()
3128 memcpy(ids.port_id, &target->id_ext, 8); in srp_add_target()
3129 memcpy(ids.port_id + 8, &target->ioc_guid, 8); in srp_add_target()
3131 rport = srp_rport_add(target->scsi_host, &ids); in srp_add_target()
3133 scsi_remove_host(target->scsi_host); in srp_add_target()
3137 rport->lld_data = target; in srp_add_target()
3138 target->rport = rport; in srp_add_target()
3140 spin_lock(&host->target_lock); in srp_add_target()
3141 list_add_tail(&target->list, &host->target_list); in srp_add_target()
3142 spin_unlock(&host->target_lock); in srp_add_target()
3144 scsi_scan_target(&target->scsi_host->shost_gendev, in srp_add_target()
3145 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL); in srp_add_target()
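	/*
	 * If a channel was disconnected or a QP error occurred while the
	 * initial SCSI scan ran, remove the just-added SCSI host again.
	 * Otherwise promote the target from SRP_TARGET_SCANNING to
	 * SRP_TARGET_LIVE under target->lock; the state check guards against
	 * a concurrent removal that has already set SRP_TARGET_REMOVED.
	 */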
3147 if (srp_connected_ch(target) < target->ch_count || in srp_add_target()
3148 target->qp_in_error) { in srp_add_target()
3149 shost_printk(KERN_INFO, target->scsi_host, in srp_add_target()
3150 PFX "SCSI scan failed - removing SCSI host\n"); in srp_add_target()
3155 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n", in srp_add_target()
3156 dev_name(&target->scsi_host->shost_gendev), in srp_add_target()
3157 srp_sdev_count(target->scsi_host)); in srp_add_target()
3159 spin_lock_irq(&target->lock); in srp_add_target()
3160 if (target->state == SRP_TARGET_SCANNING) in srp_add_target()
3161 target->state = SRP_TARGET_LIVE; in srp_add_target()
3162 spin_unlock_irq(&target->lock); in srp_add_target()
3173 complete(&host->released); in srp_release_dev()
3182 * srp_conn_unique() - check whether the connection to a target is unique
3192 if (target->state == SRP_TARGET_REMOVED) in srp_conn_unique()
3197 spin_lock(&host->target_lock); in srp_conn_unique()
3198 list_for_each_entry(t, &host->target_list, list) { in srp_conn_unique()
3200 target->id_ext == t->id_ext && in srp_conn_unique()
3201 target->ioc_guid == t->ioc_guid && in srp_conn_unique()
3202 target->initiator_ext == t->initiator_ext) { in srp_conn_unique()
3207 spin_unlock(&host->target_lock); in srp_conn_unique()
3284 * srp_parse_in - parse an IP address and port number combination
3291 * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
3292 * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
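 *
 * The implementation below splits the port number off at the last ':'
 * (unless that colon lies inside the IPv6 brackets), hands the remainder
 * to inet_pton_with_scope(), and on failure retries with the surrounding
 * '[' and ']' stripped so the bracketed IPv6 form is accepted as well.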
3302 return -ENOMEM; in srp_parse_in()
3312 addr_end = addr + strlen(addr) - 1; in srp_parse_in()
3320 pr_debug("%s -> %pISpfsc\n", addr_port_str, sa); in srp_parse_in()
3334 int ret = -EINVAL; in srp_parse_options()
3339 return -ENOMEM; in srp_parse_options()
3353 ret = -ENOMEM; in srp_parse_options()
3362 target->id_ext = cpu_to_be64(ull); in srp_parse_options()
3369 ret = -ENOMEM; in srp_parse_options()
3378 target->ioc_guid = cpu_to_be64(ull); in srp_parse_options()
3385 ret = -ENOMEM; in srp_parse_options()
3394 ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16); in srp_parse_options()
3405 target->ib_cm.pkey = cpu_to_be16(token); in srp_parse_options()
3411 ret = -ENOMEM; in srp_parse_options()
3420 target->ib_cm.service_id = cpu_to_be64(ull); in srp_parse_options()
3427 ret = -ENOMEM; in srp_parse_options()
3430 ret = srp_parse_in(net, &target->rdma_cm.src.ss, p, in srp_parse_options()
3437 target->rdma_cm.src_specified = true; in srp_parse_options()
3444 ret = -ENOMEM; in srp_parse_options()
3447 ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p, in srp_parse_options()
3450 ret = -EINVAL; in srp_parse_options()
3456 target->using_rdma_cm = true; in srp_parse_options()
3465 target->scsi_host->max_sectors = token; in srp_parse_options()
3473 target->scsi_host->can_queue = token; in srp_parse_options()
3474 target->queue_size = token + SRP_RSP_SQ_SIZE + in srp_parse_options()
3477 target->scsi_host->cmd_per_lun = token; in srp_parse_options()
3486 target->scsi_host->cmd_per_lun = token; in srp_parse_options()
3495 target->target_can_queue = token; in srp_parse_options()
3510 target->io_class = token; in srp_parse_options()
3516 ret = -ENOMEM; in srp_parse_options()
3525 target->initiator_ext = cpu_to_be64(ull); in srp_parse_options()
3535 target->cmd_sg_cnt = token; in srp_parse_options()
3543 target->allow_ext_sg = !!token; in srp_parse_options()
3553 target->sg_tablesize = token; in srp_parse_options()
3561 target->comp_vector = token; in srp_parse_options()
3570 target->tl_retry_count = token; in srp_parse_options()
3578 target->max_it_iu_size = token; in srp_parse_options()
3586 target->ch_count = token; in srp_parse_options()
3605 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue in srp_parse_options()
3608 target->scsi_host->cmd_per_lun, in srp_parse_options()
3609 target->scsi_host->can_queue); in srp_parse_options()
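/*
 * Illustrative example only (all identifier values are placeholders): user
 * space creates a target by writing a single comma-separated string of the
 * options parsed above into a port's add_target attribute, e.g. for the
 * IB CM path
 *
 *   id_ext=200400a0b8114b90,ioc_guid=0002c9030005f34f,
 *   dgid=fe800000000000000002c9030005f350,pkey=ffff,service_id=0002c9030005f34f
 *
 * (written as one line in practice). For the RDMA CM path the
 * dgid/pkey/service_id triple is replaced by a dest=<addr>:<port> option.
 */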
3625 struct srp_device *srp_dev = host->srp_dev; in srp_create_target()
3626 struct ib_device *ibdev = srp_dev->dev; in srp_create_target()
3635 return -ENOMEM; in srp_create_target()
3637 target_host->transportt = ib_srp_transport_template; in srp_create_target()
3638 target_host->max_channel = 0; in srp_create_target()
3639 target_host->max_id = 1; in srp_create_target()
3640 target_host->max_lun = -1LL; in srp_create_target()
3641 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; in srp_create_target()
3642 target_host->max_segment_size = ib_dma_max_seg_size(ibdev); in srp_create_target()
3644 if (!(ibdev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)) in srp_create_target()
3645 target_host->virt_boundary_mask = ~srp_dev->mr_page_mask; in srp_create_target()
3649 target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET); in srp_create_target()
3650 target->io_class = SRP_REV16A_IB_IO_CLASS; in srp_create_target()
3651 target->scsi_host = target_host; in srp_create_target()
3652 target->srp_host = host; in srp_create_target()
3653 target->lkey = host->srp_dev->pd->local_dma_lkey; in srp_create_target()
3654 target->global_rkey = host->srp_dev->global_rkey; in srp_create_target()
3655 target->cmd_sg_cnt = cmd_sg_entries; in srp_create_target()
3656 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries; in srp_create_target()
3657 target->allow_ext_sg = allow_ext_sg; in srp_create_target()
3658 target->tl_retry_count = 7; in srp_create_target()
3659 target->queue_size = SRP_DEFAULT_QUEUE_SIZE; in srp_create_target()
3665 scsi_host_get(target->scsi_host); in srp_create_target()
3667 ret = mutex_lock_interruptible(&host->add_target_mutex); in srp_create_target()
3671 ret = srp_parse_options(target->net, buf, target); in srp_create_target()
3675 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE; in srp_create_target()
3677 if (!srp_conn_unique(target->srp_host, target)) { in srp_create_target()
3678 if (target->using_rdma_cm) { in srp_create_target()
3679 shost_printk(KERN_INFO, target->scsi_host, in srp_create_target()
3681 be64_to_cpu(target->id_ext), in srp_create_target()
3682 be64_to_cpu(target->ioc_guid), in srp_create_target()
3683 &target->rdma_cm.dst); in srp_create_target()
3685 shost_printk(KERN_INFO, target->scsi_host, in srp_create_target()
3687 be64_to_cpu(target->id_ext), in srp_create_target()
3688 be64_to_cpu(target->ioc_guid), in srp_create_target()
3689 be64_to_cpu(target->initiator_ext)); in srp_create_target()
3691 ret = -EEXIST; in srp_create_target()
3695 if (!srp_dev->has_fr && !target->allow_ext_sg && in srp_create_target()
3696 target->cmd_sg_cnt < target->sg_tablesize) { in srp_create_target()
3698 target->sg_tablesize = target->cmd_sg_cnt; in srp_create_target()
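	/*
	 * Fast-registration sizing: work out how many MRs a single SCSI
	 * command may need. Without IB_DEVICE_SG_GAPS_REG the estimate is
	 * based on max_sectors, with one extra HCA page allowed for a
	 * misaligned head/tail; with gaps support it is based on
	 * sg_tablesize. register_always adds one MR for the indirect
	 * descriptor buffer.
	 */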
3701 if (srp_dev->use_fast_reg) { in srp_create_target()
3702 bool gaps_reg = (ibdev->attrs.device_cap_flags & in srp_create_target()
3705 max_sectors_per_mr = srp_dev->max_pages_per_mr << in srp_create_target()
3706 (ilog2(srp_dev->mr_page_size) - 9); in srp_create_target()
3722 (target->scsi_host->max_sectors + 1 + in srp_create_target()
3723 max_sectors_per_mr - 1) / max_sectors_per_mr; in srp_create_target()
3726 (target->sg_tablesize + in srp_create_target()
3727 srp_dev->max_pages_per_mr - 1) / in srp_create_target()
3728 srp_dev->max_pages_per_mr; in srp_create_target()
3731 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size, in srp_create_target()
3735 target_host->sg_tablesize = target->sg_tablesize; in srp_create_target()
3736 target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd; in srp_create_target()
3737 target->mr_per_cmd = mr_per_cmd; in srp_create_target()
3738 target->indirect_size = target->sg_tablesize * in srp_create_target()
3740 max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, in srp_create_target()
3742 target->max_it_iu_size); in srp_create_target()
3744 INIT_WORK(&target->tl_err_work, srp_tl_err_work); in srp_create_target()
3745 INIT_WORK(&target->remove_work, srp_remove_work); in srp_create_target()
3746 spin_lock_init(&target->lock); in srp_create_target()
3747 ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid); in srp_create_target()
3751 ret = -ENOMEM; in srp_create_target()
3752 if (target->ch_count == 0) in srp_create_target()
3753 target->ch_count = in srp_create_target()
3757 ibdev->num_comp_vectors), in srp_create_target()
3759 target->ch = kcalloc(target->ch_count, sizeof(*target->ch), in srp_create_target()
3761 if (!target->ch) in srp_create_target()
3766 const int ch_start = (node_idx * target->ch_count / in srp_create_target()
3768 const int ch_end = ((node_idx + 1) * target->ch_count / in srp_create_target()
3770 const int cv_start = node_idx * ibdev->num_comp_vectors / in srp_create_target()
3772 const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors / in srp_create_target()
3781 ch = &target->ch[ch_start + cpu_idx]; in srp_create_target()
3782 ch->target = target; in srp_create_target()
3783 ch->comp_vector = cv_start == cv_end ? cv_start : in srp_create_target()
3784 cv_start + cpu_idx % (cv_end - cv_start); in srp_create_target()
3785 spin_lock_init(&ch->lock); in srp_create_target()
3786 INIT_LIST_HEAD(&ch->free_tx); in srp_create_target()
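		/*
		 * The per-channel setup above spreads the channels evenly
		 * over the online NUMA nodes and maps each node's channels
		 * onto its own slice [cv_start, cv_end) of the device's
		 * completion vectors, so completion interrupts are balanced
		 * across CPUs.
		 */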
3803 if (target->using_rdma_cm) in srp_create_target()
3805 &target->rdma_cm.dst); in srp_create_target()
3808 target->ib_cm.orig_dgid.raw); in srp_create_target()
3809 shost_printk(KERN_ERR, target->scsi_host, in srp_create_target()
3812 target->ch_count, dst); in srp_create_target()
3818 target->ch_count = ch - target->ch; in srp_create_target()
3830 target->scsi_host->nr_hw_queues = target->ch_count; in srp_create_target()
3836 if (target->state != SRP_TARGET_REMOVED) { in srp_create_target()
3837 if (target->using_rdma_cm) { in srp_create_target()
3838 shost_printk(KERN_DEBUG, target->scsi_host, PFX in srp_create_target()
3840 be64_to_cpu(target->id_ext), in srp_create_target()
3841 be64_to_cpu(target->ioc_guid), in srp_create_target()
3842 target->sgid.raw, &target->rdma_cm.dst); in srp_create_target()
3844 shost_printk(KERN_DEBUG, target->scsi_host, PFX in srp_create_target()
3846 be64_to_cpu(target->id_ext), in srp_create_target()
3847 be64_to_cpu(target->ioc_guid), in srp_create_target()
3848 be16_to_cpu(target->ib_cm.pkey), in srp_create_target()
3849 be64_to_cpu(target->ib_cm.service_id), in srp_create_target()
3850 target->sgid.raw, in srp_create_target()
3851 target->ib_cm.orig_dgid.raw); in srp_create_target()
3858 mutex_unlock(&host->add_target_mutex); in srp_create_target()
3861 scsi_host_put(target->scsi_host); in srp_create_target()
3868 if (target->state != SRP_TARGET_REMOVED) in srp_create_target()
3869 kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net); in srp_create_target()
3870 scsi_host_put(target->scsi_host); in srp_create_target()
3879 for (i = 0; i < target->ch_count; i++) { in srp_create_target()
3880 ch = &target->ch[i]; in srp_create_target()
3885 kfree(target->ch); in srp_create_target()
3896 return sprintf(buf, "%s\n", dev_name(&host->srp_dev->dev->dev)); in show_ibdev()
3906 return sprintf(buf, "%d\n", host->port); in show_port()
3919 INIT_LIST_HEAD(&host->target_list); in srp_add_port()
3920 spin_lock_init(&host->target_lock); in srp_add_port()
3921 init_completion(&host->released); in srp_add_port()
3922 mutex_init(&host->add_target_mutex); in srp_add_port()
3923 host->srp_dev = device; in srp_add_port()
3924 host->port = port; in srp_add_port()
3926 host->dev.class = &srp_class; in srp_add_port()
3927 host->dev.parent = device->dev->dev.parent; in srp_add_port()
3928 dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev), in srp_add_port()
3931 if (device_register(&host->dev)) in srp_add_port()
3933 if (device_create_file(&host->dev, &dev_attr_add_target)) in srp_add_port()
3935 if (device_create_file(&host->dev, &dev_attr_ibdev)) in srp_add_port()
3937 if (device_create_file(&host->dev, &dev_attr_port)) in srp_add_port()
3943 device_unregister(&host->dev); in srp_add_port()
3956 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) { in srp_rename_dev()
3959 snprintf(name, sizeof(name), "srp-%s-%d", in srp_rename_dev()
3960 dev_name(&device->dev), host->port); in srp_rename_dev()
3961 device_rename(&host->dev, name); in srp_rename_dev()
3968 struct ib_device_attr *attr = &device->attrs; in srp_add_one()
3977 return -ENOMEM; in srp_add_one()
3984 mr_page_shift = max(12, ffs(attr->page_size_cap) - 1); in srp_add_one()
3985 srp_dev->mr_page_size = 1 << mr_page_shift; in srp_add_one()
3986 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1); in srp_add_one()
3987 max_pages_per_mr = attr->max_mr_size; in srp_add_one()
3988 do_div(max_pages_per_mr, srp_dev->mr_page_size); in srp_add_one()
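	/*
	 * Registration page size: use the smallest page size the HCA
	 * supports, but never less than 4 KiB (shift 12). max_mr_size
	 * divided by that page size bounds how many pages a single memory
	 * region can map; the result is further clamped to
	 * SRP_MAX_PAGES_PER_MR below.
	 */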
3990 attr->max_mr_size, srp_dev->mr_page_size, in srp_add_one()
3992 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR, in srp_add_one()
3995 srp_dev->has_fr = (attr->device_cap_flags & in srp_add_one()
3997 if (!never_register && !srp_dev->has_fr) in srp_add_one()
3998 dev_warn(&device->dev, "FR is not supported\n"); in srp_add_one()
4000 attr->max_mr_size >= 2 * srp_dev->mr_page_size) in srp_add_one()
4001 srp_dev->use_fast_reg = srp_dev->has_fr; in srp_add_one()
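	/*
	 * If memory registration is disabled, not forced for all buffers, or
	 * not supported by the device, the PD is allocated with
	 * IB_PD_UNSAFE_GLOBAL_RKEY so that the global rkey picked up further
	 * down can be used for data buffers that are not registered
	 * per-command.
	 */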
4003 if (never_register || !register_always || !srp_dev->has_fr) in srp_add_one()
4006 if (srp_dev->use_fast_reg) { in srp_add_one()
4007 srp_dev->max_pages_per_mr = in srp_add_one()
4008 min_t(u32, srp_dev->max_pages_per_mr, in srp_add_one()
4009 attr->max_fast_reg_page_list_len); in srp_add_one()
4011 srp_dev->mr_max_size = srp_dev->mr_page_size * in srp_add_one()
4012 srp_dev->max_pages_per_mr; in srp_add_one()
4013 pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n", in srp_add_one()
4014 dev_name(&device->dev), mr_page_shift, attr->max_mr_size, in srp_add_one()
4015 attr->max_fast_reg_page_list_len, in srp_add_one()
4016 srp_dev->max_pages_per_mr, srp_dev->mr_max_size); in srp_add_one()
4018 INIT_LIST_HEAD(&srp_dev->dev_list); in srp_add_one()
4020 srp_dev->dev = device; in srp_add_one()
4021 srp_dev->pd = ib_alloc_pd(device, flags); in srp_add_one()
4022 if (IS_ERR(srp_dev->pd)) { in srp_add_one()
4023 int ret = PTR_ERR(srp_dev->pd); in srp_add_one()
4030 srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey; in srp_add_one()
4031 WARN_ON_ONCE(srp_dev->global_rkey == 0); in srp_add_one()
4037 list_add_tail(&host->list, &srp_dev->dev_list); in srp_add_one()
4052 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) { in srp_remove_one()
4053 device_unregister(&host->dev); in srp_remove_one()
4058 wait_for_completion(&host->released); in srp_remove_one()
4063 spin_lock(&host->target_lock); in srp_remove_one()
4064 list_for_each_entry(target, &host->target_list, list) in srp_remove_one()
4066 spin_unlock(&host->target_lock); in srp_remove_one()
4077 ib_dealloc_pd(srp_dev->pd); in srp_remove_one()
4132 ret = -ENOMEM; in srp_init_module()
4136 ret = -ENOMEM; in srp_init_module()