Lines matching full:queue — identifier search results over the Linux kernel's NVMe-oF RDMA target (drivers/nvme/target/rdma.c).
50 struct nvmet_rdma_queue *queue; member
65 struct nvmet_rdma_queue *queue; member
147 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
157 MODULE_PARM_DESC(srq_size, "set Shared Receive Queue (SRQ) size, should >= 256 (default: 1024)");
172 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
212 nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) in nvmet_rdma_get_rsp() argument
217 spin_lock_irqsave(&queue->rsps_lock, flags); in nvmet_rdma_get_rsp()
218 rsp = list_first_entry_or_null(&queue->free_rsps, in nvmet_rdma_get_rsp()
222 spin_unlock_irqrestore(&queue->rsps_lock, flags); in nvmet_rdma_get_rsp()
230 ret = nvmet_rdma_alloc_rsp(queue->dev, rsp); in nvmet_rdma_get_rsp()
248 nvmet_rdma_free_rsp(rsp->queue->dev, rsp); in nvmet_rdma_put_rsp()
253 spin_lock_irqsave(&rsp->queue->rsps_lock, flags); in nvmet_rdma_put_rsp()
254 list_add_tail(&rsp->free_list, &rsp->queue->free_rsps); in nvmet_rdma_put_rsp()
255 spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); in nvmet_rdma_put_rsp()
454 nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue) in nvmet_rdma_alloc_rsps() argument
456 struct nvmet_rdma_device *ndev = queue->dev; in nvmet_rdma_alloc_rsps()
457 int nr_rsps = queue->recv_queue_size * 2; in nvmet_rdma_alloc_rsps()
460 queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp), in nvmet_rdma_alloc_rsps()
462 if (!queue->rsps) in nvmet_rdma_alloc_rsps()
466 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_alloc_rsps()
472 list_add_tail(&rsp->free_list, &queue->free_rsps); in nvmet_rdma_alloc_rsps()
479 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_alloc_rsps()
484 kfree(queue->rsps); in nvmet_rdma_alloc_rsps()
489 static void nvmet_rdma_free_rsps(struct nvmet_rdma_queue *queue) in nvmet_rdma_free_rsps() argument
491 struct nvmet_rdma_device *ndev = queue->dev; in nvmet_rdma_free_rsps()
492 int i, nr_rsps = queue->recv_queue_size * 2; in nvmet_rdma_free_rsps()
495 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_free_rsps()
500 kfree(queue->rsps); in nvmet_rdma_free_rsps()
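
The matches above (nvmet_rdma_get_rsp(), nvmet_rdma_put_rsp(), nvmet_rdma_alloc_rsps(), nvmet_rdma_free_rsps()) show one pattern: a preallocated response pool kept on a lock-protected free list, sized at twice the recv queue depth, with a heap-allocation fallback when the pool runs dry and put() freeing those overflow entries rather than pooling them. A minimal userspace model of that pattern follows; all names (pool_get, pool_put, struct rsp fields) are hypothetical, and a pthread mutex stands in for the kernel spinlock. It is a sketch of the idea, not the kernel implementation.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct rsp {
    struct rsp *next;
    bool allocated;             /* came from the heap, not the pool */
};

struct pool {
    pthread_mutex_t lock;
    struct rsp *free_list;
    struct rsp *slab;           /* backing array of nr entries */
};

static int pool_init(struct pool *p, int nr)
{
    p->slab = calloc(nr, sizeof(*p->slab));
    if (!p->slab)
        return -1;
    pthread_mutex_init(&p->lock, NULL);
    p->free_list = NULL;
    for (int i = 0; i < nr; i++) {      /* chain the slab into the free list */
        p->slab[i].next = p->free_list;
        p->free_list = &p->slab[i];
    }
    return 0;
}

static struct rsp *pool_get(struct pool *p)
{
    struct rsp *rsp;

    pthread_mutex_lock(&p->lock);
    rsp = p->free_list;
    if (rsp)
        p->free_list = rsp->next;
    pthread_mutex_unlock(&p->lock);

    if (!rsp) {                 /* pool exhausted: fall back to the heap,
                                 * as nvmet_rdma_get_rsp() does */
        rsp = calloc(1, sizeof(*rsp));
        if (rsp)
            rsp->allocated = true;
    }
    return rsp;
}

static void pool_put(struct pool *p, struct rsp *rsp)
{
    if (rsp->allocated) {       /* overflow entries are simply freed */
        free(rsp);
        return;
    }
    pthread_mutex_lock(&p->lock);
    rsp->next = p->free_list;
    p->free_list = rsp;
    pthread_mutex_unlock(&p->lock);
}
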
515 ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL); in nvmet_rdma_post_recv()
523 static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue) in nvmet_rdma_process_wr_wait_list() argument
525 spin_lock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
526 while (!list_empty(&queue->rsp_wr_wait_list)) { in nvmet_rdma_process_wr_wait_list()
530 rsp = list_entry(queue->rsp_wr_wait_list.next, in nvmet_rdma_process_wr_wait_list()
534 spin_unlock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
536 spin_lock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
539 list_add(&rsp->wait_list, &queue->rsp_wr_wait_list); in nvmet_rdma_process_wr_wait_list()
543 spin_unlock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
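
nvmet_rdma_process_wr_wait_list() above drains deferred commands with a classic shape: pop an entry under the lock, drop the lock to do the (potentially failing) work, retake the lock, and on failure push the entry back and stop. A self-contained userspace sketch of that loop, with try_execute() as a hypothetical stand-in for the command execution that can fail when send-queue credits are unavailable:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct waiter { struct waiter *next; };

struct wq {
    pthread_mutex_t lock;
    struct waiter *head;
};

/* Stub standing in for nvmet_rdma_execute_command(); assumed to fail
 * (return false) when no credits are available. */
static bool try_execute(struct waiter *w) { (void)w; return true; }

static void process_wr_wait_list(struct wq *q)
{
    pthread_mutex_lock(&q->lock);
    while (q->head) {
        struct waiter *w = q->head;

        q->head = w->next;              /* pop under the lock */
        pthread_mutex_unlock(&q->lock);
        bool ok = try_execute(w);       /* run without holding it */
        pthread_mutex_lock(&q->lock);

        if (!ok) {                      /* still no credits: re-queue at
                                         * the head and give up for now */
            w->next = q->head;
            q->head = w;
            break;
        }
    }
    pthread_mutex_unlock(&q->lock);
}
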
638 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_rw_ctx_init()
657 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_rw_ctx_destroy()
672 struct nvmet_rdma_queue *queue = rsp->queue; in nvmet_rdma_release_rsp() local
674 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_release_rsp()
682 if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list))) in nvmet_rdma_release_rsp()
683 nvmet_rdma_process_wr_wait_list(queue); in nvmet_rdma_release_rsp()
688 static void nvmet_rdma_error_comp(struct nvmet_rdma_queue *queue) in nvmet_rdma_error_comp() argument
690 if (queue->nvme_sq.ctrl) { in nvmet_rdma_error_comp()
691 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); in nvmet_rdma_error_comp()
696 * cleanup the queue in nvmet_rdma_error_comp()
698 nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_error_comp()
706 struct nvmet_rdma_queue *queue = wc->qp->qp_context; in nvmet_rdma_send_done() local
714 nvmet_rdma_error_comp(queue); in nvmet_rdma_send_done()
722 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_queue_response()
743 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); in nvmet_rdma_queue_response()
745 ib_dma_sync_single_for_device(rsp->queue->dev->device, in nvmet_rdma_queue_response()
759 struct nvmet_rdma_queue *queue = wc->qp->qp_context; in nvmet_rdma_read_data_done() local
763 atomic_add(rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_read_data_done()
773 nvmet_rdma_error_comp(queue); in nvmet_rdma_read_data_done()
792 struct nvmet_rdma_queue *queue = wc->qp->qp_context; in nvmet_rdma_write_data_done() local
793 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_write_data_done()
800 atomic_add(rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_write_data_done()
810 nvmet_rdma_error_comp(queue); in nvmet_rdma_write_data_done()
867 if (off + len > rsp->queue->dev->inline_data_size) { in nvmet_rdma_map_sgl_inline()
956 struct nvmet_rdma_queue *queue = rsp->queue; in nvmet_rdma_execute_command() local
959 &queue->sq_wr_avail) < 0)) { in nvmet_rdma_execute_command()
960 pr_debug("IB send queue full (needed %d): queue %u cntlid %u\n", in nvmet_rdma_execute_command()
961 1 + rsp->n_rdma, queue->idx, in nvmet_rdma_execute_command()
962 queue->nvme_sq.ctrl->cntlid); in nvmet_rdma_execute_command()
963 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_execute_command()
968 if (rdma_rw_ctx_post(&rsp->rw, queue->qp, in nvmet_rdma_execute_command()
969 queue->cm_id->port_num, &rsp->read_cqe, NULL)) in nvmet_rdma_execute_command()
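
The sq_wr_avail matches tie together here: nvmet_rdma_execute_command() reserves 1 + n_rdma send work requests up front (one for the response, n_rdma for the data transfer) and rolls the reservation back if the counter goes negative, while nvmet_rdma_release_rsp() and the read/write data-done handlers return the credits on completion. A compact model of that accounting using C11 atomics in place of the kernel's atomic_t; the function names are hypothetical:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int sq_wr_avail;

static bool sq_wr_acquire(int n_rdma)
{
    int needed = 1 + n_rdma;    /* response WR plus RDMA data WRs */

    /* fetch_sub returns the old value, so old - needed is the new one;
     * same test as atomic_sub_return(...) < 0 in the kernel */
    if (atomic_fetch_sub(&sq_wr_avail, needed) - needed < 0) {
        atomic_fetch_add(&sq_wr_avail, needed); /* roll back and defer */
        return false;
    }
    return true;
}

static void sq_wr_release(int n_rdma)
{
    /* completion handlers give the credits back, then re-run the
     * wr wait list so deferred commands get another chance */
    atomic_fetch_add(&sq_wr_avail, 1 + n_rdma);
}
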
978 static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, in nvmet_rdma_handle_command() argument
983 ib_dma_sync_single_for_cpu(queue->dev->device, in nvmet_rdma_handle_command()
986 ib_dma_sync_single_for_cpu(queue->dev->device, in nvmet_rdma_handle_command()
990 if (!nvmet_req_init(&cmd->req, &queue->nvme_cq, in nvmet_rdma_handle_command()
991 &queue->nvme_sq, &nvmet_rdma_ops)) in nvmet_rdma_handle_command()
999 spin_lock(&queue->rsp_wr_wait_lock); in nvmet_rdma_handle_command()
1000 list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list); in nvmet_rdma_handle_command()
1001 spin_unlock(&queue->rsp_wr_wait_lock); in nvmet_rdma_handle_command()
1014 struct nvmet_rdma_queue *queue = wc->qp->qp_context; in nvmet_rdma_recv_done() local
1022 nvmet_rdma_error_comp(queue); in nvmet_rdma_recv_done()
1029 nvmet_rdma_error_comp(queue); in nvmet_rdma_recv_done()
1033 cmd->queue = queue; in nvmet_rdma_recv_done()
1034 rsp = nvmet_rdma_get_rsp(queue); in nvmet_rdma_recv_done()
1041 nvmet_rdma_post_recv(queue->dev, cmd); in nvmet_rdma_recv_done()
1044 rsp->queue = queue; in nvmet_rdma_recv_done()
1048 rsp->req.port = queue->port; in nvmet_rdma_recv_done()
1051 if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { in nvmet_rdma_recv_done()
1054 spin_lock_irqsave(&queue->state_lock, flags); in nvmet_rdma_recv_done()
1055 if (queue->state == NVMET_RDMA_Q_CONNECTING) in nvmet_rdma_recv_done()
1056 list_add_tail(&rsp->wait_list, &queue->rsp_wait_list); in nvmet_rdma_recv_done()
1059 spin_unlock_irqrestore(&queue->state_lock, flags); in nvmet_rdma_recv_done()
1063 nvmet_rdma_handle_command(queue, rsp); in nvmet_rdma_recv_done()
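
The tail of nvmet_rdma_recv_done() gates commands on queue state: while the queue is still CONNECTING, arriving commands are parked on rsp_wait_list (to be replayed by nvmet_rdma_queue_established() once the state flips to LIVE); a disconnecting queue drops them. A simplified userspace model of that admission check, with a pthread mutex for the state lock and hypothetical names throughout:

#include <pthread.h>
#include <stddef.h>

enum q_state { Q_CONNECTING, Q_LIVE, Q_DISCONNECTING };

struct cmd { struct cmd *next; };

struct queue {
    pthread_mutex_t state_lock;
    enum q_state state;
    struct cmd *wait_head, *wait_tail;
};

/* Returns 1 if the command was handled or parked, 0 if it was dropped. */
static int admit_cmd(struct queue *q, struct cmd *c)
{
    enum q_state s = Q_LIVE;

    if (q->state != Q_LIVE) {           /* cheap racy peek first */
        pthread_mutex_lock(&q->state_lock);
        s = q->state;
        if (s == Q_CONNECTING) {        /* park until established */
            c->next = NULL;
            if (q->wait_tail)
                q->wait_tail->next = c;
            else
                q->wait_head = c;
            q->wait_tail = c;
        }
        pthread_mutex_unlock(&q->state_lock);
    }
    if (s == Q_CONNECTING)
        return 1;                       /* parked for later replay */
    if (s != Q_LIVE)
        return 0;                       /* disconnecting: drop */
    /* live: handle_command(q, c) would run here */
    return 1;
}
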
1261 static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) in nvmet_rdma_create_queue_ib() argument
1264 struct nvmet_rdma_device *ndev = queue->dev; in nvmet_rdma_create_queue_ib()
1270 nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size; in nvmet_rdma_create_queue_ib()
1272 queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1, in nvmet_rdma_create_queue_ib()
1273 queue->comp_vector, IB_POLL_WORKQUEUE); in nvmet_rdma_create_queue_ib()
1274 if (IS_ERR(queue->cq)) { in nvmet_rdma_create_queue_ib()
1275 ret = PTR_ERR(queue->cq); in nvmet_rdma_create_queue_ib()
1281 qp_attr.qp_context = queue; in nvmet_rdma_create_queue_ib()
1283 qp_attr.send_cq = queue->cq; in nvmet_rdma_create_queue_ib()
1284 qp_attr.recv_cq = queue->cq; in nvmet_rdma_create_queue_ib()
1288 qp_attr.cap.max_send_wr = queue->send_queue_size + 1; in nvmet_rdma_create_queue_ib()
1289 factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num, in nvmet_rdma_create_queue_ib()
1291 qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor; in nvmet_rdma_create_queue_ib()
1295 if (queue->nsrq) { in nvmet_rdma_create_queue_ib()
1296 qp_attr.srq = queue->nsrq->srq; in nvmet_rdma_create_queue_ib()
1299 qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size; in nvmet_rdma_create_queue_ib()
1303 if (queue->port->pi_enable && queue->host_qid) in nvmet_rdma_create_queue_ib()
1306 ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr); in nvmet_rdma_create_queue_ib()
1311 queue->qp = queue->cm_id->qp; in nvmet_rdma_create_queue_ib()
1313 atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr); in nvmet_rdma_create_queue_ib()
1316 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge, in nvmet_rdma_create_queue_ib()
1317 qp_attr.cap.max_send_wr, queue->cm_id); in nvmet_rdma_create_queue_ib()
1319 if (!queue->nsrq) { in nvmet_rdma_create_queue_ib()
1320 for (i = 0; i < queue->recv_queue_size; i++) { in nvmet_rdma_create_queue_ib()
1321 queue->cmds[i].queue = queue; in nvmet_rdma_create_queue_ib()
1322 ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]); in nvmet_rdma_create_queue_ib()
1332 rdma_destroy_qp(queue->cm_id); in nvmet_rdma_create_queue_ib()
1334 ib_cq_pool_put(queue->cq, nr_cqe + 1); in nvmet_rdma_create_queue_ib()
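
The sizing arithmetic in nvmet_rdma_create_queue_ib() is worth restating in one place. Reading the matches above: the CQ must absorb one recv completion per queued command plus up to two send-side completions (RDMA data transfer and response) per in-flight command, the send queue gets one extra slot (which appears to leave room for the drain work request), and the number of RDMA R/W contexts is scaled by a per-device factor from rdma_rw_mr_factor(). A standalone sketch of those formulas:

struct ib_sizing { int nr_cqe, max_send_wr, max_rdma_ctxs; };

static struct ib_sizing size_queue(int recv_queue_size, int send_queue_size,
                                   int factor)
{
    struct ib_sizing s = {
        /* one CQE per recv, two per send-side command */
        .nr_cqe        = recv_queue_size + 2 * send_queue_size,
        /* one extra send WR slot beyond the negotiated depth */
        .max_send_wr   = send_queue_size + 1,
        /* devices may need several MRs/WRs per R/W context */
        .max_rdma_ctxs = send_queue_size * factor,
    };
    return s;
}

Note the teardown side mirrors this: ib_cq_pool_put() is called with the same nr_cqe + 1 count the CQ was obtained with.
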
1338 static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) in nvmet_rdma_destroy_queue_ib() argument
1340 ib_drain_qp(queue->qp); in nvmet_rdma_destroy_queue_ib()
1341 if (queue->cm_id) in nvmet_rdma_destroy_queue_ib()
1342 rdma_destroy_id(queue->cm_id); in nvmet_rdma_destroy_queue_ib()
1343 ib_destroy_qp(queue->qp); in nvmet_rdma_destroy_queue_ib()
1344 ib_cq_pool_put(queue->cq, queue->recv_queue_size + 2 * in nvmet_rdma_destroy_queue_ib()
1345 queue->send_queue_size + 1); in nvmet_rdma_destroy_queue_ib()
1348 static void nvmet_rdma_free_queue(struct nvmet_rdma_queue *queue) in nvmet_rdma_free_queue() argument
1350 pr_debug("freeing queue %d\n", queue->idx); in nvmet_rdma_free_queue()
1352 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_rdma_free_queue()
1354 nvmet_rdma_destroy_queue_ib(queue); in nvmet_rdma_free_queue()
1355 if (!queue->nsrq) { in nvmet_rdma_free_queue()
1356 nvmet_rdma_free_cmds(queue->dev, queue->cmds, in nvmet_rdma_free_queue()
1357 queue->recv_queue_size, in nvmet_rdma_free_queue()
1358 !queue->host_qid); in nvmet_rdma_free_queue()
1360 nvmet_rdma_free_rsps(queue); in nvmet_rdma_free_queue()
1361 ida_free(&nvmet_rdma_queue_ida, queue->idx); in nvmet_rdma_free_queue()
1362 kfree(queue); in nvmet_rdma_free_queue()
1367 struct nvmet_rdma_queue *queue = in nvmet_rdma_release_queue_work() local
1369 struct nvmet_rdma_device *dev = queue->dev; in nvmet_rdma_release_queue_work()
1371 nvmet_rdma_free_queue(queue); in nvmet_rdma_release_queue_work()
1378 struct nvmet_rdma_queue *queue) in nvmet_rdma_parse_cm_connect_req() argument
1389 queue->host_qid = le16_to_cpu(req->qid); in nvmet_rdma_parse_cm_connect_req()
1392 * req->hsqsize corresponds to our recv queue size plus 1 in nvmet_rdma_parse_cm_connect_req()
1393 * req->hrqsize corresponds to our send queue size in nvmet_rdma_parse_cm_connect_req()
1395 queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1; in nvmet_rdma_parse_cm_connect_req()
1396 queue->send_queue_size = le16_to_cpu(req->hrqsize); in nvmet_rdma_parse_cm_connect_req()
1398 if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH) in nvmet_rdma_parse_cm_connect_req()
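
nvmet_rdma_parse_cm_connect_req() maps the host's advertised sizes onto the target's queues: hsqsize is a 0's-based value in the NVMe/RDMA CM private data, so the target's recv queue becomes hsqsize + 1, while hrqsize maps directly to the send queue size; the admin queue (qid 0) is additionally capped at NVME_AQ_DEPTH (32 in the kernel headers). A trimmed stand-in struct makes the mapping concrete; the real private-data layout has more fields:

#include <stdint.h>

#define NVME_AQ_DEPTH 32

struct conn_req { uint16_t qid, hsqsize, hrqsize; };

static int parse_connect_req(const struct conn_req *req,
                             int *recv_queue_size, int *send_queue_size)
{
    *recv_queue_size = req->hsqsize + 1;    /* 0's-based on the wire */
    *send_queue_size = req->hrqsize;

    /* admin queue (qid 0) may not exceed the fixed AQ depth */
    if (req->qid == 0 && *recv_queue_size > NVME_AQ_DEPTH)
        return -1;
    return 0;
}
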
1427 struct nvmet_rdma_queue *queue; in nvmet_rdma_alloc_queue() local
1430 queue = kzalloc(sizeof(*queue), GFP_KERNEL); in nvmet_rdma_alloc_queue()
1431 if (!queue) { in nvmet_rdma_alloc_queue()
1436 ret = nvmet_sq_init(&queue->nvme_sq); in nvmet_rdma_alloc_queue()
1442 ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue); in nvmet_rdma_alloc_queue()
1450 INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work); in nvmet_rdma_alloc_queue()
1451 queue->dev = ndev; in nvmet_rdma_alloc_queue()
1452 queue->cm_id = cm_id; in nvmet_rdma_alloc_queue()
1453 queue->port = port->nport; in nvmet_rdma_alloc_queue()
1455 spin_lock_init(&queue->state_lock); in nvmet_rdma_alloc_queue()
1456 queue->state = NVMET_RDMA_Q_CONNECTING; in nvmet_rdma_alloc_queue()
1457 INIT_LIST_HEAD(&queue->rsp_wait_list); in nvmet_rdma_alloc_queue()
1458 INIT_LIST_HEAD(&queue->rsp_wr_wait_list); in nvmet_rdma_alloc_queue()
1459 spin_lock_init(&queue->rsp_wr_wait_lock); in nvmet_rdma_alloc_queue()
1460 INIT_LIST_HEAD(&queue->free_rsps); in nvmet_rdma_alloc_queue()
1461 spin_lock_init(&queue->rsps_lock); in nvmet_rdma_alloc_queue()
1462 INIT_LIST_HEAD(&queue->queue_list); in nvmet_rdma_alloc_queue()
1464 queue->idx = ida_alloc(&nvmet_rdma_queue_ida, GFP_KERNEL); in nvmet_rdma_alloc_queue()
1465 if (queue->idx < 0) { in nvmet_rdma_alloc_queue()
1474 queue->comp_vector = !queue->host_qid ? 0 : in nvmet_rdma_alloc_queue()
1475 queue->idx % ndev->device->num_comp_vectors; in nvmet_rdma_alloc_queue()
1478 ret = nvmet_rdma_alloc_rsps(queue); in nvmet_rdma_alloc_queue()
1485 queue->nsrq = ndev->srqs[queue->comp_vector % ndev->srq_count]; in nvmet_rdma_alloc_queue()
1487 queue->cmds = nvmet_rdma_alloc_cmds(ndev, in nvmet_rdma_alloc_queue()
1488 queue->recv_queue_size, in nvmet_rdma_alloc_queue()
1489 !queue->host_qid); in nvmet_rdma_alloc_queue()
1490 if (IS_ERR(queue->cmds)) { in nvmet_rdma_alloc_queue()
1496 ret = nvmet_rdma_create_queue_ib(queue); in nvmet_rdma_alloc_queue()
1498 pr_err("%s: creating RDMA queue failed (%d).\n", in nvmet_rdma_alloc_queue()
1504 return queue; in nvmet_rdma_alloc_queue()
1507 if (!queue->nsrq) { in nvmet_rdma_alloc_queue()
1508 nvmet_rdma_free_cmds(queue->dev, queue->cmds, in nvmet_rdma_alloc_queue()
1509 queue->recv_queue_size, in nvmet_rdma_alloc_queue()
1510 !queue->host_qid); in nvmet_rdma_alloc_queue()
1513 nvmet_rdma_free_rsps(queue); in nvmet_rdma_alloc_queue()
1515 ida_free(&nvmet_rdma_queue_ida, queue->idx); in nvmet_rdma_alloc_queue()
1517 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_rdma_alloc_queue()
1519 kfree(queue); in nvmet_rdma_alloc_queue()
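
One small detail from nvmet_rdma_alloc_queue() above: the completion-vector choice pins the admin queue (host_qid == 0) to vector 0 and spreads I/O queues round-robin across the device's completion vectors by queue index. Restated as a standalone helper (hypothetical name):

static int pick_comp_vector(int host_qid, int queue_idx,
                            int num_comp_vectors)
{
    return host_qid ? queue_idx % num_comp_vectors : 0;
}
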
1527 struct nvmet_rdma_queue *queue = priv; in nvmet_rdma_qp_event() local
1531 rdma_notify(queue->cm_id, event->event); in nvmet_rdma_qp_event()
1534 pr_debug("received last WQE reached event for queue=0x%p\n", in nvmet_rdma_qp_event()
1535 queue); in nvmet_rdma_qp_event()
1545 struct nvmet_rdma_queue *queue, in nvmet_rdma_cm_accept() argument
1555 queue->dev->device->attrs.max_qp_init_rd_atom); in nvmet_rdma_cm_accept()
1559 priv.crqsize = cpu_to_le16(queue->recv_queue_size); in nvmet_rdma_cm_accept()
1572 struct nvmet_rdma_queue *queue; in nvmet_rdma_queue_connect() local
1581 queue = nvmet_rdma_alloc_queue(ndev, cm_id, event); in nvmet_rdma_queue_connect()
1582 if (!queue) { in nvmet_rdma_queue_connect()
1587 if (queue->host_qid == 0) { in nvmet_rdma_queue_connect()
1594 if (q->nvme_sq.ctrl == queue->nvme_sq.ctrl && in nvmet_rdma_queue_connect()
1603 ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); in nvmet_rdma_queue_connect()
1609 queue->cm_id = NULL; in nvmet_rdma_queue_connect()
1614 list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list); in nvmet_rdma_queue_connect()
1620 nvmet_rdma_free_queue(queue); in nvmet_rdma_queue_connect()
1627 static void nvmet_rdma_queue_established(struct nvmet_rdma_queue *queue) in nvmet_rdma_queue_established() argument
1631 spin_lock_irqsave(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1632 if (queue->state != NVMET_RDMA_Q_CONNECTING) { in nvmet_rdma_queue_established()
1633 pr_warn("trying to establish a connected queue\n"); in nvmet_rdma_queue_established()
1636 queue->state = NVMET_RDMA_Q_LIVE; in nvmet_rdma_queue_established()
1638 while (!list_empty(&queue->rsp_wait_list)) { in nvmet_rdma_queue_established()
1641 cmd = list_first_entry(&queue->rsp_wait_list, in nvmet_rdma_queue_established()
1645 spin_unlock_irqrestore(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1646 nvmet_rdma_handle_command(queue, cmd); in nvmet_rdma_queue_established()
1647 spin_lock_irqsave(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1651 spin_unlock_irqrestore(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1654 static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) in __nvmet_rdma_queue_disconnect() argument
1659 pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state); in __nvmet_rdma_queue_disconnect()
1661 spin_lock_irqsave(&queue->state_lock, flags); in __nvmet_rdma_queue_disconnect()
1662 switch (queue->state) { in __nvmet_rdma_queue_disconnect()
1664 while (!list_empty(&queue->rsp_wait_list)) { in __nvmet_rdma_queue_disconnect()
1667 rsp = list_first_entry(&queue->rsp_wait_list, in __nvmet_rdma_queue_disconnect()
1675 queue->state = NVMET_RDMA_Q_DISCONNECTING; in __nvmet_rdma_queue_disconnect()
1681 spin_unlock_irqrestore(&queue->state_lock, flags); in __nvmet_rdma_queue_disconnect()
1684 rdma_disconnect(queue->cm_id); in __nvmet_rdma_queue_disconnect()
1685 queue_work(nvmet_wq, &queue->release_work); in __nvmet_rdma_queue_disconnect()
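
__nvmet_rdma_queue_disconnect() follows a "decide under the lock, act outside it" shape: the state transition to DISCONNECTING happens under state_lock, while the blocking calls (rdma_disconnect() and scheduling the release work) run only after the lock is dropped, so a concurrent second disconnect sees DISCONNECTING and becomes a no-op. A simplified two-state model below; the real function additionally fails out commands parked on rsp_wait_list when tearing down a still-CONNECTING queue:

#include <pthread.h>
#include <stdbool.h>

enum q_state2 { Q2_LIVE, Q2_DISCONNECTING };

struct queue2 {
    pthread_mutex_t state_lock;
    enum q_state2 state;
};

static void queue_disconnect(struct queue2 *q)
{
    bool disconnect = false;

    pthread_mutex_lock(&q->state_lock);
    if (q->state != Q2_DISCONNECTING) { /* first caller wins */
        q->state = Q2_DISCONNECTING;
        disconnect = true;
    }
    pthread_mutex_unlock(&q->state_lock);

    if (disconnect) {
        /* rdma_disconnect(q->cm_id) and
         * queue_work(nvmet_wq, &q->release_work) would run here,
         * outside the lock */
    }
}
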
1689 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) in nvmet_rdma_queue_disconnect() argument
1694 if (!list_empty(&queue->queue_list)) { in nvmet_rdma_queue_disconnect()
1695 list_del_init(&queue->queue_list); in nvmet_rdma_queue_disconnect()
1701 __nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_queue_disconnect()
1705 struct nvmet_rdma_queue *queue) in nvmet_rdma_queue_connect_fail() argument
1707 WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); in nvmet_rdma_queue_connect_fail()
1710 if (!list_empty(&queue->queue_list)) in nvmet_rdma_queue_connect_fail()
1711 list_del_init(&queue->queue_list); in nvmet_rdma_queue_connect_fail()
1714 pr_err("failed to connect queue %d\n", queue->idx); in nvmet_rdma_queue_connect_fail()
1715 queue_work(nvmet_wq, &queue->release_work); in nvmet_rdma_queue_connect_fail()
1721 * @queue: nvmet rdma queue (cm id qp_context)
1725 * queue cm_id and/or a device bound listener cm_id (where in this
1726 * case queue will be null).
1734 struct nvmet_rdma_queue *queue) in nvmet_rdma_device_removal() argument
1738 if (queue) { in nvmet_rdma_device_removal()
1740 * This is a queue cm_id. we have registered in nvmet_rdma_device_removal()
1768 struct nvmet_rdma_queue *queue = NULL; in nvmet_rdma_cm_handler() local
1772 queue = cm_id->qp->qp_context; in nvmet_rdma_cm_handler()
1783 nvmet_rdma_queue_established(queue); in nvmet_rdma_cm_handler()
1786 if (!queue) { in nvmet_rdma_cm_handler()
1795 nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_cm_handler()
1798 ret = nvmet_rdma_device_removal(cm_id, queue); in nvmet_rdma_cm_handler()
1806 nvmet_rdma_queue_connect_fail(cm_id, queue); in nvmet_rdma_cm_handler()
1819 struct nvmet_rdma_queue *queue; in nvmet_rdma_delete_ctrl() local
1823 list_for_each_entry(queue, &nvmet_rdma_queue_list, queue_list) { in nvmet_rdma_delete_ctrl()
1824 if (queue->nvme_sq.ctrl == ctrl) { in nvmet_rdma_delete_ctrl()
1825 list_del_init(&queue->queue_list); in nvmet_rdma_delete_ctrl()
1828 __nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_delete_ctrl()
1837 struct nvmet_rdma_queue *queue, *tmp; in nvmet_rdma_destroy_port_queues() local
1841 list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, in nvmet_rdma_destroy_port_queues()
1843 if (queue->port != nport) in nvmet_rdma_destroy_port_queues()
1846 list_del_init(&queue->queue_list); in nvmet_rdma_destroy_port_queues()
1847 __nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_destroy_port_queues()
1862 * guarantees that no new queue will be created. in nvmet_rdma_disable_port()
2000 struct rdma_cm_id *req_cm_id = rsp->queue->cm_id; in nvmet_rdma_disc_port_addr()
2037 struct nvmet_rdma_queue *queue, *tmp; in nvmet_rdma_remove_one() local
2058 list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list, in nvmet_rdma_remove_one()
2060 if (queue->dev->device != ib_device) in nvmet_rdma_remove_one()
2063 pr_info("Removing queue %d\n", queue->idx); in nvmet_rdma_remove_one()
2064 list_del_init(&queue->queue_list); in nvmet_rdma_remove_one()
2065 __nvmet_rdma_queue_disconnect(queue); in nvmet_rdma_remove_one()
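
The teardown paths above (nvmet_rdma_delete_ctrl(), nvmet_rdma_destroy_port_queues(), nvmet_rdma_remove_one()) all share one idiom: walk the global queue list with list_for_each_entry_safe(), unlink matching queues with list_del_init() so no other path can find them again, then disconnect them. The kernel's _safe iterator exists precisely so the cursor survives deletion of the current entry. A userspace model with a singly linked list and a pointer-to-pointer cursor, which gives the same delete-while-iterating safety:

#include <pthread.h>
#include <stddef.h>

struct queue3 {
    struct queue3 *next;
    const void *dev;            /* owning device: the removal predicate */
};

static pthread_mutex_t queue_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct queue3 *queue_list;

static void remove_queues_for(const void *dev)
{
    pthread_mutex_lock(&queue_mutex);
    for (struct queue3 **pp = &queue_list; *pp; ) {
        struct queue3 *q = *pp;

        if (q->dev == dev) {
            *pp = q->next;      /* unlink first... */
            q->next = NULL;
            /* ...then __nvmet_rdma_queue_disconnect(q) would run */
        } else {
            pp = &q->next;      /* advance only when nothing was removed */
        }
    }
    pthread_mutex_unlock(&queue_mutex);
}
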