Lines Matching +full:dont +full:- +full:validate

1 // SPDX-License-Identifier: GPL-2.0-only
36 * -------------
38 * Define a shared-memory interface for LIO to pass SCSI commands and
40 * are too complex for in-kernel support to be possible.
42 * It uses the UIO framework to do a lot of the device-creation and
48 * internal to the mmap-ed area. There is separate space outside the
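
The comment above describes the userspace side only in outline; as a concrete illustration, here is a minimal sketch of a handler built on the UIO interface that the TCMU design document (Documentation/target/tcmu-design.rst) describes. The device path, the map length, and the omission of error handling are assumptions for brevity, not taken from this file.

/*
 * Minimal userspace sketch: mmap the UIO device, locate the command ring
 * via the mailbox, block in read() waiting for new work, and write() to the
 * fd to kick the kernel's irqcontrol hook after completing entries.
 * Assumptions: /dev/uio0 and the 8 MiB map length are illustrative; the real
 * length comes from /sys/class/uio/uioX/maps/map0/size.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/target_core_user.h>

int main(void)
{
	int fd = open("/dev/uio0", O_RDWR);
	size_t map_len = 8 * 1024 * 1024;		/* illustrative */
	void *map = mmap(NULL, map_len, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);
	struct tcmu_mailbox *mb = map;			/* mailbox sits at offset 0 */
	void *cmdr = (char *)map + mb->cmdr_off;	/* command ring follows it */
	uint32_t events = 0;

	while (read(fd, &events, sizeof(events)) == sizeof(events)) {
		/* Walk entries from mb->cmd_tail to mb->cmd_head in cmdr,
		 * execute each SCSI command, fill in the rsp fields and
		 * advance cmd_tail, then notify the kernel. */
		(void)cmdr;
		events = 1;
		write(fd, &events, sizeof(events));	/* -> tcmu_irqcontrol() */
	}
	munmap(map, map_len);
	close(fd);
	return 0;
}
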
68 #define CMDR_SIZE_DEF (MB_CMDR_SIZE_DEF - CMDR_OFF)
77 #define TCMU_MBS_TO_PAGES(_mbs) ((size_t)_mbs << (20 - PAGE_SHIFT))
78 #define TCMU_PAGES_TO_MBS(_pages) (_pages >> (20 - PAGE_SHIFT))
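
For orientation, a worked example of the two conversion macros above, assuming 4 KiB pages (PAGE_SHIFT == 12, so the shift is 20 - 12 = 8):

/* With 4 KiB pages:
 *   TCMU_MBS_TO_PAGES(8)    == (size_t)8 << 8 == 2048 pages (8 MiB of data area)
 *   TCMU_PAGES_TO_MBS(2048) == 2048 >> 8      == 8 (MiB)
 */
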
212 * mutex_lock(&tcmu_dev->cmdr_lock);
213 * mutex_unlock(&tcmu_dev->cmdr_lock);
236 return -EINVAL; in tcmu_set_global_max_data_area()
240 return -EINVAL; in tcmu_set_global_max_data_area()
288 return -EINVAL; in tcmu_set_block_netlink()
305 struct tcmu_dev *udev = nl_cmd->udev; in tcmu_fail_netlink_cmd()
309 return -EBUSY; in tcmu_fail_netlink_cmd()
312 if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { in tcmu_fail_netlink_cmd()
313 pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name); in tcmu_fail_netlink_cmd()
314 nl_cmd->status = -EINTR; in tcmu_fail_netlink_cmd()
315 list_del(&nl_cmd->nl_list); in tcmu_fail_netlink_cmd()
316 complete(&nl_cmd->complete); in tcmu_fail_netlink_cmd()
334 return -EINVAL; in tcmu_set_reset_netlink()
378 if (!info->attrs[TCMU_ATTR_CMD_STATUS] || in tcmu_genl_cmd_done()
379 !info->attrs[TCMU_ATTR_DEVICE_ID]) { in tcmu_genl_cmd_done()
381 return -EINVAL; in tcmu_genl_cmd_done()
384 dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]); in tcmu_genl_cmd_done()
385 rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]); in tcmu_genl_cmd_done()
389 if (nl_cmd->udev->se_dev.dev_index == dev_id) { in tcmu_genl_cmd_done()
390 udev = nl_cmd->udev; in tcmu_genl_cmd_done()
398 ret = -ENODEV; in tcmu_genl_cmd_done()
401 list_del(&nl_cmd->nl_list); in tcmu_genl_cmd_done()
404 udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc, in tcmu_genl_cmd_done()
405 nl_cmd->status); in tcmu_genl_cmd_done()
407 if (nl_cmd->cmd != completed_cmd) { in tcmu_genl_cmd_done()
409 udev->name, completed_cmd, nl_cmd->cmd); in tcmu_genl_cmd_done()
410 ret = -EINVAL; in tcmu_genl_cmd_done()
414 nl_cmd->status = rc; in tcmu_genl_cmd_done()
415 complete(&nl_cmd->complete); in tcmu_genl_cmd_done()
439 if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) { in tcmu_genl_set_features()
441 nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]); in tcmu_genl_set_features()
452 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
458 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
464 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
470 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
480 .name = "TCM-USER",
492 #define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
494 #define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
495 #define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])
499 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; in tcmu_cmd_free_data()
503 clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap); in tcmu_cmd_free_data()
510 XA_STATE(xas, &udev->data_pages, 0); in tcmu_get_empty_block()
515 dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh); in tcmu_get_empty_block()
516 if (dbi == udev->dbi_thresh) in tcmu_get_empty_block()
517 return -1; in tcmu_get_empty_block()
519 dpi = dbi * udev->data_pages_per_blk; in tcmu_get_empty_block()
533 if (xa_store(&udev->data_pages, dpi + i, page, GFP_NOIO)) { in tcmu_get_empty_block()
538 if (atomic_add_return(i - cnt, &global_page_count) > in tcmu_get_empty_block()
542 if (i && dbi > udev->dbi_max) in tcmu_get_empty_block()
543 udev->dbi_max = dbi; in tcmu_get_empty_block()
545 set_bit(dbi, udev->data_bitmap); in tcmu_get_empty_block()
551 return i == page_cnt ? dbi : -1; in tcmu_get_empty_block()
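
For reference, the block-to-page indexing used by tcmu_get_empty_block() above, with an illustrative data_pages_per_blk value:

/* Illustrative: udev->data_pages_per_blk == 4
 *   dbi 0  -> udev->data_pages indices  0..3
 *   dbi 10 -> udev->data_pages indices 40..43   (dpi = dbi * data_pages_per_blk)
 * The function returns -1 if no block is free below dbi_thresh, or if not all
 * page_cnt pages for the block could be allocated and stored; otherwise it
 * returns the block index dbi.
 */
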
558 int dbi = -2; in tcmu_get_empty_blocks()
560 uint32_t blk_size = udev->data_blk_size; in tcmu_get_empty_blocks()
562 for (; length > 0; length -= blk_size) { in tcmu_get_empty_blocks()
567 return -1; in tcmu_get_empty_blocks()
574 kfree(tcmu_cmd->dbi); in tcmu_free_cmd()
581 struct se_cmd *se_cmd = cmd->se_cmd; in tcmu_cmd_set_block_cnts()
582 uint32_t blk_size = cmd->tcmu_dev->data_blk_size; in tcmu_cmd_set_block_cnts()
584 cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, blk_size); in tcmu_cmd_set_block_cnts()
586 if (se_cmd->se_cmd_flags & SCF_BIDI) { in tcmu_cmd_set_block_cnts()
587 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents)); in tcmu_cmd_set_block_cnts()
588 for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++) in tcmu_cmd_set_block_cnts()
589 len += se_cmd->t_bidi_data_sg[i].length; in tcmu_cmd_set_block_cnts()
590 cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, blk_size); in tcmu_cmd_set_block_cnts()
591 cmd->dbi_cnt += cmd->dbi_bidi_cnt; in tcmu_cmd_set_block_cnts()
592 cmd->data_len_bidi = len; in tcmu_cmd_set_block_cnts()
602 /* Do not add more than udev->data_blk_size to iov */ in new_block_to_iov()
603 len = min_t(int, len, udev->data_blk_size); in new_block_to_iov()
614 (*iov)->iov_base = (void __user *) in new_block_to_iov()
615 (udev->data_off + dbi * udev->data_blk_size); in new_block_to_iov()
617 (*iov)->iov_len += len; in new_block_to_iov()
626 int dbi = -2; in tcmu_setup_iovs()
629 for (; data_length > 0; data_length -= udev->data_blk_size) in tcmu_setup_iovs()
635 struct se_device *se_dev = se_cmd->se_dev; in tcmu_alloc_cmd()
643 INIT_LIST_HEAD(&tcmu_cmd->queue_entry); in tcmu_alloc_cmd()
644 tcmu_cmd->se_cmd = se_cmd; in tcmu_alloc_cmd()
645 tcmu_cmd->tcmu_dev = udev; in tcmu_alloc_cmd()
648 tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t), in tcmu_alloc_cmd()
650 if (!tcmu_cmd->dbi) { in tcmu_alloc_cmd()
661 void *start = vaddr - offset; in tcmu_flush_dcache_range()
668 size -= PAGE_SIZE; in tcmu_flush_dcache_range()
678 int diff = head - tail; in spc_used()
689 return (size - spc_used(head, tail, size) - 1); in spc_free()
694 return size - head; in head_to_end()
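
A quick worked example of the three ring-space helpers above, with illustrative values:

/* Illustrative: size = 1024, head = 100, tail = 900
 *   spc_used(100, 900, 1024) -> diff = -800, wraps to 224 bytes in use
 *   spc_free(100, 900, 1024) -> 1024 - 224 - 1 = 799 bytes free
 *   head_to_end(100, 1024)   -> 924 contiguous bytes before the ring wraps
 */
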
708 int dbi = -2; in tcmu_copy_data()
730 if (page_cnt > udev->data_pages_per_blk) in tcmu_copy_data()
731 page_cnt = udev->data_pages_per_blk; in tcmu_copy_data()
733 dpi = dbi * udev->data_pages_per_blk; in tcmu_copy_data()
736 page = xa_load(&udev->data_pages, dpi); in tcmu_copy_data()
755 PAGE_SIZE - page_remaining; in tcmu_copy_data()
761 data_len -= cp_len; in tcmu_copy_data()
762 page_remaining -= cp_len; in tcmu_copy_data()
777 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; in scatter_data_area()
779 tcmu_copy_data(udev, tcmu_cmd, TCMU_SG_TO_DATA_AREA, se_cmd->t_data_sg, in scatter_data_area()
780 se_cmd->t_data_nents, iov, se_cmd->data_length); in scatter_data_area()
786 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; in gather_data_area()
791 data_sg = se_cmd->t_data_sg; in gather_data_area()
792 data_nents = se_cmd->t_data_nents; in gather_data_area()
795 * For bidi case, the first count blocks are for Data-Out in gather_data_area()
796 * buffer blocks, and before gathering the Data-In buffer in gather_data_area()
797 * the Data-Out buffer blocks should be skipped. in gather_data_area()
800 tcmu_cmd->dbi_cnt - tcmu_cmd->dbi_bidi_cnt); in gather_data_area()
802 data_sg = se_cmd->t_bidi_data_sg; in gather_data_area()
803 data_nents = se_cmd->t_bidi_data_nents; in gather_data_area()
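
An illustrative layout for the bidi handling above: the Data-Out blocks come first in the dbi array, and gathering for Data-In starts right after them.

/* Illustrative: cmd->dbi_cnt == 8, cmd->dbi_bidi_cnt == 3
 *   dbi[0..4] -> Data-Out buffer blocks (skipped when gathering)
 *   dbi[5..7] -> Data-In buffer blocks copied back to t_bidi_data_sg
 * tcmu_cmd_set_dbi_cur(cmd, dbi_cnt - dbi_bidi_cnt) positions dbi_cur at 5.
 */
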
812 return thresh - bitmap_weight(bitmap, thresh); in spc_bitmap_free()
822 struct tcmu_mailbox *mb = udev->mb_addr; in is_ring_space_avail()
828 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ in is_ring_space_avail()
831 * If cmd end-of-ring space is too small then we need space for a NOP plus in is_ring_space_avail()
832 * original cmd - cmds are internally contiguous. in is_ring_space_avail()
834 if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size) in is_ring_space_avail()
837 cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size); in is_ring_space_avail()
839 space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size); in is_ring_space_avail()
842 udev->cmdr_last_cleaned, udev->cmdr_size); in is_ring_space_avail()
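
A worked example of the end-of-ring check above, with illustrative sizes:

/* Illustrative: udev->cmdr_size = 4096, cmd_head = 4000, cmd_size = 200
 *   head_to_end(4000, 4096) = 96 < 200, so the command cannot fit before the
 *   wrap; ring_insert_padding() will emit a 96-byte PAD entry first, and the
 *   space required becomes cmd_needed = 200 + 96 = 296 bytes of free space.
 */
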
850 * Returns -1 on error (not enough space) or number of needed iovs on success
859 if (!cmd->dbi_cnt) in tcmu_alloc_data_space()
863 space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh); in tcmu_alloc_data_space()
864 if (space < cmd->dbi_cnt) { in tcmu_alloc_data_space()
866 (udev->max_blocks - udev->dbi_thresh) + space; in tcmu_alloc_data_space()
868 if (blocks_left < cmd->dbi_cnt) { in tcmu_alloc_data_space()
870 blocks_left * udev->data_blk_size, in tcmu_alloc_data_space()
871 cmd->dbi_cnt * udev->data_blk_size); in tcmu_alloc_data_space()
872 return -1; in tcmu_alloc_data_space()
875 udev->dbi_thresh += cmd->dbi_cnt; in tcmu_alloc_data_space()
876 if (udev->dbi_thresh > udev->max_blocks) in tcmu_alloc_data_space()
877 udev->dbi_thresh = udev->max_blocks; in tcmu_alloc_data_space()
880 iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length); in tcmu_alloc_data_space()
882 return -1; in tcmu_alloc_data_space()
884 if (cmd->dbi_bidi_cnt) { in tcmu_alloc_data_space()
885 ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi); in tcmu_alloc_data_space()
887 return -1; in tcmu_alloc_data_space()
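
The dbi_thresh growth above can be followed with illustrative numbers; dbi_thresh bounds how far into the data area the bitmap search may go, and it only grows when the space below it is insufficient.

/* Illustrative: udev->max_blocks = 128, udev->dbi_thresh = 32,
 * 10 blocks free below the threshold, cmd->dbi_cnt = 20:
 *   space (10) < dbi_cnt (20), blocks_left = (128 - 32) + 10 = 106 >= 20,
 *   so dbi_thresh grows to 52 (clamped to max_blocks) and allocation proceeds.
 */
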
903 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; in tcmu_cmd_get_cmd_size()
907 round_up(scsi_command_size(se_cmd->t_task_cdb), in tcmu_cmd_get_cmd_size()
910 WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1)); in tcmu_cmd_get_cmd_size()
921 tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); in tcmu_setup_cmd_timer()
923 mod_timer(timer, tcmu_cmd->deadline); in tcmu_setup_cmd_timer()
926 tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC); in tcmu_setup_cmd_timer()
931 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; in add_to_qfull_queue()
938 if (!udev->qfull_time_out) in add_to_qfull_queue()
939 return -ETIMEDOUT; in add_to_qfull_queue()
940 else if (udev->qfull_time_out > 0) in add_to_qfull_queue()
941 tmo = udev->qfull_time_out; in add_to_qfull_queue()
942 else if (udev->cmd_time_out) in add_to_qfull_queue()
943 tmo = udev->cmd_time_out; in add_to_qfull_queue()
947 tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer); in add_to_qfull_queue()
949 list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue); in add_to_qfull_queue()
951 tcmu_cmd, udev->name); in add_to_qfull_queue()
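
The timeout chosen above depends on the per-device settings; summarizing the branch just listed:

/* qfull_time_out == 0 -> return -ETIMEDOUT, the command is not queued
 * qfull_time_out  > 0 -> tmo = qfull_time_out (ms)
 * qfull_time_out  < 0 -> tmo = cmd_time_out if set, otherwise no timer is
 *                        armed and the command waits on qfull_queue
 */
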
958 struct tcmu_mailbox *mb = udev->mb_addr; in ring_insert_padding()
959 uint32_t cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ in ring_insert_padding()
961 /* Insert a PAD if end-of-ring space is too small */ in ring_insert_padding()
962 if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) { in ring_insert_padding()
963 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size); in ring_insert_padding()
965 hdr = udev->cmdr + cmd_head; in ring_insert_padding()
966 tcmu_hdr_set_op(&hdr->len_op, TCMU_OP_PAD); in ring_insert_padding()
967 tcmu_hdr_set_len(&hdr->len_op, pad_size); in ring_insert_padding()
968 hdr->cmd_id = 0; /* not used for PAD */ in ring_insert_padding()
969 hdr->kflags = 0; in ring_insert_padding()
970 hdr->uflags = 0; in ring_insert_padding()
973 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); in ring_insert_padding()
976 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ in ring_insert_padding()
985 struct se_device *se_dev = se_plug->se_dev; in tcmu_unplug_device()
988 clear_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags); in tcmu_unplug_device()
989 uio_event_notify(&udev->uio_info); in tcmu_unplug_device()
996 if (!test_and_set_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags)) in tcmu_plug_device()
997 return &udev->se_plug; in tcmu_plug_device()
1003 * queue_cmd_ring - queue cmd to ring or internally
1005 * @scsi_err: TCM error code if failure (-1) returned.
1008 * -1 we cannot queue internally or to the ring.
1014 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; in queue_cmd_ring()
1015 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; in queue_cmd_ring()
1017 struct tcmu_mailbox *mb = udev->mb_addr; in queue_cmd_ring()
1023 uint32_t blk_size = udev->data_blk_size; in queue_cmd_ring()
1025 size_t data_length = (size_t)tcmu_cmd->dbi_cnt * blk_size; in queue_cmd_ring()
1029 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) { in queue_cmd_ring()
1031 return -1; in queue_cmd_ring()
1034 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { in queue_cmd_ring()
1036 return -1; in queue_cmd_ring()
1039 if (!list_empty(&udev->qfull_queue)) in queue_cmd_ring()
1042 if (data_length > (size_t)udev->max_blocks * blk_size) { in queue_cmd_ring()
1044 data_length, (size_t)udev->max_blocks * blk_size); in queue_cmd_ring()
1046 return -1; in queue_cmd_ring()
1060 if (command_size > (udev->cmdr_size / 2)) { in queue_cmd_ring()
1062 command_size, udev->cmdr_size); in queue_cmd_ring()
1063 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur); in queue_cmd_ring()
1065 return -1; in queue_cmd_ring()
1075 if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff), in queue_cmd_ring()
1079 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); in queue_cmd_ring()
1081 return -1; in queue_cmd_ring()
1083 tcmu_cmd->cmd_id = cmd_id; in queue_cmd_ring()
1085 pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id, in queue_cmd_ring()
1086 tcmu_cmd, udev->name); in queue_cmd_ring()
1090 entry = udev->cmdr + cmd_head; in queue_cmd_ring()
1092 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD); in queue_cmd_ring()
1096 iov = &entry->req.iov[0]; in queue_cmd_ring()
1098 if (se_cmd->data_direction == DMA_TO_DEVICE || in queue_cmd_ring()
1099 se_cmd->se_cmd_flags & SCF_BIDI) in queue_cmd_ring()
1102 tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length); in queue_cmd_ring()
1104 entry->req.iov_cnt = iov_cnt - iov_bidi_cnt; in queue_cmd_ring()
1107 if (se_cmd->se_cmd_flags & SCF_BIDI) { in queue_cmd_ring()
1109 tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi); in queue_cmd_ring()
1110 entry->req.iov_bidi_cnt = iov_bidi_cnt; in queue_cmd_ring()
1113 tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer); in queue_cmd_ring()
1115 entry->hdr.cmd_id = tcmu_cmd->cmd_id; in queue_cmd_ring()
1117 tcmu_hdr_set_len(&entry->hdr.len_op, command_size); in queue_cmd_ring()
1121 memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); in queue_cmd_ring()
1122 entry->req.cdb_off = cdb_off; in queue_cmd_ring()
1125 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); in queue_cmd_ring()
1128 list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue); in queue_cmd_ring()
1130 if (!test_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags)) in queue_cmd_ring()
1131 uio_event_notify(&udev->uio_info); in queue_cmd_ring()
1136 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur); in queue_cmd_ring()
1142 return -1; in queue_cmd_ring()
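
For context, the return convention of queue_cmd_ring(), which its callers (tcmu_queue_cmd() and run_qfull_queue()) rely on, is summarized below; this restates the function's kerneldoc, only partially shown above.

/* queue_cmd_ring() return values:
 *    0  command was placed on the command ring
 *    1  command was queued internally on qfull_queue to wait for ring space
 *   -1  cannot queue at all; *scsi_err carries the TCM sense code to return
 */
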
1149 * queue_tmr_ring - queue tmr info to ring or internally
1163 struct tcmu_mailbox *mb = udev->mb_addr; in queue_tmr_ring()
1166 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) in queue_tmr_ring()
1169 id_list_sz = sizeof(tmr->tmr_cmd_ids[0]) * tmr->tmr_cmd_cnt; in queue_tmr_ring()
1172 if (!list_empty(&udev->tmr_queue) || in queue_tmr_ring()
1174 list_add_tail(&tmr->queue_entry, &udev->tmr_queue); in queue_tmr_ring()
1176 tmr, udev->name); in queue_tmr_ring()
1182 entry = udev->cmdr + cmd_head; in queue_tmr_ring()
1184 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_TMR); in queue_tmr_ring()
1185 tcmu_hdr_set_len(&entry->hdr.len_op, cmd_size); in queue_tmr_ring()
1186 entry->tmr_type = tmr->tmr_type; in queue_tmr_ring()
1187 entry->cmd_cnt = tmr->tmr_cmd_cnt; in queue_tmr_ring()
1188 memcpy(&entry->cmd_ids[0], &tmr->tmr_cmd_ids[0], id_list_sz); in queue_tmr_ring()
1191 UPDATE_HEAD(mb->cmd_head, cmd_size, udev->cmdr_size); in queue_tmr_ring()
1194 uio_event_notify(&udev->uio_info); in queue_tmr_ring()
1205 struct se_device *se_dev = se_cmd->se_dev; in tcmu_queue_cmd()
1209 int ret = -1; in tcmu_queue_cmd()
1215 mutex_lock(&udev->cmdr_lock); in tcmu_queue_cmd()
1216 if (!(se_cmd->transport_state & CMD_T_ABORTED)) in tcmu_queue_cmd()
1221 se_cmd->priv = tcmu_cmd; in tcmu_queue_cmd()
1222 mutex_unlock(&udev->cmdr_lock); in tcmu_queue_cmd()
1233 mod_timer(timer, cmd->deadline); in tcmu_set_next_deadline()
1265 mutex_lock(&udev->cmdr_lock); in tcmu_tmr_notify()
1270 if (!se_cmd->priv) in tcmu_tmr_notify()
1272 cmd = se_cmd->priv; in tcmu_tmr_notify()
1274 if (cmd->cmd_id) { in tcmu_tmr_notify()
1279 cmd, udev->name); in tcmu_tmr_notify()
1281 list_del_init(&cmd->queue_entry); in tcmu_tmr_notify()
1283 se_cmd->priv = NULL; in tcmu_tmr_notify()
1288 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); in tcmu_tmr_notify()
1290 if (!test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags)) in tcmu_tmr_notify()
1294 tcmu_tmr_type(tmf), udev->name, i, cmd_cnt); in tcmu_tmr_notify()
1300 tmr->tmr_type = tcmu_tmr_type(tmf); in tcmu_tmr_notify()
1301 tmr->tmr_cmd_cnt = cmd_cnt; in tcmu_tmr_notify()
1306 if (!se_cmd->priv) in tcmu_tmr_notify()
1308 cmd = se_cmd->priv; in tcmu_tmr_notify()
1309 if (cmd->cmd_id) in tcmu_tmr_notify()
1310 tmr->tmr_cmd_ids[cmd_cnt++] = cmd->cmd_id; in tcmu_tmr_notify()
1317 mutex_unlock(&udev->cmdr_lock); in tcmu_tmr_notify()
1323 struct se_cmd *se_cmd = cmd->se_cmd; in tcmu_handle_completion()
1324 struct tcmu_dev *udev = cmd->tcmu_dev; in tcmu_handle_completion()
1333 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { in tcmu_handle_completion()
1337 if (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) { in tcmu_handle_completion()
1339 entry->hdr.cmd_id); in tcmu_handle_completion()
1340 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); in tcmu_handle_completion()
1345 list_del_init(&cmd->queue_entry); in tcmu_handle_completion()
1349 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { in tcmu_handle_completion()
1351 cmd->se_cmd); in tcmu_handle_completion()
1352 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION; in tcmu_handle_completion()
1356 read_len = se_cmd->data_length; in tcmu_handle_completion()
1357 if (se_cmd->data_direction == DMA_FROM_DEVICE && in tcmu_handle_completion()
1358 (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) { in tcmu_handle_completion()
1360 if (entry->rsp.read_len < read_len) in tcmu_handle_completion()
1361 read_len = entry->rsp.read_len; in tcmu_handle_completion()
1364 if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { in tcmu_handle_completion()
1365 transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer); in tcmu_handle_completion()
1369 se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL; in tcmu_handle_completion()
1371 if (se_cmd->se_cmd_flags & SCF_BIDI) { in tcmu_handle_completion()
1372 /* Get Data-In buffer before clean up */ in tcmu_handle_completion()
1374 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { in tcmu_handle_completion()
1376 } else if (se_cmd->data_direction == DMA_TO_DEVICE) { in tcmu_handle_completion()
1378 } else if (se_cmd->data_direction != DMA_NONE) { in tcmu_handle_completion()
1380 se_cmd->data_direction); in tcmu_handle_completion()
1384 se_cmd->priv = NULL; in tcmu_handle_completion()
1387 target_complete_cmd_with_length(cmd->se_cmd, in tcmu_handle_completion()
1388 entry->rsp.scsi_status, read_len); in tcmu_handle_completion()
1390 target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status); in tcmu_handle_completion()
1394 tcmu_cmd_free_data(cmd, cmd->dbi_cnt); in tcmu_handle_completion()
1405 clear_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); in tcmu_handle_completion()
1406 set_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags); in tcmu_handle_completion()
1416 if (list_empty(&udev->tmr_queue)) in tcmu_run_tmr_queue()
1419 pr_debug("running %s's tmr queue\n", udev->name); in tcmu_run_tmr_queue()
1421 list_splice_init(&udev->tmr_queue, &tmrs); in tcmu_run_tmr_queue()
1424 list_del_init(&tmr->queue_entry); in tcmu_run_tmr_queue()
1427 tmr, udev->name); in tcmu_run_tmr_queue()
1435 list_splice_tail(&tmrs, &udev->tmr_queue); in tcmu_run_tmr_queue()
1449 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { in tcmu_handle_completions()
1454 mb = udev->mb_addr; in tcmu_handle_completions()
1457 while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { in tcmu_handle_completions()
1459 struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned; in tcmu_handle_completions()
1466 size_t ring_left = head_to_end(udev->cmdr_last_cleaned, in tcmu_handle_completions()
1467 udev->cmdr_size); in tcmu_handle_completions()
1473 if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD || in tcmu_handle_completions()
1474 tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_TMR) { in tcmu_handle_completions()
1475 UPDATE_HEAD(udev->cmdr_last_cleaned, in tcmu_handle_completions()
1476 tcmu_hdr_get_len(entry->hdr.len_op), in tcmu_handle_completions()
1477 udev->cmdr_size); in tcmu_handle_completions()
1480 WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD); in tcmu_handle_completions()
1482 keep_buf = !!(entry->hdr.uflags & TCMU_UFLAG_KEEP_BUF); in tcmu_handle_completions()
1484 cmd = xa_load(&udev->commands, entry->hdr.cmd_id); in tcmu_handle_completions()
1486 cmd = xa_erase(&udev->commands, entry->hdr.cmd_id); in tcmu_handle_completions()
1489 entry->hdr.cmd_id); in tcmu_handle_completions()
1490 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); in tcmu_handle_completions()
1497 UPDATE_HEAD(udev->cmdr_last_cleaned, in tcmu_handle_completions()
1498 tcmu_hdr_get_len(entry->hdr.len_op), in tcmu_handle_completions()
1499 udev->cmdr_size); in tcmu_handle_completions()
1505 xa_empty(&udev->commands) && list_empty(&udev->qfull_queue)) { in tcmu_handle_completions()
1512 if (udev->cmd_time_out) in tcmu_handle_completions()
1513 tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); in tcmu_handle_completions()
1522 if (!time_after_eq(jiffies, cmd->deadline)) in tcmu_check_expired_ring_cmd()
1525 set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); in tcmu_check_expired_ring_cmd()
1526 list_del_init(&cmd->queue_entry); in tcmu_check_expired_ring_cmd()
1527 se_cmd = cmd->se_cmd; in tcmu_check_expired_ring_cmd()
1528 se_cmd->priv = NULL; in tcmu_check_expired_ring_cmd()
1529 cmd->se_cmd = NULL; in tcmu_check_expired_ring_cmd()
1532 cmd->cmd_id, cmd->tcmu_dev->name); in tcmu_check_expired_ring_cmd()
1541 if (!time_after_eq(jiffies, cmd->deadline)) in tcmu_check_expired_queue_cmd()
1545 cmd, cmd->tcmu_dev->name); in tcmu_check_expired_queue_cmd()
1547 list_del_init(&cmd->queue_entry); in tcmu_check_expired_queue_cmd()
1548 se_cmd = cmd->se_cmd; in tcmu_check_expired_queue_cmd()
1551 se_cmd->priv = NULL; in tcmu_check_expired_queue_cmd()
1558 if (list_empty(&udev->timedout_entry)) in tcmu_device_timedout()
1559 list_add_tail(&udev->timedout_entry, &timed_out_udevs); in tcmu_device_timedout()
1569 pr_debug("%s cmd timeout has expired\n", udev->name); in tcmu_cmd_timedout()
1577 pr_debug("%s qfull timeout has expired\n", udev->name); in tcmu_qfull_timedout()
1587 return -ENOMEM; in tcmu_attach_hba()
1589 tcmu_hba->host_id = host_id; in tcmu_attach_hba()
1590 hba->hba_ptr = tcmu_hba; in tcmu_attach_hba()
1597 kfree(hba->hba_ptr); in tcmu_detach_hba()
1598 hba->hba_ptr = NULL; in tcmu_detach_hba()
1608 kref_init(&udev->kref); in tcmu_alloc_device()
1610 udev->name = kstrdup(name, GFP_KERNEL); in tcmu_alloc_device()
1611 if (!udev->name) { in tcmu_alloc_device()
1616 udev->hba = hba; in tcmu_alloc_device()
1617 udev->cmd_time_out = TCMU_TIME_OUT; in tcmu_alloc_device()
1618 udev->qfull_time_out = -1; in tcmu_alloc_device()
1620 udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF; in tcmu_alloc_device()
1621 udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk; in tcmu_alloc_device()
1622 udev->cmdr_size = CMDR_SIZE_DEF; in tcmu_alloc_device()
1623 udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF); in tcmu_alloc_device()
1625 mutex_init(&udev->cmdr_lock); in tcmu_alloc_device()
1627 INIT_LIST_HEAD(&udev->node); in tcmu_alloc_device()
1628 INIT_LIST_HEAD(&udev->timedout_entry); in tcmu_alloc_device()
1629 INIT_LIST_HEAD(&udev->qfull_queue); in tcmu_alloc_device()
1630 INIT_LIST_HEAD(&udev->tmr_queue); in tcmu_alloc_device()
1631 INIT_LIST_HEAD(&udev->inflight_queue); in tcmu_alloc_device()
1632 xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1); in tcmu_alloc_device()
1634 timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); in tcmu_alloc_device()
1635 timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0); in tcmu_alloc_device()
1637 xa_init(&udev->data_pages); in tcmu_alloc_device()
1639 return &udev->se_dev; in tcmu_alloc_device()
1647 kfree(udev->uio_info.name); in tcmu_dev_call_rcu()
1648 kfree(udev->name); in tcmu_dev_call_rcu()
1654 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) || in tcmu_check_and_free_pending_cmd()
1655 test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) { in tcmu_check_and_free_pending_cmd()
1659 return -EINVAL; in tcmu_check_and_free_pending_cmd()
1669 first = first * udev->data_pages_per_blk; in tcmu_blocks_release()
1670 last = (last + 1) * udev->data_pages_per_blk - 1; in tcmu_blocks_release()
1671 xa_for_each_range(&udev->data_pages, dpi, page, first, last) { in tcmu_blocks_release()
1672 xa_erase(&udev->data_pages, dpi); in tcmu_blocks_release()
1675 * the to-be-released pages. A race condition may occur if in tcmu_blocks_release()
1706 list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) { in tcmu_remove_all_queued_tmr()
1707 list_del_init(&tmr->queue_entry); in tcmu_remove_all_queued_tmr()
1715 struct se_device *dev = &udev->se_dev; in tcmu_dev_kref_release()
1720 vfree(udev->mb_addr); in tcmu_dev_kref_release()
1721 udev->mb_addr = NULL; in tcmu_dev_kref_release()
1724 if (!list_empty(&udev->timedout_entry)) in tcmu_dev_kref_release()
1725 list_del(&udev->timedout_entry); in tcmu_dev_kref_release()
1729 mutex_lock(&udev->cmdr_lock); in tcmu_dev_kref_release()
1730 xa_for_each(&udev->commands, i, cmd) { in tcmu_dev_kref_release()
1736 if (!list_empty(&udev->qfull_queue)) in tcmu_dev_kref_release()
1738 xa_destroy(&udev->commands); in tcmu_dev_kref_release()
1741 tcmu_blocks_release(udev, 0, udev->dbi_max); in tcmu_dev_kref_release()
1742 bitmap_free(udev->data_bitmap); in tcmu_dev_kref_release()
1743 mutex_unlock(&udev->cmdr_lock); in tcmu_dev_kref_release()
1747 call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); in tcmu_dev_kref_release()
1757 if (list_empty(&udev->qfull_queue)) in run_qfull_queue()
1760 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); in run_qfull_queue()
1762 list_splice_init(&udev->qfull_queue, &cmds); in run_qfull_queue()
1765 list_del_init(&tcmu_cmd->queue_entry); in run_qfull_queue()
1768 tcmu_cmd, udev->name); in run_qfull_queue()
1778 tcmu_cmd->se_cmd->priv = NULL; in run_qfull_queue()
1779 target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY); in run_qfull_queue()
1787 tcmu_cmd, udev->name, scsi_ret); in run_qfull_queue()
1792 tcmu_cmd->se_cmd->priv = NULL; in run_qfull_queue()
1793 target_complete_cmd(tcmu_cmd->se_cmd, in run_qfull_queue()
1802 list_splice_tail(&cmds, &udev->qfull_queue); in run_qfull_queue()
1807 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); in run_qfull_queue()
1814 mutex_lock(&udev->cmdr_lock); in tcmu_irqcontrol()
1817 mutex_unlock(&udev->cmdr_lock); in tcmu_irqcontrol()
1828 struct tcmu_dev *udev = vma->vm_private_data; in tcmu_find_mem_index()
1829 struct uio_info *info = &udev->uio_info; in tcmu_find_mem_index()
1831 if (vma->vm_pgoff < MAX_UIO_MAPS) { in tcmu_find_mem_index()
1832 if (info->mem[vma->vm_pgoff].size == 0) in tcmu_find_mem_index()
1833 return -1; in tcmu_find_mem_index()
1834 return (int)vma->vm_pgoff; in tcmu_find_mem_index()
1836 return -1; in tcmu_find_mem_index()
1843 mutex_lock(&udev->cmdr_lock); in tcmu_try_get_data_page()
1844 page = xa_load(&udev->data_pages, dpi); in tcmu_try_get_data_page()
1848 mutex_unlock(&udev->cmdr_lock); in tcmu_try_get_data_page()
1857 dpi, udev->name); in tcmu_try_get_data_page()
1858 mutex_unlock(&udev->cmdr_lock); in tcmu_try_get_data_page()
1865 struct tcmu_dev *udev = vma->vm_private_data; in tcmu_vma_open()
1869 kref_get(&udev->kref); in tcmu_vma_open()
1874 struct tcmu_dev *udev = vma->vm_private_data; in tcmu_vma_close()
1879 kref_put(&udev->kref, tcmu_dev_kref_release); in tcmu_vma_close()
1884 struct tcmu_dev *udev = vmf->vma->vm_private_data; in tcmu_vma_fault()
1885 struct uio_info *info = &udev->uio_info; in tcmu_vma_fault()
1891 int mi = tcmu_find_mem_index(vmf->vma); in tcmu_vma_fault()
1899 offset = (vmf->pgoff - mi) << PAGE_SHIFT; in tcmu_vma_fault()
1901 if (offset < udev->data_off) { in tcmu_vma_fault()
1903 addr = (void *)(unsigned long)info->mem[mi].addr + offset; in tcmu_vma_fault()
1910 dpi = (offset - udev->data_off) / PAGE_SIZE; in tcmu_vma_fault()
1917 vmf->page = page; in tcmu_vma_fault()
1932 vma->vm_ops = &tcmu_vm_ops; in tcmu_mmap()
1934 vma->vm_private_data = udev; in tcmu_mmap()
1937 if (vma_pages(vma) != udev->mmap_pages) in tcmu_mmap()
1938 return -EINVAL; in tcmu_mmap()
1950 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags)) in tcmu_open()
1951 return -EBUSY; in tcmu_open()
1953 udev->inode = inode; in tcmu_open()
1967 mutex_lock(&udev->cmdr_lock); in tcmu_release()
1969 xa_for_each(&udev->commands, i, cmd) { in tcmu_release()
1977 if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) in tcmu_release()
1980 cmd->cmd_id, udev->name); in tcmu_release()
1983 xa_erase(&udev->commands, i); in tcmu_release()
1984 tcmu_cmd_free_data(cmd, cmd->dbi_cnt); in tcmu_release()
1988 * We only freed data space, not ring space. Therefore we don't call in tcmu_release()
1991 if (freed && list_empty(&udev->tmr_queue)) in tcmu_release()
1994 mutex_unlock(&udev->cmdr_lock); in tcmu_release()
1996 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); in tcmu_release()
2005 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; in tcmu_init_genl_cmd_reply()
2010 if (udev->nl_reply_supported <= 0) in tcmu_init_genl_cmd_reply()
2018 udev->name); in tcmu_init_genl_cmd_reply()
2019 return -EAGAIN; in tcmu_init_genl_cmd_reply()
2022 if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { in tcmu_init_genl_cmd_reply()
2025 nl_cmd->cmd, udev->name); in tcmu_init_genl_cmd_reply()
2026 return -EBUSY; in tcmu_init_genl_cmd_reply()
2030 nl_cmd->cmd = cmd; in tcmu_init_genl_cmd_reply()
2031 nl_cmd->udev = udev; in tcmu_init_genl_cmd_reply()
2032 init_completion(&nl_cmd->complete); in tcmu_init_genl_cmd_reply()
2033 INIT_LIST_HEAD(&nl_cmd->nl_list); in tcmu_init_genl_cmd_reply()
2035 list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list); in tcmu_init_genl_cmd_reply()
2043 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; in tcmu_destroy_genl_cmd_reply()
2048 if (udev->nl_reply_supported <= 0) in tcmu_destroy_genl_cmd_reply()
2053 list_del(&nl_cmd->nl_list); in tcmu_destroy_genl_cmd_reply()
2061 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; in tcmu_wait_genl_cmd_reply()
2067 if (udev->nl_reply_supported <= 0) in tcmu_wait_genl_cmd_reply()
2071 wait_for_completion(&nl_cmd->complete); in tcmu_wait_genl_cmd_reply()
2074 nl_cmd->cmd = TCMU_CMD_UNSPEC; in tcmu_wait_genl_cmd_reply()
2075 ret = nl_cmd->status; in tcmu_wait_genl_cmd_reply()
2087 int ret = -ENOMEM; in tcmu_netlink_event_init()
2097 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name); in tcmu_netlink_event_init()
2101 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor); in tcmu_netlink_event_init()
2105 ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index); in tcmu_netlink_event_init()
2137 (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE)) in tcmu_netlink_event_send()
2175 struct tcmu_hba *hba = udev->hba->hba_ptr; in tcmu_update_uio_info()
2179 info = &udev->uio_info; in tcmu_update_uio_info()
2181 if (udev->dev_config[0]) in tcmu_update_uio_info()
2182 str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id, in tcmu_update_uio_info()
2183 udev->name, udev->dev_config); in tcmu_update_uio_info()
2185 str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id, in tcmu_update_uio_info()
2186 udev->name); in tcmu_update_uio_info()
2188 return -ENOMEM; in tcmu_update_uio_info()
2191 kfree(info->name); in tcmu_update_uio_info()
2192 info->name = str; in tcmu_update_uio_info()
2209 info = &udev->uio_info; in tcmu_configure_device()
2211 mutex_lock(&udev->cmdr_lock); in tcmu_configure_device()
2212 udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL); in tcmu_configure_device()
2213 mutex_unlock(&udev->cmdr_lock); in tcmu_configure_device()
2214 if (!udev->data_bitmap) { in tcmu_configure_device()
2215 ret = -ENOMEM; in tcmu_configure_device()
2219 mb = vzalloc(udev->cmdr_size + CMDR_OFF); in tcmu_configure_device()
2221 ret = -ENOMEM; in tcmu_configure_device()
2226 udev->mb_addr = mb; in tcmu_configure_device()
2227 udev->cmdr = (void *)mb + CMDR_OFF; in tcmu_configure_device()
2228 udev->data_off = udev->cmdr_size + CMDR_OFF; in tcmu_configure_device()
2229 data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT; in tcmu_configure_device()
2230 udev->mmap_pages = (data_size + udev->cmdr_size + CMDR_OFF) >> PAGE_SHIFT; in tcmu_configure_device()
2231 udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE; in tcmu_configure_device()
2232 udev->dbi_thresh = 0; /* Default in Idle state */ in tcmu_configure_device()
2235 mb->version = TCMU_MAILBOX_VERSION; in tcmu_configure_device()
2236 mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | in tcmu_configure_device()
2240 mb->cmdr_off = CMDR_OFF; in tcmu_configure_device()
2241 mb->cmdr_size = udev->cmdr_size; in tcmu_configure_device()
2243 WARN_ON(!PAGE_ALIGNED(udev->data_off)); in tcmu_configure_device()
2246 info->version = __stringify(TCMU_MAILBOX_VERSION); in tcmu_configure_device()
2248 info->mem[0].name = "tcm-user command & data buffer"; in tcmu_configure_device()
2249 info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr; in tcmu_configure_device()
2250 info->mem[0].size = data_size + udev->cmdr_size + CMDR_OFF; in tcmu_configure_device()
2251 info->mem[0].memtype = UIO_MEM_NONE; in tcmu_configure_device()
2253 info->irqcontrol = tcmu_irqcontrol; in tcmu_configure_device()
2254 info->irq = UIO_IRQ_CUSTOM; in tcmu_configure_device()
2256 info->mmap = tcmu_mmap; in tcmu_configure_device()
2257 info->open = tcmu_open; in tcmu_configure_device()
2258 info->release = tcmu_release; in tcmu_configure_device()
2265 if (dev->dev_attrib.hw_block_size == 0) in tcmu_configure_device()
2266 dev->dev_attrib.hw_block_size = 512; in tcmu_configure_device()
2268 if (!dev->dev_attrib.hw_max_sectors) in tcmu_configure_device()
2269 dev->dev_attrib.hw_max_sectors = 128; in tcmu_configure_device()
2270 if (!dev->dev_attrib.emulate_write_cache) in tcmu_configure_device()
2271 dev->dev_attrib.emulate_write_cache = 0; in tcmu_configure_device()
2272 dev->dev_attrib.hw_queue_depth = 128; in tcmu_configure_device()
2277 if (udev->nl_reply_supported >= 0) in tcmu_configure_device()
2278 udev->nl_reply_supported = tcmu_kern_cmd_reply_supported; in tcmu_configure_device()
2284 kref_get(&udev->kref); in tcmu_configure_device()
2291 list_add(&udev->node, &root_udev); in tcmu_configure_device()
2297 kref_put(&udev->kref, tcmu_dev_kref_release); in tcmu_configure_device()
2298 uio_unregister_device(&udev->uio_info); in tcmu_configure_device()
2300 vfree(udev->mb_addr); in tcmu_configure_device()
2301 udev->mb_addr = NULL; in tcmu_configure_device()
2303 bitmap_free(udev->data_bitmap); in tcmu_configure_device()
2304 udev->data_bitmap = NULL; in tcmu_configure_device()
2306 kfree(info->name); in tcmu_configure_device()
2307 info->name = NULL; in tcmu_configure_device()
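
Schematically, the single UIO map assembled by tcmu_configure_device() above is laid out as follows; the offsets follow the assignments to udev->cmdr, udev->data_off and info->mem[0].size, while the absolute sizes depend on the configured cmd_ring_size_mb and max_data_area_mb:

/* info->mem[0] (mailbox and ring are vzalloc'd; data pages come from the
 * udev->data_pages xarray and are faulted in on demand):
 *   [0, CMDR_OFF)                      struct tcmu_mailbox
 *   [CMDR_OFF, data_off)               command ring, udev->cmdr_size bytes
 *   [data_off, data_off + data_size)   data area, TCMU_MBS_TO_PAGES(data_area_mb) pages
 * where data_off = udev->cmdr_size + CMDR_OFF and
 *       info->mem[0].size = data_size + udev->cmdr_size + CMDR_OFF
 */
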
2317 kref_put(&udev->kref, tcmu_dev_kref_release); in tcmu_free_device()
2324 del_timer_sync(&udev->cmd_timer); in tcmu_destroy_device()
2325 del_timer_sync(&udev->qfull_timer); in tcmu_destroy_device()
2328 list_del(&udev->node); in tcmu_destroy_device()
2333 uio_unregister_device(&udev->uio_info); in tcmu_destroy_device()
2336 kref_put(&udev->kref, tcmu_dev_kref_release); in tcmu_destroy_device()
2341 mutex_lock(&udev->cmdr_lock); in tcmu_unblock_dev()
2342 clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags); in tcmu_unblock_dev()
2343 mutex_unlock(&udev->cmdr_lock); in tcmu_unblock_dev()
2348 mutex_lock(&udev->cmdr_lock); in tcmu_block_dev()
2350 if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) in tcmu_block_dev()
2359 mutex_unlock(&udev->cmdr_lock); in tcmu_block_dev()
2368 mutex_lock(&udev->cmdr_lock); in tcmu_reset_ring()
2370 xa_for_each(&udev->commands, i, cmd) { in tcmu_reset_ring()
2372 cmd->cmd_id, udev->name, in tcmu_reset_ring()
2373 test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ? in tcmu_reset_ring()
2375 (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags) ? in tcmu_reset_ring()
2378 xa_erase(&udev->commands, i); in tcmu_reset_ring()
2379 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) && in tcmu_reset_ring()
2380 !test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) { in tcmu_reset_ring()
2381 WARN_ON(!cmd->se_cmd); in tcmu_reset_ring()
2382 list_del_init(&cmd->queue_entry); in tcmu_reset_ring()
2383 cmd->se_cmd->priv = NULL; in tcmu_reset_ring()
2389 target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY); in tcmu_reset_ring()
2392 target_complete_cmd(cmd->se_cmd, in tcmu_reset_ring()
2396 tcmu_cmd_free_data(cmd, cmd->dbi_cnt); in tcmu_reset_ring()
2400 mb = udev->mb_addr; in tcmu_reset_ring()
2402 pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned, in tcmu_reset_ring()
2403 mb->cmd_tail, mb->cmd_head); in tcmu_reset_ring()
2405 udev->cmdr_last_cleaned = 0; in tcmu_reset_ring()
2406 mb->cmd_tail = 0; in tcmu_reset_ring()
2407 mb->cmd_head = 0; in tcmu_reset_ring()
2409 clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); in tcmu_reset_ring()
2411 del_timer(&udev->cmd_timer); in tcmu_reset_ring()
2424 mutex_unlock(&udev->cmdr_lock); in tcmu_reset_ring()
2459 return -EINVAL; in tcmu_set_dev_attrib()
2468 uint32_t pages_per_blk = udev->data_pages_per_blk; in tcmu_set_max_blocks_param()
2478 return -EINVAL; in tcmu_set_max_blocks_param()
2488 return -EINVAL; in tcmu_set_max_blocks_param()
2491 mutex_lock(&udev->cmdr_lock); in tcmu_set_max_blocks_param()
2492 if (udev->data_bitmap) { in tcmu_set_max_blocks_param()
2494 ret = -EINVAL; in tcmu_set_max_blocks_param()
2498 udev->data_area_mb = val; in tcmu_set_max_blocks_param()
2499 udev->max_blocks = TCMU_MBS_TO_PAGES(val) / pages_per_blk; in tcmu_set_max_blocks_param()
2502 mutex_unlock(&udev->cmdr_lock); in tcmu_set_max_blocks_param()
2517 if (val > TCMU_MBS_TO_PAGES(udev->data_area_mb)) { in tcmu_set_data_pages_per_blk()
2518 pr_err("Invalid data_pages_per_blk %d: greater than max_data_area_mb %d -> %zd pages).\n", in tcmu_set_data_pages_per_blk()
2519 val, udev->data_area_mb, in tcmu_set_data_pages_per_blk()
2520 TCMU_MBS_TO_PAGES(udev->data_area_mb)); in tcmu_set_data_pages_per_blk()
2521 return -EINVAL; in tcmu_set_data_pages_per_blk()
2524 mutex_lock(&udev->cmdr_lock); in tcmu_set_data_pages_per_blk()
2525 if (udev->data_bitmap) { in tcmu_set_data_pages_per_blk()
2527 ret = -EINVAL; in tcmu_set_data_pages_per_blk()
2531 udev->data_pages_per_blk = val; in tcmu_set_data_pages_per_blk()
2532 udev->max_blocks = TCMU_MBS_TO_PAGES(udev->data_area_mb) / val; in tcmu_set_data_pages_per_blk()
2535 mutex_unlock(&udev->cmdr_lock); in tcmu_set_data_pages_per_blk()
2552 return -EINVAL; in tcmu_set_cmd_ring_size()
2555 mutex_lock(&udev->cmdr_lock); in tcmu_set_cmd_ring_size()
2556 if (udev->data_bitmap) { in tcmu_set_cmd_ring_size()
2558 ret = -EINVAL; in tcmu_set_cmd_ring_size()
2562 udev->cmdr_size = (val << 20) - CMDR_OFF; in tcmu_set_cmd_ring_size()
2566 udev->cmdr_size = CMDR_SIZE_DEF; in tcmu_set_cmd_ring_size()
2570 mutex_unlock(&udev->cmdr_lock); in tcmu_set_cmd_ring_size()
2584 return -ENOMEM; in tcmu_set_configfs_dev_params()
2595 if (match_strlcpy(udev->dev_config, &args[0], in tcmu_set_configfs_dev_params()
2597 ret = -EINVAL; in tcmu_set_configfs_dev_params()
2600 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config); in tcmu_set_configfs_dev_params()
2603 ret = match_u64(&args[0], &udev->dev_size); in tcmu_set_configfs_dev_params()
2610 &(dev->dev_attrib.hw_block_size)); in tcmu_set_configfs_dev_params()
2614 &(dev->dev_attrib.hw_max_sectors)); in tcmu_set_configfs_dev_params()
2617 ret = match_int(&args[0], &udev->nl_reply_supported); in tcmu_set_configfs_dev_params()
2649 udev->dev_config[0] ? udev->dev_config : "NULL"); in tcmu_show_configfs_dev_params()
2650 bl += sprintf(b + bl, "Size: %llu ", udev->dev_size); in tcmu_show_configfs_dev_params()
2651 bl += sprintf(b + bl, "MaxDataAreaMB: %u ", udev->data_area_mb); in tcmu_show_configfs_dev_params()
2652 bl += sprintf(b + bl, "DataPagesPerBlk: %u ", udev->data_pages_per_blk); in tcmu_show_configfs_dev_params()
2654 (udev->cmdr_size + CMDR_OFF) >> 20); in tcmu_show_configfs_dev_params()
2663 return div_u64(udev->dev_size - dev->dev_attrib.block_size, in tcmu_get_blocks()
2664 dev->dev_attrib.block_size); in tcmu_get_blocks()
2677 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_cmd_time_out_show()
2679 return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC); in tcmu_cmd_time_out_show()
2687 struct tcmu_dev *udev = container_of(da->da_dev, in tcmu_cmd_time_out_store()
2692 if (da->da_dev->export_count) { in tcmu_cmd_time_out_store()
2694 return -EINVAL; in tcmu_cmd_time_out_store()
2701 udev->cmd_time_out = val * MSEC_PER_SEC; in tcmu_cmd_time_out_store()
2710 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_qfull_time_out_show()
2712 return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ? in tcmu_qfull_time_out_show()
2713 udev->qfull_time_out : in tcmu_qfull_time_out_show()
2714 udev->qfull_time_out / MSEC_PER_SEC); in tcmu_qfull_time_out_show()
2722 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_qfull_time_out_store()
2731 udev->qfull_time_out = val * MSEC_PER_SEC; in tcmu_qfull_time_out_store()
2732 } else if (val == -1) { in tcmu_qfull_time_out_store()
2733 udev->qfull_time_out = val; in tcmu_qfull_time_out_store()
2736 return -EINVAL; in tcmu_qfull_time_out_store()
2746 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_max_data_area_mb_show()
2748 return snprintf(page, PAGE_SIZE, "%u\n", udev->data_area_mb); in tcmu_max_data_area_mb_show()
2757 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_data_pages_per_blk_show()
2759 return snprintf(page, PAGE_SIZE, "%u\n", udev->data_pages_per_blk); in tcmu_data_pages_per_blk_show()
2767 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_cmd_ring_size_mb_show()
2770 (udev->cmdr_size + CMDR_OFF) >> 20); in tcmu_cmd_ring_size_mb_show()
2778 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_dev_config_show()
2780 return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config); in tcmu_dev_config_show()
2809 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_dev_config_store()
2813 if (!len || len > TCMU_CONFIG_LEN - 1) in tcmu_dev_config_store()
2814 return -EINVAL; in tcmu_dev_config_store()
2817 if (target_dev_configured(&udev->se_dev)) { in tcmu_dev_config_store()
2823 strscpy(udev->dev_config, page, TCMU_CONFIG_LEN); in tcmu_dev_config_store()
2830 strscpy(udev->dev_config, page, TCMU_CONFIG_LEN); in tcmu_dev_config_store()
2840 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_dev_size_show()
2842 return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size); in tcmu_dev_size_show()
2870 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_dev_size_store()
2879 if (target_dev_configured(&udev->se_dev)) { in tcmu_dev_size_store()
2886 udev->dev_size = val; in tcmu_dev_size_store()
2896 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_nl_reply_supported_show()
2898 return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported); in tcmu_nl_reply_supported_show()
2906 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_nl_reply_supported_store()
2914 udev->nl_reply_supported = val; in tcmu_nl_reply_supported_store()
2925 return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache); in tcmu_emulate_write_cache_show()
2952 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_emulate_write_cache_store()
2961 if (target_dev_configured(&udev->se_dev)) { in tcmu_emulate_write_cache_store()
2969 da->emulate_write_cache = val; in tcmu_emulate_write_cache_store()
2978 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_tmr_notification_show()
2981 test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags)); in tcmu_tmr_notification_show()
2989 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); in tcmu_tmr_notification_store()
2997 return -EINVAL; in tcmu_tmr_notification_store()
3000 set_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags); in tcmu_tmr_notification_store()
3002 clear_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags); in tcmu_tmr_notification_store()
3014 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) in tcmu_block_dev_show()
3030 if (!target_dev_configured(&udev->se_dev)) { in tcmu_block_dev_store()
3032 return -EINVAL; in tcmu_block_dev_store()
3041 return -EINVAL; in tcmu_block_dev_store()
3062 if (!target_dev_configured(&udev->se_dev)) { in tcmu_reset_ring_store()
3064 return -EINVAL; in tcmu_reset_ring_store()
3073 return -EINVAL; in tcmu_reset_ring_store()
3092 if (!target_dev_configured(&udev->se_dev)) { in tcmu_free_kept_buf_store()
3094 return -EINVAL; in tcmu_free_kept_buf_store()
3101 mutex_lock(&udev->cmdr_lock); in tcmu_free_kept_buf_store()
3104 XA_STATE(xas, &udev->commands, cmd_id); in tcmu_free_kept_buf_store()
3110 count = -EINVAL; in tcmu_free_kept_buf_store()
3114 if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) { in tcmu_free_kept_buf_store()
3117 count = -EINVAL; in tcmu_free_kept_buf_store()
3125 tcmu_cmd_free_data(cmd, cmd->dbi_cnt); in tcmu_free_kept_buf_store()
3128 * We only freed data space, not ring space. Therefore we don't call in tcmu_free_kept_buf_store()
3131 if (list_empty(&udev->tmr_queue)) in tcmu_free_kept_buf_store()
3135 mutex_unlock(&udev->cmdr_lock); in tcmu_free_kept_buf_store()
3198 mutex_lock(&udev->cmdr_lock); in find_free_blocks()
3200 if (!target_dev_configured(&udev->se_dev)) { in find_free_blocks()
3201 mutex_unlock(&udev->cmdr_lock); in find_free_blocks()
3210 if (!udev->dbi_thresh) { in find_free_blocks()
3211 mutex_unlock(&udev->cmdr_lock); in find_free_blocks()
3215 end = udev->dbi_max + 1; in find_free_blocks()
3216 block = find_last_bit(udev->data_bitmap, end); in find_free_blocks()
3217 if (block == udev->dbi_max) { in find_free_blocks()
3222 mutex_unlock(&udev->cmdr_lock); in find_free_blocks()
3226 udev->dbi_thresh = start = 0; in find_free_blocks()
3227 udev->dbi_max = 0; in find_free_blocks()
3229 udev->dbi_thresh = start = block + 1; in find_free_blocks()
3230 udev->dbi_max = block; in find_free_blocks()
3243 pages_freed = tcmu_blocks_release(udev, start, end - 1); in find_free_blocks()
3246 off = udev->data_off + (loff_t)start * udev->data_blk_size; in find_free_blocks()
3247 unmap_mapping_range(udev->inode->i_mapping, off, 0, 1); in find_free_blocks()
3249 mutex_unlock(&udev->cmdr_lock); in find_free_blocks()
3252 total_blocks_freed += end - start; in find_free_blocks()
3254 pages_freed, total_pages_freed, end - start, in find_free_blocks()
3255 total_blocks_freed, udev->name); in find_free_blocks()
3273 list_del_init(&udev->timedout_entry); in check_timedout_devices()
3276 mutex_lock(&udev->cmdr_lock); in check_timedout_devices()
3282 if (udev->cmd_time_out) { in check_timedout_devices()
3284 &udev->inflight_queue, in check_timedout_devices()
3288 tcmu_set_next_deadline(&udev->inflight_queue, in check_timedout_devices()
3289 &udev->cmd_timer); in check_timedout_devices()
3291 list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue, in check_timedout_devices()
3295 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); in check_timedout_devices()
3297 mutex_unlock(&udev->cmdr_lock); in check_timedout_devices()
3324 return -ENOMEM; in tcmu_module_init()
3347 ret = -ENOMEM; in tcmu_module_init()