Lines Matching +full:async +full:- +full:prefix

1 // SPDX-License-Identifier: GPL-2.0
23 * If target is SCSI-3 or up, issue a REPORT LUN, and scan
36 #include <linux/async.h>
63 * Prefix values for the SCSI IDs (stored in sysfs name field)
92 "last scsi LUN (should be between 1 and 2^64-1)");
95 #define SCSI_SCAN_TYPE_DEFAULT "async"
104 MODULE_PARM_DESC(scan, "sync, async, manual, or none. "
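For reference, the scan mode matched above is normally chosen at boot, e.g. scsi_mod.scan=sync on the kernel command line (or scan=sync as a module option where scsi_mod is modular); "manual" and "none" suppress the automatic scan that otherwise runs when a host is registered, leaving discovery to an explicit request.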
126 * scsi_enable_async_suspend - Enable async suspend and resume
131 * If a user has disabled async probing a likely reason is due to a in scsi_enable_async_suspend()
132 * storage enclosure that does not inject staggered spin-ups. For in scsi_enable_async_suspend()
135 if (strncmp(scsi_scan_type, "async", 5) != 0) in scsi_enable_async_suspend()
142 * scsi_complete_async_scans - Wait for asynchronous scans to complete
157 * sleep a little. Even if we never get memory, the async in scsi_complete_async_scans()
165 data->shost = NULL; in scsi_complete_async_scans()
166 init_completion(&data->prev_finished); in scsi_complete_async_scans()
172 list_add_tail(&data->list, &scanning_hosts); in scsi_complete_async_scans()
176 wait_for_completion(&data->prev_finished); in scsi_complete_async_scans()
179 list_del(&data->list); in scsi_complete_async_scans()
183 complete(&next->prev_finished); in scsi_complete_async_scans()
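The scsi_complete_async_scans() lines above come from a simple chaining scheme: every in-flight scan holds an entry on the global scanning_hosts list, and an entry's prev_finished completion is signalled once everything queued ahead of it has finished. A stripped-down sketch of that pattern (kernel-style, with a plain mutex standing in for the real async_scan_lock and all host bookkeeping omitted) is:

    #include <linux/list.h>
    #include <linux/completion.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    struct async_scan_data {
        struct list_head list;
        struct completion prev_finished;   /* completed once all earlier entries are done */
    };

    static LIST_HEAD(scanning_hosts);
    static DEFINE_MUTEX(scan_lock);

    /* Queue a marker entry, wait for everything ahead of it, then wake the next waiter. */
    static int example_wait_for_earlier_scans(void)
    {
        struct async_scan_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

        if (!data)
            return -ENOMEM;
        init_completion(&data->prev_finished);

        mutex_lock(&scan_lock);
        if (list_empty(&scanning_hosts))
            complete(&data->prev_finished);    /* nothing ahead of us */
        list_add_tail(&data->list, &scanning_hosts);
        mutex_unlock(&scan_lock);

        wait_for_completion(&data->prev_finished);

        mutex_lock(&scan_lock);
        list_del(&data->list);
        if (!list_empty(&scanning_hosts))      /* pass the token to the next entry */
            complete(&list_first_entry(&scanning_hosts,
                                       struct async_scan_data, list)->prev_finished);
        mutex_unlock(&scan_lock);

        kfree(data);
        return 0;
    }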
193 * scsi_unlock_floptical - unlock device via a special MODE SENSE command
221 bool need_alloc = !sdev->budget_map.map; in scsi_realloc_sdev_budget_map()
230 * up one new default queue depth after calling ->slave_configure in scsi_realloc_sdev_budget_map()
232 if (!need_alloc && new_shift != sdev->budget_map.shift) in scsi_realloc_sdev_budget_map()
243 blk_mq_freeze_queue(sdev->request_queue); in scsi_realloc_sdev_budget_map()
244 sb_backup = sdev->budget_map; in scsi_realloc_sdev_budget_map()
246 ret = sbitmap_init_node(&sdev->budget_map, in scsi_realloc_sdev_budget_map()
249 sdev->request_queue->node, false, true); in scsi_realloc_sdev_budget_map()
251 sbitmap_resize(&sdev->budget_map, depth); in scsi_realloc_sdev_budget_map()
255 sdev->budget_map = sb_backup; in scsi_realloc_sdev_budget_map()
259 blk_mq_unfreeze_queue(sdev->request_queue); in scsi_realloc_sdev_budget_map()
265 * scsi_alloc_sdev - allocate and set up a scsi_device
268 * @hostdata: usually NULL and set by ->slave_alloc instead
285 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); in scsi_alloc_sdev()
287 sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, in scsi_alloc_sdev()
292 sdev->vendor = scsi_null_device_strs; in scsi_alloc_sdev()
293 sdev->model = scsi_null_device_strs; in scsi_alloc_sdev()
294 sdev->rev = scsi_null_device_strs; in scsi_alloc_sdev()
295 sdev->host = shost; in scsi_alloc_sdev()
296 sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD; in scsi_alloc_sdev()
297 sdev->id = starget->id; in scsi_alloc_sdev()
298 sdev->lun = lun; in scsi_alloc_sdev()
299 sdev->channel = starget->channel; in scsi_alloc_sdev()
300 mutex_init(&sdev->state_mutex); in scsi_alloc_sdev()
301 sdev->sdev_state = SDEV_CREATED; in scsi_alloc_sdev()
302 INIT_LIST_HEAD(&sdev->siblings); in scsi_alloc_sdev()
303 INIT_LIST_HEAD(&sdev->same_target_siblings); in scsi_alloc_sdev()
304 INIT_LIST_HEAD(&sdev->starved_entry); in scsi_alloc_sdev()
305 INIT_LIST_HEAD(&sdev->event_list); in scsi_alloc_sdev()
306 spin_lock_init(&sdev->list_lock); in scsi_alloc_sdev()
307 mutex_init(&sdev->inquiry_mutex); in scsi_alloc_sdev()
308 INIT_WORK(&sdev->event_work, scsi_evt_thread); in scsi_alloc_sdev()
309 INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue); in scsi_alloc_sdev()
311 sdev->sdev_gendev.parent = get_device(&starget->dev); in scsi_alloc_sdev()
312 sdev->sdev_target = starget; in scsi_alloc_sdev()
314 /* usually NULL and set by ->slave_alloc instead */ in scsi_alloc_sdev()
315 sdev->hostdata = hostdata; in scsi_alloc_sdev()
319 sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED; in scsi_alloc_sdev()
322 * Some low level driver could use device->type in scsi_alloc_sdev()
324 sdev->type = -1; in scsi_alloc_sdev()
331 sdev->borken = 1; in scsi_alloc_sdev()
333 sdev->sg_reserved_size = INT_MAX; in scsi_alloc_sdev()
335 q = blk_mq_init_queue(&sdev->host->tag_set); in scsi_alloc_sdev()
339 put_device(&starget->dev); in scsi_alloc_sdev()
343 kref_get(&sdev->host->tagset_refcnt); in scsi_alloc_sdev()
344 sdev->request_queue = q; in scsi_alloc_sdev()
345 q->queuedata = sdev; in scsi_alloc_sdev()
346 __scsi_init_queue(sdev->host, q); in scsi_alloc_sdev()
348 depth = sdev->host->cmd_per_lun ?: 1; in scsi_alloc_sdev()
357 put_device(&starget->dev); in scsi_alloc_sdev()
366 if (shost->hostt->slave_alloc) { in scsi_alloc_sdev()
367 ret = shost->hostt->slave_alloc(sdev); in scsi_alloc_sdev()
373 if (ret == -ENXIO) in scsi_alloc_sdev()
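The tail of scsi_alloc_sdev() above calls the host template's ->slave_alloc hook and treats -ENXIO as "no such device" rather than as a hard failure. A hypothetical low-level-driver hook illustrating that contract (the 8-LUN limit is made up for the example):

    #include <linux/errno.h>
    #include <scsi/scsi_device.h>

    /* Wired into a driver's struct scsi_host_template as .slave_alloc. */
    static int example_slave_alloc(struct scsi_device *sdev)
    {
        if (sdev->lun >= 8)          /* pretend the hardware only exposes LUNs 0-7 */
            return -ENXIO;           /* mid-layer quietly skips this LUN */

        sdev->hostdata = NULL;       /* per-LUN driver data would normally go here */
        return 0;
    }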
391 struct device *dev = &starget->dev; in scsi_target_destroy()
392 struct Scsi_Host *shost = dev_to_shost(dev->parent); in scsi_target_destroy()
395 BUG_ON(starget->state == STARGET_DEL); in scsi_target_destroy()
396 starget->state = STARGET_DEL; in scsi_target_destroy()
398 spin_lock_irqsave(shost->host_lock, flags); in scsi_target_destroy()
399 if (shost->hostt->target_destroy) in scsi_target_destroy()
400 shost->hostt->target_destroy(starget); in scsi_target_destroy()
401 list_del_init(&starget->siblings); in scsi_target_destroy()
402 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_target_destroy()
408 struct device *parent = dev->parent; in scsi_target_dev_release()
422 return dev->type == &scsi_target_type; in scsi_is_target_device()
434 list_for_each_entry(starget, &shost->__targets, siblings) { in __scsi_find_target()
435 if (starget->id == id && in __scsi_find_target()
436 starget->channel == channel) { in __scsi_find_target()
442 get_device(&found_starget->dev); in __scsi_find_target()
448 * scsi_target_reap_ref_release - remove target from visibility
466 if ((starget->state != STARGET_CREATED) && in scsi_target_reap_ref_release()
467 (starget->state != STARGET_CREATED_REMOVE)) { in scsi_target_reap_ref_release()
468 transport_remove_device(&starget->dev); in scsi_target_reap_ref_release()
469 device_del(&starget->dev); in scsi_target_reap_ref_release()
476 kref_put(&starget->reap_ref, scsi_target_reap_ref_release); in scsi_target_reap_ref_put()
480 * scsi_alloc_target - allocate a new or find an existing target
498 + shost->transportt->target_size; in scsi_alloc_target()
508 dev = &starget->dev; in scsi_alloc_target()
510 kref_init(&starget->reap_ref); in scsi_alloc_target()
511 dev->parent = get_device(parent); in scsi_alloc_target()
512 dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id); in scsi_alloc_target()
513 dev->bus = &scsi_bus_type; in scsi_alloc_target()
514 dev->type = &scsi_target_type; in scsi_alloc_target()
516 starget->id = id; in scsi_alloc_target()
517 starget->channel = channel; in scsi_alloc_target()
518 starget->can_queue = 0; in scsi_alloc_target()
519 INIT_LIST_HEAD(&starget->siblings); in scsi_alloc_target()
520 INIT_LIST_HEAD(&starget->devices); in scsi_alloc_target()
521 starget->state = STARGET_CREATED; in scsi_alloc_target()
522 starget->scsi_level = SCSI_2; in scsi_alloc_target()
523 starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED; in scsi_alloc_target()
525 spin_lock_irqsave(shost->host_lock, flags); in scsi_alloc_target()
531 list_add_tail(&starget->siblings, &shost->__targets); in scsi_alloc_target()
532 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_alloc_target()
535 if (shost->hostt->target_alloc) { in scsi_alloc_target()
536 error = shost->hostt->target_alloc(starget); in scsi_alloc_target()
539 if (error != -ENXIO) in scsi_alloc_target()
557 ref_got = kref_get_unless_zero(&found_target->reap_ref); in scsi_alloc_target()
559 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_alloc_target()
573 put_device(&found_target->dev); in scsi_alloc_target()
583 * scsi_target_reap - check to see if target is in use and destroy if not
597 BUG_ON(starget->state == STARGET_DEL); in scsi_target_reap()
602 * scsi_sanitize_inquiry_string - remove non-graphical chars from an
611 * this rule, we will replace non-graphic or non-ASCII characters
620 for (; len > 0; (--len, ++s)) { in scsi_sanitize_inquiry_string()
630 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
659 * transfer length of 36 unless sdev->inquiry_len specifies a in scsi_probe_lun()
661 first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36; in scsi_probe_lun()
689 * not-ready to ready transition [asc/ascq=0x28/0x0] in scsi_probe_lun()
690 * or power-on, reset [asc/ascq=0x29/0x0], continue. in scsi_probe_lun()
739 * LLD specified a maximum sdev->inquiry_len in scsi_probe_lun()
742 * devices. If a device supports SPC-4 (2014) in scsi_probe_lun()
746 else if (sdev->inquiry_len && in scsi_probe_lun()
747 response_len > sdev->inquiry_len && in scsi_probe_lun()
748 (inq_result[2] & 0x7) < 6) /* SPC-4 */ in scsi_probe_lun()
749 next_inquiry_len = sdev->inquiry_len; in scsi_probe_lun()
777 return -EIO; in scsi_probe_lun()
780 sdev->inquiry_len = min(try_inquiry_len, response_len); in scsi_probe_lun()
785 * and it would be possible to take an incorrect action - we do in scsi_probe_lun()
793 * better than copying < 36 bytes to the inquiry-result buffer in scsi_probe_lun()
797 if (sdev->inquiry_len < 36) { in scsi_probe_lun()
798 if (!sdev->host->short_inquiry) { in scsi_probe_lun()
799 shost_printk(KERN_INFO, sdev->host, in scsi_probe_lun()
801 " using 36\n", sdev->inquiry_len); in scsi_probe_lun()
802 sdev->host->short_inquiry = 1; in scsi_probe_lun()
804 sdev->inquiry_len = 36; in scsi_probe_lun()
823 * non-zero LUNs can be scanned. in scsi_probe_lun()
825 sdev->scsi_level = inq_result[2] & 0x0f; in scsi_probe_lun()
826 if (sdev->scsi_level >= 2 || in scsi_probe_lun()
827 (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1)) in scsi_probe_lun()
828 sdev->scsi_level++; in scsi_probe_lun()
829 sdev->sdev_target->scsi_level = sdev->scsi_level; in scsi_probe_lun()
832 * If SCSI-2 or lower, and if the transport requires it, in scsi_probe_lun()
835 sdev->lun_in_cdb = 0; in scsi_probe_lun()
836 if (sdev->scsi_level <= SCSI_2 && in scsi_probe_lun()
837 sdev->scsi_level != SCSI_UNKNOWN && in scsi_probe_lun()
838 !sdev->host->no_scsi2_lun_in_cdb) in scsi_probe_lun()
839 sdev->lun_in_cdb = 1; in scsi_probe_lun()
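Lines 825-828 above turn the raw INQUIRY ANSI version into the mid-layer scsi_level scale, which is offset by one; a minimal illustration (constants from <scsi/scsi.h>, with the SCSI-1/CCS response-format special case omitted):

    #include <scsi/scsi.h>

    static int example_scsi_level(unsigned char inquiry_byte2)
    {
        int level = inquiry_byte2 & 0x0f;

        if (level >= 2)
            level++;   /* ANSI 2 -> SCSI_2 (3), ANSI 5 (SPC-3) -> SCSI_SPC_3 (6) */
        return level;
    }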
845 * scsi_add_lun - allocate and fully initialize a scsi_device
849 * @async: 1 if this device is being scanned asynchronously
860 blist_flags_t *bflags, int async) in scsi_add_lun() argument
884 sdev->inquiry = kmemdup(inq_result, in scsi_add_lun()
885 max_t(size_t, sdev->inquiry_len, 36), in scsi_add_lun()
887 if (sdev->inquiry == NULL) in scsi_add_lun()
890 sdev->vendor = (char *) (sdev->inquiry + 8); in scsi_add_lun()
891 sdev->model = (char *) (sdev->inquiry + 16); in scsi_add_lun()
892 sdev->rev = (char *) (sdev->inquiry + 32); in scsi_add_lun()
894 if (strncmp(sdev->vendor, "ATA ", 8) == 0) { in scsi_add_lun()
901 sdev->allow_restart = 1; in scsi_add_lun()
905 sdev->type = TYPE_ROM; in scsi_add_lun()
906 sdev->removable = 1; in scsi_add_lun()
908 sdev->type = (inq_result[0] & 0x1f); in scsi_add_lun()
909 sdev->removable = (inq_result[1] & 0x80) >> 7; in scsi_add_lun()
913 * well-known logical units. Force well-known type in scsi_add_lun()
916 if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) { in scsi_add_lun()
918 "%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n", in scsi_add_lun()
919 __func__, sdev->type, (unsigned int)sdev->lun); in scsi_add_lun()
920 sdev->type = TYPE_WLUN; in scsi_add_lun()
925 if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) { in scsi_add_lun()
926 /* RBC and MMC devices can return SCSI-3 compliance and yet in scsi_add_lun()
950 sdev->inq_periph_qual = (inq_result[0] >> 5) & 7; in scsi_add_lun()
951 sdev->lockable = sdev->removable; in scsi_add_lun()
952 sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2); in scsi_add_lun()
954 if (sdev->scsi_level >= SCSI_3 || in scsi_add_lun()
955 (sdev->inquiry_len > 56 && inq_result[56] & 0x04)) in scsi_add_lun()
956 sdev->ppr = 1; in scsi_add_lun()
958 sdev->wdtr = 1; in scsi_add_lun()
960 sdev->sdtr = 1; in scsi_add_lun()
963 "ANSI: %d%s\n", scsi_device_type(sdev->type), in scsi_add_lun()
964 sdev->vendor, sdev->model, sdev->rev, in scsi_add_lun()
965 sdev->inq_periph_qual, inq_result[2] & 0x07, in scsi_add_lun()
968 if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) && in scsi_add_lun()
970 sdev->tagged_supported = 1; in scsi_add_lun()
971 sdev->simple_tags = 1; in scsi_add_lun()
980 sdev->borken = 0; in scsi_add_lun()
983 sdev->no_uld_attach = 1; in scsi_add_lun()
990 sdev->select_no_atn = 1; in scsi_add_lun()
997 blk_queue_max_hw_sectors(sdev->request_queue, 512); in scsi_add_lun()
1003 blk_queue_max_hw_sectors(sdev->request_queue, 1024); in scsi_add_lun()
1010 sdev->no_start_on_add = 1; in scsi_add_lun()
1013 scsi_target(sdev)->single_lun = 1; in scsi_add_lun()
1015 sdev->use_10_for_rw = 1; in scsi_add_lun()
1021 sdev->no_report_opcodes = 1; in scsi_add_lun()
1025 mutex_lock(&sdev->state_mutex); in scsi_add_lun()
1029 mutex_unlock(&sdev->state_mutex); in scsi_add_lun()
1034 scsi_device_state_name(sdev->sdev_state)); in scsi_add_lun()
1039 sdev->lockable = 0; in scsi_add_lun()
1042 sdev->retry_hwerror = 1; in scsi_add_lun()
1045 sdev->no_dif = 1; in scsi_add_lun()
1048 sdev->unmap_limit_for_ws = 1; in scsi_add_lun()
1051 sdev->ignore_media_change = 1; in scsi_add_lun()
1053 sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT; in scsi_add_lun()
1056 sdev->try_vpd_pages = 1; in scsi_add_lun()
1058 sdev->skip_vpd_pages = 1; in scsi_add_lun()
1061 sdev->no_vpd_size = 1; in scsi_add_lun()
1063 transport_configure_device(&sdev->sdev_gendev); in scsi_add_lun()
1065 if (sdev->host->hostt->slave_configure) { in scsi_add_lun()
1066 ret = sdev->host->hostt->slave_configure(sdev); in scsi_add_lun()
1072 if (ret != -ENXIO) { in scsi_add_lun()
1080 * The queue_depth is often changed in ->slave_configure. in scsi_add_lun()
1084 scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth); in scsi_add_lun()
1087 if (sdev->scsi_level >= SCSI_3) in scsi_add_lun()
1092 sdev->max_queue_depth = sdev->queue_depth; in scsi_add_lun()
1093 WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth); in scsi_add_lun()
1094 sdev->sdev_bflags = *bflags; in scsi_add_lun()
1101 if (!async && scsi_sysfs_add_sdev(sdev) != 0) in scsi_add_lun()
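The ->slave_configure handling above (lines 1065-1084) is also where a driver typically settles the per-LUN queue depth that the later budget-map reallocation has to accommodate. A hypothetical hook using the standard helper:

    #include <scsi/scsi_device.h>

    /* Wired into a driver's struct scsi_host_template as .slave_configure. */
    static int example_slave_configure(struct scsi_device *sdev)
    {
        /* Pick a per-LUN depth; scsi_change_queue_depth() applies it
         * (the mid-layer may clamp it to what the host can support). */
        scsi_change_queue_depth(sdev, 64);
        return 0;
    }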
1109 * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace
1110 * @buf: Output buffer with at least end-first+1 bytes of space
1134 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
1149 * - SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
1150 * - SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
1152 * - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
1164 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); in scsi_probe_and_add_lun()
1175 dev_name(&sdev->sdev_gendev))); in scsi_probe_and_add_lun()
1183 sdev->vendor, in scsi_probe_and_add_lun()
1184 sdev->model); in scsi_probe_and_add_lun()
1213 * logical disk configured at sdev->lun, but there in scsi_probe_and_add_lun()
1246 * 1) SCSI SPC-3, pp. 145-146 in scsi_probe_and_add_lun()
1253 * PDT=00h Direct-access device (floppy) in scsi_probe_and_add_lun()
1256 if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) && in scsi_probe_and_add_lun()
1266 res = scsi_add_lun(sdev, result, &bflags, shost->async_scan); in scsi_probe_and_add_lun()
1269 sdev->lockable = 0; in scsi_probe_and_add_lun()
1293 * scsi_sequential_lun_scan - sequentially scan a SCSI target
1304 * Modifies sdevscan->lun.
1312 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); in scsi_sequential_lun_scan()
1317 max_dev_lun = min(max_scsi_luns, shost->max_lun); in scsi_sequential_lun_scan()
1321 * SCSI-3 devices should be scanned via the REPORT LUNS. in scsi_sequential_lun_scan()
1324 max_dev_lun = shost->max_lun; in scsi_sequential_lun_scan()
1337 * scan any SCSI_1 device for non-0 luns, but that check would best in scsi_sequential_lun_scan()
1341 if ((sdevscan->scsi_level < SCSI_1_CCS) && in scsi_sequential_lun_scan()
1351 max_dev_lun = shost->max_lun; in scsi_sequential_lun_scan()
1353 * REGAL CDC-4X: avoid hang after LUN 4 in scsi_sequential_lun_scan()
1358 * Do not scan SCSI-2 or lower device past LUN 7, unless in scsi_sequential_lun_scan()
1379 * scsi_report_lun_scan - Scan using SCSI REPORT LUN results
1385 * Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command.
1389 * LUNs even if it's older than SCSI-3.
1392 * If starget->no_report_luns is set, return 1 always.
1410 struct Scsi_Host *shost = dev_to_shost(&starget->dev); in scsi_report_lun_scan()
1417 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set. in scsi_report_lun_scan()
1418 * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does in scsi_report_lun_scan()
1424 if (starget->scsi_level < SCSI_2 && in scsi_report_lun_scan()
1425 starget->scsi_level != SCSI_UNKNOWN) in scsi_report_lun_scan()
1427 if (starget->scsi_level < SCSI_3 && in scsi_report_lun_scan()
1428 (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8)) in scsi_report_lun_scan()
1432 if (starget->no_report_luns) in scsi_report_lun_scan()
1461 * bytes 1 - 5: reserved, set to zero. in scsi_report_lun_scan()
1466 * bytes 6 - 9: length of the command. in scsi_report_lun_scan()
1517 if (get_unaligned_be32(lun_data->scsi_lun) + in scsi_report_lun_scan()
1519 length = get_unaligned_be32(lun_data->scsi_lun) + in scsi_report_lun_scan()
1524 length = get_unaligned_be32(lun_data->scsi_lun); in scsi_report_lun_scan()
1538 if (lun > sdev->host->max_lun) { in scsi_report_lun_scan()
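The comment fragments at lines 1461-1466 describe the CDB layout that scsi_report_lun_scan() builds; a hedged sketch of just the CDB construction (buffer handling, retries, and the header location of the unaligned helpers vary by kernel version):

    #include <linux/string.h>
    #include <linux/types.h>
    #include <asm/unaligned.h>
    #include <scsi/scsi_proto.h>       /* REPORT_LUNS */

    static void example_build_report_luns_cdb(u8 cdb[12], u32 alloc_len)
    {
        memset(cdb, 0, 12);                        /* bytes 1-5, 10-11: reserved/control = 0 */
        cdb[0] = REPORT_LUNS;                      /* opcode 0xa0 */
        put_unaligned_be32(alloc_len, &cdb[6]);    /* bytes 6-9: allocation length */
    }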
1575 struct scsi_device *sdev = ERR_PTR(-ENODEV); in __scsi_add_device()
1576 struct device *parent = &shost->shost_gendev; in __scsi_add_device()
1580 return ERR_PTR(-ENODEV); in __scsi_add_device()
1584 return ERR_PTR(-ENOMEM); in __scsi_add_device()
1587 mutex_lock(&shost->scan_mutex); in __scsi_add_device()
1588 if (!shost->async_scan) in __scsi_add_device()
1596 mutex_unlock(&shost->scan_mutex); in __scsi_add_device()
1603 put_device(&starget->dev); in __scsi_add_device()
1624 struct device *dev = &sdev->sdev_gendev; in scsi_rescan_device()
1633 * in the power management core code when system resume is on-going. in scsi_rescan_device()
1635 if (sdev->sdev_state != SDEV_RUNNING || in scsi_rescan_device()
1636 blk_queue_pm_only(sdev->request_queue)) { in scsi_rescan_device()
1637 ret = -EWOULDBLOCK; in scsi_rescan_device()
1644 if (sdev->handler && sdev->handler->rescan) in scsi_rescan_device()
1645 sdev->handler->rescan(sdev); in scsi_rescan_device()
1647 if (dev->driver && try_module_get(dev->driver->owner)) { in scsi_rescan_device()
1648 struct scsi_driver *drv = to_scsi_driver(dev->driver); in scsi_rescan_device()
1650 if (drv->rescan) in scsi_rescan_device()
1651 drv->rescan(dev); in scsi_rescan_device()
1652 module_put(dev->driver->owner); in scsi_rescan_device()
1670 if (shost->this_id == id) in __scsi_scan_target()
1701 starget->scsi_level, rescan); in __scsi_scan_target()
1712 put_device(&starget->dev); in __scsi_scan_target()
1716 * scsi_scan_target - scan a target id, possibly including all LUNs on the target.
1745 mutex_lock(&shost->scan_mutex); in scsi_scan_target()
1746 if (!shost->async_scan) in scsi_scan_target()
1753 mutex_unlock(&shost->scan_mutex); in scsi_scan_target()
1764 for (id = 0; id < shost->max_id; ++id) { in scsi_scan_channel()
1774 if (shost->reverse_ordering) in scsi_scan_channel()
1778 order_id = shost->max_id - id - 1; in scsi_scan_channel()
1781 __scsi_scan_target(&shost->shost_gendev, channel, in scsi_scan_channel()
1785 __scsi_scan_target(&shost->shost_gendev, channel, in scsi_scan_channel()
1797 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) || in scsi_scan_host_selected()
1798 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) || in scsi_scan_host_selected()
1799 ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun))) in scsi_scan_host_selected()
1800 return -EINVAL; in scsi_scan_host_selected()
1802 mutex_lock(&shost->scan_mutex); in scsi_scan_host_selected()
1803 if (!shost->async_scan) in scsi_scan_host_selected()
1808 for (channel = 0; channel <= shost->max_channel; in scsi_scan_host_selected()
1816 mutex_unlock(&shost->scan_mutex); in scsi_scan_host_selected()
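For context, scsi_scan_host_selected() above is what the per-host sysfs scan attribute ends up calling, so a wildcard rescan can be requested from user space with e.g. echo "- - -" > /sys/class/scsi_host/host0/scan (channel, target id, lun; "-" maps to SCAN_WILD_CARD).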
1826 if (sdev->sdev_state == SDEV_DEL) in scsi_sysfs_add_devices()
1829 if (sdev->is_visible) in scsi_sysfs_add_devices()
1838 * scsi_prep_async_scan - prepare for an async scan
1855 mutex_lock(&shost->scan_mutex); in scsi_prep_async_scan()
1856 if (shost->async_scan) { in scsi_prep_async_scan()
1864 data->shost = scsi_host_get(shost); in scsi_prep_async_scan()
1865 if (!data->shost) in scsi_prep_async_scan()
1867 init_completion(&data->prev_finished); in scsi_prep_async_scan()
1869 spin_lock_irqsave(shost->host_lock, flags); in scsi_prep_async_scan()
1870 shost->async_scan = 1; in scsi_prep_async_scan()
1871 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_prep_async_scan()
1872 mutex_unlock(&shost->scan_mutex); in scsi_prep_async_scan()
1876 complete(&data->prev_finished); in scsi_prep_async_scan()
1877 list_add_tail(&data->list, &scanning_hosts); in scsi_prep_async_scan()
1883 mutex_unlock(&shost->scan_mutex); in scsi_prep_async_scan()
1889 * scsi_finish_async_scan - asynchronous scan has finished
1904 shost = data->shost; in scsi_finish_async_scan()
1906 mutex_lock(&shost->scan_mutex); in scsi_finish_async_scan()
1908 if (!shost->async_scan) { in scsi_finish_async_scan()
1911 mutex_unlock(&shost->scan_mutex); in scsi_finish_async_scan()
1915 wait_for_completion(&data->prev_finished); in scsi_finish_async_scan()
1919 spin_lock_irqsave(shost->host_lock, flags); in scsi_finish_async_scan()
1920 shost->async_scan = 0; in scsi_finish_async_scan()
1921 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_finish_async_scan()
1923 mutex_unlock(&shost->scan_mutex); in scsi_finish_async_scan()
1926 list_del(&data->list); in scsi_finish_async_scan()
1930 complete(&next->prev_finished); in scsi_finish_async_scan()
1941 if (shost->hostt->scan_finished) { in do_scsi_scan_host()
1943 if (shost->hostt->scan_start) in do_scsi_scan_host()
1944 shost->hostt->scan_start(shost); in do_scsi_scan_host()
1946 while (!shost->hostt->scan_finished(shost, jiffies - start)) in do_scsi_scan_host()
1957 struct Scsi_Host *shost = data->shost; in do_scan_async()
1964 * scsi_scan_host - scan the given adapter
1984 /* register with the async subsystem so wait_for_device_probe() in scsi_scan_host()
1999 spin_lock_irqsave(shost->host_lock, flags); in scsi_forget_host()
2000 list_for_each_entry(sdev, &shost->__devices, siblings) { in scsi_forget_host()
2001 if (sdev->sdev_state == SDEV_DEL) in scsi_forget_host()
2003 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_forget_host()
2007 spin_unlock_irqrestore(shost->host_lock, flags); in scsi_forget_host()