1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * scsi_scan.c 4 * 5 * Copyright (C) 2000 Eric Youngdale, 6 * Copyright (C) 2002 Patrick Mansfield 7 * 8 * The general scanning/probing algorithm is as follows, exceptions are 9 * made to it depending on device specific flags, compilation options, and 10 * global variable (boot or module load time) settings. 11 * 12 * A specific LUN is scanned via an INQUIRY command; if the LUN has a 13 * device attached, a scsi_device is allocated and setup for it. 14 * 15 * For every id of every channel on the given host: 16 * 17 * Scan LUN 0; if the target responds to LUN 0 (even if there is no 18 * device or storage attached to LUN 0): 19 * 20 * If LUN 0 has a device attached, allocate and setup a 21 * scsi_device for it. 22 * 23 * If target is SCSI-3 or up, issue a REPORT LUN, and scan 24 * all of the LUNs returned by the REPORT LUN; else, 25 * sequentially scan LUNs up until some maximum is reached, 26 * or a LUN is seen that cannot have a device attached to it. 27 */ 28 29 #include <linux/module.h> 30 #include <linux/moduleparam.h> 31 #include <linux/init.h> 32 #include <linux/blkdev.h> 33 #include <linux/delay.h> 34 #include <linux/kthread.h> 35 #include <linux/spinlock.h> 36 #include <linux/async.h> 37 #include <linux/slab.h> 38 #include <linux/unaligned.h> 39 40 #include <scsi/scsi.h> 41 #include <scsi/scsi_cmnd.h> 42 #include <scsi/scsi_device.h> 43 #include <scsi/scsi_driver.h> 44 #include <scsi/scsi_devinfo.h> 45 #include <scsi/scsi_host.h> 46 #include <scsi/scsi_transport.h> 47 #include <scsi/scsi_dh.h> 48 #include <scsi/scsi_eh.h> 49 50 #include "scsi_priv.h" 51 #include "scsi_logging.h" 52 53 #define ALLOC_FAILURE_MSG KERN_ERR "%s: Allocation failure during" \ 54 " SCSI scanning, some SCSI devices might not be configured\n" 55 56 /* 57 * Default timeout 58 */ 59 #define SCSI_TIMEOUT (2*HZ) 60 #define SCSI_REPORT_LUNS_TIMEOUT (30*HZ) 61 62 /* 63 * Prefix values for the SCSI id's (stored in sysfs name field) 64 */ 65 #define SCSI_UID_SER_NUM 'S' 66 #define SCSI_UID_UNKNOWN 'Z' 67 68 /* 69 * Return values of some of the scanning functions. 70 * 71 * SCSI_SCAN_NO_RESPONSE: no valid response received from the target, this 72 * includes allocation or general failures preventing IO from being sent. 73 * 74 * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is available 75 * on the given LUN. 76 * 77 * SCSI_SCAN_LUN_PRESENT: target responded, and a device is available on a 78 * given LUN. 79 */ 80 #define SCSI_SCAN_NO_RESPONSE 0 81 #define SCSI_SCAN_TARGET_PRESENT 1 82 #define SCSI_SCAN_LUN_PRESENT 2 83 84 static const char *scsi_null_device_strs = "nullnullnullnull"; 85 86 #define MAX_SCSI_LUNS 512 87 88 static u64 max_scsi_luns = MAX_SCSI_LUNS; 89 90 module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR); 91 MODULE_PARM_DESC(max_luns, 92 "last scsi LUN (should be between 1 and 2^64-1)"); 93 94 #ifdef CONFIG_SCSI_SCAN_ASYNC 95 #define SCSI_SCAN_TYPE_DEFAULT "async" 96 #else 97 #define SCSI_SCAN_TYPE_DEFAULT "sync" 98 #endif 99 100 static char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT; 101 102 module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), 103 S_IRUGO|S_IWUSR); 104 MODULE_PARM_DESC(scan, "sync, async, manual, or none. " 105 "Setting to 'manual' disables automatic scanning, but allows " 106 "for manual device scan via the 'scan' sysfs attribute."); 107 108 static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18; 109 110 module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR); 111 MODULE_PARM_DESC(inq_timeout, 112 "Timeout (in seconds) waiting for devices to answer INQUIRY." 113 " Default is 20. Some devices may need more; most need less."); 114 115 /* This lock protects only this list */ 116 static DEFINE_SPINLOCK(async_scan_lock); 117 static LIST_HEAD(scanning_hosts); 118 119 struct async_scan_data { 120 struct list_head list; 121 struct Scsi_Host *shost; 122 struct completion prev_finished; 123 }; 124 125 /* 126 * scsi_enable_async_suspend - Enable async suspend and resume 127 */ 128 void scsi_enable_async_suspend(struct device *dev) 129 { 130 /* 131 * If a user has disabled async probing a likely reason is due to a 132 * storage enclosure that does not inject staggered spin-ups. For 133 * safety, make resume synchronous as well in that case. 134 */ 135 if (strncmp(scsi_scan_type, "async", 5) != 0) 136 return; 137 /* Enable asynchronous suspend and resume. */ 138 device_enable_async_suspend(dev); 139 } 140 141 /** 142 * scsi_complete_async_scans - Wait for asynchronous scans to complete 143 * 144 * When this function returns, any host which started scanning before 145 * this function was called will have finished its scan. Hosts which 146 * started scanning after this function was called may or may not have 147 * finished. 148 */ 149 int scsi_complete_async_scans(void) 150 { 151 struct async_scan_data *data; 152 153 do { 154 scoped_guard(spinlock, &async_scan_lock) 155 if (list_empty(&scanning_hosts)) 156 return 0; 157 /* If we can't get memory immediately, that's OK. Just 158 * sleep a little. Even if we never get memory, the async 159 * scans will finish eventually. 160 */ 161 data = kmalloc(sizeof(*data), GFP_KERNEL); 162 if (!data) 163 msleep(1); 164 } while (!data); 165 166 data->shost = NULL; 167 init_completion(&data->prev_finished); 168 169 spin_lock(&async_scan_lock); 170 /* Check that there's still somebody else on the list */ 171 if (list_empty(&scanning_hosts)) 172 goto done; 173 list_add_tail(&data->list, &scanning_hosts); 174 spin_unlock(&async_scan_lock); 175 176 printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n"); 177 wait_for_completion(&data->prev_finished); 178 179 spin_lock(&async_scan_lock); 180 list_del(&data->list); 181 if (!list_empty(&scanning_hosts)) { 182 struct async_scan_data *next = list_entry(scanning_hosts.next, 183 struct async_scan_data, list); 184 complete(&next->prev_finished); 185 } 186 done: 187 spin_unlock(&async_scan_lock); 188 189 kfree(data); 190 return 0; 191 } 192 193 /** 194 * scsi_unlock_floptical - unlock device via a special MODE SENSE command 195 * @sdev: scsi device to send command to 196 * @result: area to store the result of the MODE SENSE 197 * 198 * Description: 199 * Send a vendor specific MODE SENSE (not a MODE SELECT) command. 200 * Called for BLIST_KEY devices. 201 **/ 202 static void scsi_unlock_floptical(struct scsi_device *sdev, 203 unsigned char *result) 204 { 205 unsigned char scsi_cmd[MAX_COMMAND_SIZE]; 206 207 sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n"); 208 scsi_cmd[0] = MODE_SENSE; 209 scsi_cmd[1] = 0; 210 scsi_cmd[2] = 0x2e; 211 scsi_cmd[3] = 0; 212 scsi_cmd[4] = 0x2a; /* size */ 213 scsi_cmd[5] = 0; 214 scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, result, 0x2a, 215 SCSI_TIMEOUT, 3, NULL); 216 } 217 218 static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev, 219 unsigned int depth) 220 { 221 int new_shift = sbitmap_calculate_shift(depth); 222 bool need_alloc = !sdev->budget_map.map; 223 bool need_free = false; 224 unsigned int memflags; 225 int ret; 226 struct sbitmap sb_backup; 227 228 depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev)); 229 230 /* 231 * realloc if new shift is calculated, which is caused by setting 232 * up one new default queue depth after calling ->sdev_configure 233 */ 234 if (!need_alloc && new_shift != sdev->budget_map.shift) 235 need_alloc = need_free = true; 236 237 if (!need_alloc) 238 return 0; 239 240 /* 241 * Request queue has to be frozen for reallocating budget map, 242 * and here disk isn't added yet, so freezing is pretty fast 243 */ 244 if (need_free) { 245 memflags = blk_mq_freeze_queue(sdev->request_queue); 246 sb_backup = sdev->budget_map; 247 } 248 ret = sbitmap_init_node(&sdev->budget_map, 249 scsi_device_max_queue_depth(sdev), 250 new_shift, GFP_NOIO, 251 sdev->request_queue->node, false, true); 252 if (!ret) 253 sbitmap_resize(&sdev->budget_map, depth); 254 255 if (need_free) { 256 if (ret) 257 sdev->budget_map = sb_backup; 258 else 259 sbitmap_free(&sb_backup); 260 ret = 0; 261 blk_mq_unfreeze_queue(sdev->request_queue, memflags); 262 } 263 return ret; 264 } 265 266 /** 267 * scsi_alloc_sdev - allocate and setup a scsi_Device 268 * @starget: which target to allocate a &scsi_device for 269 * @lun: which lun 270 * @hostdata: usually NULL and set by ->sdev_init instead 271 * 272 * Description: 273 * Allocate, initialize for io, and return a pointer to a scsi_Device. 274 * Stores the @shost, @channel, @id, and @lun in the scsi_Device, and 275 * adds scsi_Device to the appropriate list. 276 * 277 * Return value: 278 * scsi_Device pointer, or NULL on failure. 279 **/ 280 static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, 281 u64 lun, void *hostdata) 282 { 283 unsigned int depth; 284 struct scsi_device *sdev; 285 struct request_queue *q; 286 int display_failure_msg = 1, ret; 287 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 288 struct queue_limits lim; 289 290 sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size, 291 GFP_KERNEL); 292 if (!sdev) 293 goto out; 294 295 sdev->vendor = scsi_null_device_strs; 296 sdev->model = scsi_null_device_strs; 297 sdev->rev = scsi_null_device_strs; 298 sdev->host = shost; 299 sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD; 300 sdev->id = starget->id; 301 sdev->lun = lun; 302 sdev->channel = starget->channel; 303 mutex_init(&sdev->state_mutex); 304 sdev->sdev_state = SDEV_CREATED; 305 INIT_LIST_HEAD(&sdev->siblings); 306 INIT_LIST_HEAD(&sdev->same_target_siblings); 307 INIT_LIST_HEAD(&sdev->starved_entry); 308 INIT_LIST_HEAD(&sdev->event_list); 309 spin_lock_init(&sdev->list_lock); 310 mutex_init(&sdev->inquiry_mutex); 311 INIT_WORK(&sdev->event_work, scsi_evt_thread); 312 INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue); 313 314 sdev->sdev_gendev.parent = get_device(&starget->dev); 315 sdev->sdev_target = starget; 316 317 /* usually NULL and set by ->sdev_init instead */ 318 sdev->hostdata = hostdata; 319 320 /* if the device needs this changing, it may do so in the 321 * sdev_configure function */ 322 sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED; 323 324 /* 325 * Some low level driver could use device->type 326 */ 327 sdev->type = -1; 328 329 /* 330 * Assume that the device will have handshaking problems, 331 * and then fix this field later if it turns out it 332 * doesn't 333 */ 334 sdev->borken = 1; 335 336 sdev->sg_reserved_size = INT_MAX; 337 338 scsi_init_limits(shost, &lim); 339 q = blk_mq_alloc_queue(&sdev->host->tag_set, &lim, sdev); 340 if (IS_ERR(q)) { 341 /* release fn is set up in scsi_sysfs_device_initialise, so 342 * have to free and put manually here */ 343 put_device(&starget->dev); 344 kfree(sdev); 345 goto out; 346 } 347 kref_get(&sdev->host->tagset_refcnt); 348 sdev->request_queue = q; 349 350 depth = sdev->host->cmd_per_lun ?: 1; 351 352 /* 353 * Use .can_queue as budget map's depth because we have to 354 * support adjusting queue depth from sysfs. Meantime use 355 * default device queue depth to figure out sbitmap shift 356 * since we use this queue depth most of times. 357 */ 358 if (scsi_realloc_sdev_budget_map(sdev, depth)) { 359 put_device(&starget->dev); 360 kfree(sdev); 361 goto out; 362 } 363 364 scsi_change_queue_depth(sdev, depth); 365 366 scsi_sysfs_device_initialize(sdev); 367 368 if (shost->hostt->sdev_init) { 369 ret = shost->hostt->sdev_init(sdev); 370 if (ret) { 371 /* 372 * if LLDD reports slave not present, don't clutter 373 * console with alloc failure messages 374 */ 375 if (ret == -ENXIO) 376 display_failure_msg = 0; 377 goto out_device_destroy; 378 } 379 } 380 381 return sdev; 382 383 out_device_destroy: 384 __scsi_remove_device(sdev); 385 out: 386 if (display_failure_msg) 387 printk(ALLOC_FAILURE_MSG, __func__); 388 return NULL; 389 } 390 391 static void scsi_target_destroy(struct scsi_target *starget) 392 { 393 struct device *dev = &starget->dev; 394 struct Scsi_Host *shost = dev_to_shost(dev->parent); 395 unsigned long flags; 396 397 BUG_ON(starget->state == STARGET_DEL); 398 starget->state = STARGET_DEL; 399 transport_destroy_device(dev); 400 spin_lock_irqsave(shost->host_lock, flags); 401 if (shost->hostt->target_destroy) 402 shost->hostt->target_destroy(starget); 403 list_del_init(&starget->siblings); 404 spin_unlock_irqrestore(shost->host_lock, flags); 405 put_device(dev); 406 } 407 408 static void scsi_target_dev_release(struct device *dev) 409 { 410 struct device *parent = dev->parent; 411 struct scsi_target *starget = to_scsi_target(dev); 412 413 kfree(starget); 414 put_device(parent); 415 } 416 417 static const struct device_type scsi_target_type = { 418 .name = "scsi_target", 419 .release = scsi_target_dev_release, 420 }; 421 422 int scsi_is_target_device(const struct device *dev) 423 { 424 return dev->type == &scsi_target_type; 425 } 426 EXPORT_SYMBOL(scsi_is_target_device); 427 428 static struct scsi_target *__scsi_find_target(struct device *parent, 429 int channel, uint id) 430 { 431 struct scsi_target *starget, *found_starget = NULL; 432 struct Scsi_Host *shost = dev_to_shost(parent); 433 /* 434 * Search for an existing target for this sdev. 435 */ 436 list_for_each_entry(starget, &shost->__targets, siblings) { 437 if (starget->id == id && 438 starget->channel == channel) { 439 found_starget = starget; 440 break; 441 } 442 } 443 if (found_starget) 444 get_device(&found_starget->dev); 445 446 return found_starget; 447 } 448 449 /** 450 * scsi_target_reap_ref_release - remove target from visibility 451 * @kref: the reap_ref in the target being released 452 * 453 * Called on last put of reap_ref, which is the indication that no device 454 * under this target is visible anymore, so render the target invisible in 455 * sysfs. Note: we have to be in user context here because the target reaps 456 * should be done in places where the scsi device visibility is being removed. 457 */ 458 static void scsi_target_reap_ref_release(struct kref *kref) 459 { 460 struct scsi_target *starget 461 = container_of(kref, struct scsi_target, reap_ref); 462 463 /* 464 * if we get here and the target is still in a CREATED state that 465 * means it was allocated but never made visible (because a scan 466 * turned up no LUNs), so don't call device_del() on it. 467 */ 468 if ((starget->state != STARGET_CREATED) && 469 (starget->state != STARGET_CREATED_REMOVE)) { 470 transport_remove_device(&starget->dev); 471 device_del(&starget->dev); 472 } 473 scsi_target_destroy(starget); 474 } 475 476 static void scsi_target_reap_ref_put(struct scsi_target *starget) 477 { 478 kref_put(&starget->reap_ref, scsi_target_reap_ref_release); 479 } 480 481 /** 482 * scsi_alloc_target - allocate a new or find an existing target 483 * @parent: parent of the target (need not be a scsi host) 484 * @channel: target channel number (zero if no channels) 485 * @id: target id number 486 * 487 * Return an existing target if one exists, provided it hasn't already 488 * gone into STARGET_DEL state, otherwise allocate a new target. 489 * 490 * The target is returned with an incremented reference, so the caller 491 * is responsible for both reaping and doing a last put 492 */ 493 static struct scsi_target *scsi_alloc_target(struct device *parent, 494 int channel, uint id) 495 { 496 struct Scsi_Host *shost = dev_to_shost(parent); 497 struct device *dev = NULL; 498 unsigned long flags; 499 const int size = sizeof(struct scsi_target) 500 + shost->transportt->target_size; 501 struct scsi_target *starget; 502 struct scsi_target *found_target; 503 int error, ref_got; 504 505 starget = kzalloc(size, GFP_KERNEL); 506 if (!starget) { 507 printk(KERN_ERR "%s: allocation failure\n", __func__); 508 return NULL; 509 } 510 dev = &starget->dev; 511 device_initialize(dev); 512 kref_init(&starget->reap_ref); 513 dev->parent = get_device(parent); 514 dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id); 515 dev->bus = &scsi_bus_type; 516 dev->type = &scsi_target_type; 517 scsi_enable_async_suspend(dev); 518 starget->id = id; 519 starget->channel = channel; 520 starget->can_queue = 0; 521 INIT_LIST_HEAD(&starget->siblings); 522 INIT_LIST_HEAD(&starget->devices); 523 starget->state = STARGET_CREATED; 524 starget->scsi_level = SCSI_2; 525 starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED; 526 retry: 527 spin_lock_irqsave(shost->host_lock, flags); 528 529 found_target = __scsi_find_target(parent, channel, id); 530 if (found_target) 531 goto found; 532 533 list_add_tail(&starget->siblings, &shost->__targets); 534 spin_unlock_irqrestore(shost->host_lock, flags); 535 /* allocate and add */ 536 transport_setup_device(dev); 537 if (shost->hostt->target_alloc) { 538 error = shost->hostt->target_alloc(starget); 539 540 if(error) { 541 if (error != -ENXIO) 542 dev_err(dev, "target allocation failed, error %d\n", error); 543 /* don't want scsi_target_reap to do the final 544 * put because it will be under the host lock */ 545 scsi_target_destroy(starget); 546 return NULL; 547 } 548 } 549 get_device(dev); 550 551 return starget; 552 553 found: 554 /* 555 * release routine already fired if kref is zero, so if we can still 556 * take the reference, the target must be alive. If we can't, it must 557 * be dying and we need to wait for a new target 558 */ 559 ref_got = kref_get_unless_zero(&found_target->reap_ref); 560 561 spin_unlock_irqrestore(shost->host_lock, flags); 562 if (ref_got) { 563 put_device(dev); 564 return found_target; 565 } 566 /* 567 * Unfortunately, we found a dying target; need to wait until it's 568 * dead before we can get a new one. There is an anomaly here. We 569 * *should* call scsi_target_reap() to balance the kref_get() of the 570 * reap_ref above. However, since the target being released, it's 571 * already invisible and the reap_ref is irrelevant. If we call 572 * scsi_target_reap() we might spuriously do another device_del() on 573 * an already invisible target. 574 */ 575 put_device(&found_target->dev); 576 /* 577 * length of time is irrelevant here, we just want to yield the CPU 578 * for a tick to avoid busy waiting for the target to die. 579 */ 580 msleep(1); 581 goto retry; 582 } 583 584 /** 585 * scsi_target_reap - check to see if target is in use and destroy if not 586 * @starget: target to be checked 587 * 588 * This is used after removing a LUN or doing a last put of the target 589 * it checks atomically that nothing is using the target and removes 590 * it if so. 591 */ 592 void scsi_target_reap(struct scsi_target *starget) 593 { 594 /* 595 * serious problem if this triggers: STARGET_DEL is only set in the if 596 * the reap_ref drops to zero, so we're trying to do another final put 597 * on an already released kref 598 */ 599 BUG_ON(starget->state == STARGET_DEL); 600 scsi_target_reap_ref_put(starget); 601 } 602 603 /** 604 * scsi_sanitize_inquiry_string - remove non-graphical chars from an 605 * INQUIRY result string 606 * @s: INQUIRY result string to sanitize 607 * @len: length of the string 608 * 609 * Description: 610 * The SCSI spec says that INQUIRY vendor, product, and revision 611 * strings must consist entirely of graphic ASCII characters, 612 * padded on the right with spaces. Since not all devices obey 613 * this rule, we will replace non-graphic or non-ASCII characters 614 * with spaces. Exception: a NUL character is interpreted as a 615 * string terminator, so all the following characters are set to 616 * spaces. 617 **/ 618 void scsi_sanitize_inquiry_string(unsigned char *s, int len) 619 { 620 int terminated = 0; 621 622 for (; len > 0; (--len, ++s)) { 623 if (*s == 0) 624 terminated = 1; 625 if (terminated || *s < 0x20 || *s > 0x7e) 626 *s = ' '; 627 } 628 } 629 EXPORT_SYMBOL(scsi_sanitize_inquiry_string); 630 631 632 /** 633 * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY 634 * @sdev: scsi_device to probe 635 * @inq_result: area to store the INQUIRY result 636 * @result_len: len of inq_result 637 * @bflags: store any bflags found here 638 * 639 * Description: 640 * Probe the lun associated with @req using a standard SCSI INQUIRY; 641 * 642 * If the INQUIRY is successful, zero is returned and the 643 * INQUIRY data is in @inq_result; the scsi_level and INQUIRY length 644 * are copied to the scsi_device any flags value is stored in *@bflags. 645 **/ 646 static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result, 647 int result_len, blist_flags_t *bflags) 648 { 649 unsigned char scsi_cmd[MAX_COMMAND_SIZE]; 650 int first_inquiry_len, try_inquiry_len, next_inquiry_len; 651 int response_len = 0; 652 int pass, count, result, resid; 653 struct scsi_failure failure_defs[] = { 654 /* 655 * not-ready to ready transition [asc/ascq=0x28/0x0] or 656 * power-on, reset [asc/ascq=0x29/0x0], continue. INQUIRY 657 * should not yield UNIT_ATTENTION but many buggy devices do 658 * so anyway. 659 */ 660 { 661 .sense = UNIT_ATTENTION, 662 .asc = 0x28, 663 .result = SAM_STAT_CHECK_CONDITION, 664 }, 665 { 666 .sense = UNIT_ATTENTION, 667 .asc = 0x29, 668 .result = SAM_STAT_CHECK_CONDITION, 669 }, 670 { 671 .allowed = 1, 672 .result = DID_TIME_OUT << 16, 673 }, 674 {} 675 }; 676 struct scsi_failures failures = { 677 .total_allowed = 3, 678 .failure_definitions = failure_defs, 679 }; 680 const struct scsi_exec_args exec_args = { 681 .resid = &resid, 682 .failures = &failures, 683 }; 684 685 *bflags = 0; 686 687 /* Perform up to 3 passes. The first pass uses a conservative 688 * transfer length of 36 unless sdev->inquiry_len specifies a 689 * different value. */ 690 first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36; 691 try_inquiry_len = first_inquiry_len; 692 pass = 1; 693 694 next_pass: 695 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, 696 "scsi scan: INQUIRY pass %d length %d\n", 697 pass, try_inquiry_len)); 698 699 /* Each pass gets up to three chances to ignore Unit Attention */ 700 scsi_failures_reset_retries(&failures); 701 702 for (count = 0; count < 3; ++count) { 703 memset(scsi_cmd, 0, 6); 704 scsi_cmd[0] = INQUIRY; 705 scsi_cmd[4] = (unsigned char) try_inquiry_len; 706 707 memset(inq_result, 0, try_inquiry_len); 708 709 result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, 710 inq_result, try_inquiry_len, 711 HZ / 2 + HZ * scsi_inq_timeout, 3, 712 &exec_args); 713 714 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, 715 "scsi scan: INQUIRY %s with code 0x%x\n", 716 result ? "failed" : "successful", result)); 717 718 if (result == 0) { 719 /* 720 * if nothing was transferred, we try 721 * again. It's a workaround for some USB 722 * devices. 723 */ 724 if (resid == try_inquiry_len) 725 continue; 726 } 727 break; 728 } 729 730 if (result == 0) { 731 scsi_sanitize_inquiry_string(&inq_result[8], 8); 732 scsi_sanitize_inquiry_string(&inq_result[16], 16); 733 scsi_sanitize_inquiry_string(&inq_result[32], 4); 734 735 response_len = inq_result[4] + 5; 736 if (response_len > 255) 737 response_len = first_inquiry_len; /* sanity */ 738 739 /* 740 * Get any flags for this device. 741 * 742 * XXX add a bflags to scsi_device, and replace the 743 * corresponding bit fields in scsi_device, so bflags 744 * need not be passed as an argument. 745 */ 746 *bflags = scsi_get_device_flags(sdev, &inq_result[8], 747 &inq_result[16]); 748 749 /* When the first pass succeeds we gain information about 750 * what larger transfer lengths might work. */ 751 if (pass == 1) { 752 if (BLIST_INQUIRY_36 & *bflags) 753 next_inquiry_len = 36; 754 /* 755 * LLD specified a maximum sdev->inquiry_len 756 * but device claims it has more data. Capping 757 * the length only makes sense for legacy 758 * devices. If a device supports SPC-4 (2014) 759 * or newer, assume that it is safe to ask for 760 * as much as the device says it supports. 761 */ 762 else if (sdev->inquiry_len && 763 response_len > sdev->inquiry_len && 764 (inq_result[2] & 0x7) < 6) /* SPC-4 */ 765 next_inquiry_len = sdev->inquiry_len; 766 else 767 next_inquiry_len = response_len; 768 769 /* If more data is available perform the second pass */ 770 if (next_inquiry_len > try_inquiry_len) { 771 try_inquiry_len = next_inquiry_len; 772 pass = 2; 773 goto next_pass; 774 } 775 } 776 777 } else if (pass == 2) { 778 sdev_printk(KERN_INFO, sdev, 779 "scsi scan: %d byte inquiry failed. " 780 "Consider BLIST_INQUIRY_36 for this device\n", 781 try_inquiry_len); 782 783 /* If this pass failed, the third pass goes back and transfers 784 * the same amount as we successfully got in the first pass. */ 785 try_inquiry_len = first_inquiry_len; 786 pass = 3; 787 goto next_pass; 788 } 789 790 /* If the last transfer attempt got an error, assume the 791 * peripheral doesn't exist or is dead. */ 792 if (result) 793 return -EIO; 794 795 /* Don't report any more data than the device says is valid */ 796 sdev->inquiry_len = min(try_inquiry_len, response_len); 797 798 /* 799 * XXX Abort if the response length is less than 36? If less than 800 * 32, the lookup of the device flags (above) could be invalid, 801 * and it would be possible to take an incorrect action - we do 802 * not want to hang because of a short INQUIRY. On the flip side, 803 * if the device is spun down or becoming ready (and so it gives a 804 * short INQUIRY), an abort here prevents any further use of the 805 * device, including spin up. 806 * 807 * On the whole, the best approach seems to be to assume the first 808 * 36 bytes are valid no matter what the device says. That's 809 * better than copying < 36 bytes to the inquiry-result buffer 810 * and displaying garbage for the Vendor, Product, or Revision 811 * strings. 812 */ 813 if (sdev->inquiry_len < 36) { 814 if (!sdev->host->short_inquiry) { 815 shost_printk(KERN_INFO, sdev->host, 816 "scsi scan: INQUIRY result too short (%d)," 817 " using 36\n", sdev->inquiry_len); 818 sdev->host->short_inquiry = 1; 819 } 820 sdev->inquiry_len = 36; 821 } 822 823 /* 824 * Related to the above issue: 825 * 826 * XXX Devices (disk or all?) should be sent a TEST UNIT READY, 827 * and if not ready, sent a START_STOP to start (maybe spin up) and 828 * then send the INQUIRY again, since the INQUIRY can change after 829 * a device is initialized. 830 * 831 * Ideally, start a device if explicitly asked to do so. This 832 * assumes that a device is spun up on power on, spun down on 833 * request, and then spun up on request. 834 */ 835 836 /* 837 * The scanning code needs to know the scsi_level, even if no 838 * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so 839 * non-zero LUNs can be scanned. 840 */ 841 sdev->scsi_level = inq_result[2] & 0x0f; 842 if (sdev->scsi_level >= 2 || 843 (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1)) 844 sdev->scsi_level++; 845 sdev->sdev_target->scsi_level = sdev->scsi_level; 846 847 /* 848 * If SCSI-2 or lower, and if the transport requires it, 849 * store the LUN value in CDB[1]. 850 */ 851 sdev->lun_in_cdb = 0; 852 if (sdev->scsi_level <= SCSI_2 && 853 sdev->scsi_level != SCSI_UNKNOWN && 854 !sdev->host->no_scsi2_lun_in_cdb) 855 sdev->lun_in_cdb = 1; 856 857 return 0; 858 } 859 860 /** 861 * scsi_add_lun - allocate and fully initialze a scsi_device 862 * @sdev: holds information to be stored in the new scsi_device 863 * @inq_result: holds the result of a previous INQUIRY to the LUN 864 * @bflags: black/white list flag 865 * @async: 1 if this device is being scanned asynchronously 866 * 867 * Description: 868 * Initialize the scsi_device @sdev. Optionally set fields based 869 * on values in *@bflags. 870 * 871 * Return: 872 * SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device 873 * SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized 874 **/ 875 static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, 876 blist_flags_t *bflags, int async) 877 { 878 const struct scsi_host_template *hostt = sdev->host->hostt; 879 struct queue_limits lim; 880 int ret; 881 882 /* 883 * XXX do not save the inquiry, since it can change underneath us, 884 * save just vendor/model/rev. 885 * 886 * Rather than save it and have an ioctl that retrieves the saved 887 * value, have an ioctl that executes the same INQUIRY code used 888 * in scsi_probe_lun, let user level programs doing INQUIRY 889 * scanning run at their own risk, or supply a user level program 890 * that can correctly scan. 891 */ 892 893 /* 894 * Copy at least 36 bytes of INQUIRY data, so that we don't 895 * dereference unallocated memory when accessing the Vendor, 896 * Product, and Revision strings. Badly behaved devices may set 897 * the INQUIRY Additional Length byte to a small value, indicating 898 * these strings are invalid, but often they contain plausible data 899 * nonetheless. It doesn't matter if the device sent < 36 bytes 900 * total, since scsi_probe_lun() initializes inq_result with 0s. 901 */ 902 sdev->inquiry = kmemdup(inq_result, 903 max_t(size_t, sdev->inquiry_len, 36), 904 GFP_KERNEL); 905 if (sdev->inquiry == NULL) 906 return SCSI_SCAN_NO_RESPONSE; 907 908 sdev->vendor = (char *) (sdev->inquiry + 8); 909 sdev->model = (char *) (sdev->inquiry + 16); 910 sdev->rev = (char *) (sdev->inquiry + 32); 911 912 if (strncmp(sdev->vendor, "ATA ", 8) == 0) { 913 /* 914 * sata emulation layer device. This is a hack to work around 915 * the SATL power management specifications which state that 916 * when the SATL detects the device has gone into standby 917 * mode, it shall respond with NOT READY. 918 */ 919 sdev->allow_restart = 1; 920 } 921 922 if (*bflags & BLIST_ISROM) { 923 sdev->type = TYPE_ROM; 924 sdev->removable = 1; 925 } else { 926 sdev->type = (inq_result[0] & 0x1f); 927 sdev->removable = (inq_result[1] & 0x80) >> 7; 928 929 /* 930 * some devices may respond with wrong type for 931 * well-known logical units. Force well-known type 932 * to enumerate them correctly. 933 */ 934 if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) { 935 sdev_printk(KERN_WARNING, sdev, 936 "%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n", 937 __func__, sdev->type, (unsigned int)sdev->lun); 938 sdev->type = TYPE_WLUN; 939 } 940 941 } 942 943 if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) { 944 /* RBC and MMC devices can return SCSI-3 compliance and yet 945 * still not support REPORT LUNS, so make them act as 946 * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is 947 * specifically set */ 948 if ((*bflags & BLIST_REPORTLUN2) == 0) 949 *bflags |= BLIST_NOREPORTLUN; 950 } 951 952 /* 953 * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI 954 * spec says: The device server is capable of supporting the 955 * specified peripheral device type on this logical unit. However, 956 * the physical device is not currently connected to this logical 957 * unit. 958 * 959 * The above is vague, as it implies that we could treat 001 and 960 * 011 the same. Stay compatible with previous code, and create a 961 * scsi_device for a PQ of 1 962 * 963 * Don't set the device offline here; rather let the upper 964 * level drivers eval the PQ to decide whether they should 965 * attach. So remove ((inq_result[0] >> 5) & 7) == 1 check. 966 */ 967 968 sdev->inq_periph_qual = (inq_result[0] >> 5) & 7; 969 sdev->lockable = sdev->removable; 970 sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2); 971 972 if (sdev->scsi_level >= SCSI_3 || 973 (sdev->inquiry_len > 56 && inq_result[56] & 0x04)) 974 sdev->ppr = 1; 975 if (inq_result[7] & 0x60) 976 sdev->wdtr = 1; 977 if (inq_result[7] & 0x10) 978 sdev->sdtr = 1; 979 980 sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d " 981 "ANSI: %d%s\n", scsi_device_type(sdev->type), 982 sdev->vendor, sdev->model, sdev->rev, 983 sdev->inq_periph_qual, inq_result[2] & 0x07, 984 (inq_result[3] & 0x0f) == 1 ? " CCS" : ""); 985 986 if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) && 987 !(*bflags & BLIST_NOTQ)) { 988 sdev->tagged_supported = 1; 989 sdev->simple_tags = 1; 990 } 991 992 /* 993 * Some devices (Texel CD ROM drives) have handshaking problems 994 * when used with the Seagate controllers. borken is initialized 995 * to 1, and then set it to 0 here. 996 */ 997 if ((*bflags & BLIST_BORKEN) == 0) 998 sdev->borken = 0; 999 1000 if (*bflags & BLIST_NO_ULD_ATTACH) 1001 sdev->no_uld_attach = 1; 1002 1003 /* 1004 * Apparently some really broken devices (contrary to the SCSI 1005 * standards) need to be selected without asserting ATN 1006 */ 1007 if (*bflags & BLIST_SELECT_NO_ATN) 1008 sdev->select_no_atn = 1; 1009 1010 /* 1011 * Some devices may not want to have a start command automatically 1012 * issued when a device is added. 1013 */ 1014 if (*bflags & BLIST_NOSTARTONADD) 1015 sdev->no_start_on_add = 1; 1016 1017 if (*bflags & BLIST_SINGLELUN) 1018 scsi_target(sdev)->single_lun = 1; 1019 1020 sdev->use_10_for_rw = 1; 1021 1022 /* some devices don't like REPORT SUPPORTED OPERATION CODES 1023 * and will simply timeout causing sd_mod init to take a very 1024 * very long time */ 1025 if (*bflags & BLIST_NO_RSOC) 1026 sdev->no_report_opcodes = 1; 1027 1028 /* set the device running here so that slave configure 1029 * may do I/O */ 1030 mutex_lock(&sdev->state_mutex); 1031 ret = scsi_device_set_state(sdev, SDEV_RUNNING); 1032 if (ret) 1033 ret = scsi_device_set_state(sdev, SDEV_BLOCK); 1034 mutex_unlock(&sdev->state_mutex); 1035 1036 if (ret) { 1037 sdev_printk(KERN_ERR, sdev, 1038 "in wrong state %s to complete scan\n", 1039 scsi_device_state_name(sdev->sdev_state)); 1040 return SCSI_SCAN_NO_RESPONSE; 1041 } 1042 1043 if (*bflags & BLIST_NOT_LOCKABLE) 1044 sdev->lockable = 0; 1045 1046 if (*bflags & BLIST_RETRY_HWERROR) 1047 sdev->retry_hwerror = 1; 1048 1049 if (*bflags & BLIST_NO_DIF) 1050 sdev->no_dif = 1; 1051 1052 if (*bflags & BLIST_UNMAP_LIMIT_WS) 1053 sdev->unmap_limit_for_ws = 1; 1054 1055 if (*bflags & BLIST_IGN_MEDIA_CHANGE) 1056 sdev->ignore_media_change = 1; 1057 1058 sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT; 1059 1060 if (*bflags & BLIST_TRY_VPD_PAGES) 1061 sdev->try_vpd_pages = 1; 1062 else if (*bflags & BLIST_SKIP_VPD_PAGES) 1063 sdev->skip_vpd_pages = 1; 1064 1065 if (*bflags & BLIST_NO_VPD_SIZE) 1066 sdev->no_vpd_size = 1; 1067 1068 transport_configure_device(&sdev->sdev_gendev); 1069 1070 /* 1071 * No need to freeze the queue as it isn't reachable to anyone else yet. 1072 */ 1073 lim = queue_limits_start_update(sdev->request_queue); 1074 if (*bflags & BLIST_MAX_512) 1075 lim.max_hw_sectors = 512; 1076 else if (*bflags & BLIST_MAX_1024) 1077 lim.max_hw_sectors = 1024; 1078 1079 if (hostt->sdev_configure) 1080 ret = hostt->sdev_configure(sdev, &lim); 1081 if (ret) { 1082 queue_limits_cancel_update(sdev->request_queue); 1083 /* 1084 * If the LLDD reports device not present, don't clutter the 1085 * console with failure messages. 1086 */ 1087 if (ret != -ENXIO) 1088 sdev_printk(KERN_ERR, sdev, 1089 "failed to configure device\n"); 1090 return SCSI_SCAN_NO_RESPONSE; 1091 } 1092 1093 ret = queue_limits_commit_update(sdev->request_queue, &lim); 1094 if (ret) { 1095 sdev_printk(KERN_ERR, sdev, "failed to apply queue limits.\n"); 1096 return SCSI_SCAN_NO_RESPONSE; 1097 } 1098 1099 /* 1100 * The queue_depth is often changed in ->sdev_configure. 1101 * 1102 * Set up budget map again since memory consumption of the map depends 1103 * on actual queue depth. 1104 */ 1105 if (hostt->sdev_configure) 1106 scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth); 1107 1108 if (sdev->scsi_level >= SCSI_3) 1109 scsi_attach_vpd(sdev); 1110 1111 scsi_cdl_check(sdev); 1112 1113 sdev->max_queue_depth = sdev->queue_depth; 1114 WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth); 1115 sdev->sdev_bflags = *bflags; 1116 1117 /* 1118 * Ok, the device is now all set up, we can 1119 * register it and tell the rest of the kernel 1120 * about it. 1121 */ 1122 if (!async && scsi_sysfs_add_sdev(sdev) != 0) 1123 return SCSI_SCAN_NO_RESPONSE; 1124 1125 return SCSI_SCAN_LUN_PRESENT; 1126 } 1127 1128 #ifdef CONFIG_SCSI_LOGGING 1129 /** 1130 * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace 1131 * @buf: Output buffer with at least end-first+1 bytes of space 1132 * @inq: Inquiry buffer (input) 1133 * @first: Offset of string into inq 1134 * @end: Index after last character in inq 1135 */ 1136 static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq, 1137 unsigned first, unsigned end) 1138 { 1139 unsigned term = 0, idx; 1140 1141 for (idx = 0; idx + first < end && idx + first < inq[4] + 5; idx++) { 1142 if (inq[idx+first] > ' ') { 1143 buf[idx] = inq[idx+first]; 1144 term = idx+1; 1145 } else { 1146 buf[idx] = ' '; 1147 } 1148 } 1149 buf[term] = 0; 1150 return buf; 1151 } 1152 #endif 1153 1154 /** 1155 * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it 1156 * @starget: pointer to target device structure 1157 * @lun: LUN of target device 1158 * @bflagsp: store bflags here if not NULL 1159 * @sdevp: probe the LUN corresponding to this scsi_device 1160 * @rescan: if not equal to SCSI_SCAN_INITIAL skip some code only 1161 * needed on first scan 1162 * @hostdata: passed to scsi_alloc_sdev() 1163 * 1164 * Description: 1165 * Call scsi_probe_lun, if a LUN with an attached device is found, 1166 * allocate and set it up by calling scsi_add_lun. 1167 * 1168 * Return: 1169 * 1170 * - SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device 1171 * - SCSI_SCAN_TARGET_PRESENT: target responded, but no device is 1172 * attached at the LUN 1173 * - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized 1174 **/ 1175 static int scsi_probe_and_add_lun(struct scsi_target *starget, 1176 u64 lun, blist_flags_t *bflagsp, 1177 struct scsi_device **sdevp, 1178 enum scsi_scan_mode rescan, 1179 void *hostdata) 1180 { 1181 struct scsi_device *sdev; 1182 unsigned char *result; 1183 blist_flags_t bflags; 1184 int res = SCSI_SCAN_NO_RESPONSE, result_len = 256; 1185 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1186 1187 /* 1188 * The rescan flag is used as an optimization, the first scan of a 1189 * host adapter calls into here with rescan == 0. 1190 */ 1191 sdev = scsi_device_lookup_by_target(starget, lun); 1192 if (sdev) { 1193 if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) { 1194 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, 1195 "scsi scan: device exists on %s\n", 1196 dev_name(&sdev->sdev_gendev))); 1197 if (sdevp) 1198 *sdevp = sdev; 1199 else 1200 scsi_device_put(sdev); 1201 1202 if (bflagsp) 1203 *bflagsp = scsi_get_device_flags(sdev, 1204 sdev->vendor, 1205 sdev->model); 1206 return SCSI_SCAN_LUN_PRESENT; 1207 } 1208 scsi_device_put(sdev); 1209 } else 1210 sdev = scsi_alloc_sdev(starget, lun, hostdata); 1211 if (!sdev) 1212 goto out; 1213 1214 result = kmalloc(result_len, GFP_KERNEL); 1215 if (!result) 1216 goto out_free_sdev; 1217 1218 if (scsi_probe_lun(sdev, result, result_len, &bflags)) 1219 goto out_free_result; 1220 1221 if (bflagsp) 1222 *bflagsp = bflags; 1223 /* 1224 * result contains valid SCSI INQUIRY data. 1225 */ 1226 if ((result[0] >> 5) == 3) { 1227 /* 1228 * For a Peripheral qualifier 3 (011b), the SCSI 1229 * spec says: The device server is not capable of 1230 * supporting a physical device on this logical 1231 * unit. 1232 * 1233 * For disks, this implies that there is no 1234 * logical disk configured at sdev->lun, but there 1235 * is a target id responding. 1236 */ 1237 SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:" 1238 " peripheral qualifier of 3, device not" 1239 " added\n")) 1240 if (lun == 0) { 1241 SCSI_LOG_SCAN_BUS(1, { 1242 unsigned char vend[9]; 1243 unsigned char mod[17]; 1244 1245 sdev_printk(KERN_INFO, sdev, 1246 "scsi scan: consider passing scsi_mod." 1247 "dev_flags=%s:%s:0x240 or 0x1000240\n", 1248 scsi_inq_str(vend, result, 8, 16), 1249 scsi_inq_str(mod, result, 16, 32)); 1250 }); 1251 1252 } 1253 1254 res = SCSI_SCAN_TARGET_PRESENT; 1255 goto out_free_result; 1256 } 1257 1258 /* 1259 * Some targets may set slight variations of PQ and PDT to signal 1260 * that no LUN is present, so don't add sdev in these cases. 1261 * Two specific examples are: 1262 * 1) NetApp targets: return PQ=1, PDT=0x1f 1263 * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved" 1264 * in the UFI 1.0 spec (we cannot rely on reserved bits). 1265 * 1266 * References: 1267 * 1) SCSI SPC-3, pp. 145-146 1268 * PQ=1: "A peripheral device having the specified peripheral 1269 * device type is not connected to this logical unit. However, the 1270 * device server is capable of supporting the specified peripheral 1271 * device type on this logical unit." 1272 * PDT=0x1f: "Unknown or no device type" 1273 * 2) USB UFI 1.0, p. 20 1274 * PDT=00h Direct-access device (floppy) 1275 * PDT=1Fh none (no FDD connected to the requested logical unit) 1276 */ 1277 if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) && 1278 (result[0] & 0x1f) == 0x1f && 1279 !scsi_is_wlun(lun)) { 1280 SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev, 1281 "scsi scan: peripheral device type" 1282 " of 31, no device added\n")); 1283 res = SCSI_SCAN_TARGET_PRESENT; 1284 goto out_free_result; 1285 } 1286 1287 res = scsi_add_lun(sdev, result, &bflags, shost->async_scan); 1288 if (res == SCSI_SCAN_LUN_PRESENT) { 1289 if (bflags & BLIST_KEY) { 1290 sdev->lockable = 0; 1291 scsi_unlock_floptical(sdev, result); 1292 } 1293 } 1294 1295 out_free_result: 1296 kfree(result); 1297 out_free_sdev: 1298 if (res == SCSI_SCAN_LUN_PRESENT) { 1299 if (sdevp) { 1300 if (scsi_device_get(sdev) == 0) { 1301 *sdevp = sdev; 1302 } else { 1303 __scsi_remove_device(sdev); 1304 res = SCSI_SCAN_NO_RESPONSE; 1305 } 1306 } 1307 } else 1308 __scsi_remove_device(sdev); 1309 out: 1310 return res; 1311 } 1312 1313 /** 1314 * scsi_sequential_lun_scan - sequentially scan a SCSI target 1315 * @starget: pointer to target structure to scan 1316 * @bflags: black/white list flag for LUN 0 1317 * @scsi_level: Which version of the standard does this device adhere to 1318 * @rescan: passed to scsi_probe_add_lun() 1319 * 1320 * Description: 1321 * Generally, scan from LUN 1 (LUN 0 is assumed to already have been 1322 * scanned) to some maximum lun until a LUN is found with no device 1323 * attached. Use the bflags to figure out any oddities. 1324 * 1325 * Modifies sdevscan->lun. 1326 **/ 1327 static void scsi_sequential_lun_scan(struct scsi_target *starget, 1328 blist_flags_t bflags, int scsi_level, 1329 enum scsi_scan_mode rescan) 1330 { 1331 uint max_dev_lun; 1332 u64 sparse_lun, lun; 1333 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); 1334 1335 SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget, 1336 "scsi scan: Sequential scan\n")); 1337 1338 max_dev_lun = min(max_scsi_luns, shost->max_lun); 1339 /* 1340 * If this device is known to support sparse multiple units, 1341 * override the other settings, and scan all of them. Normally, 1342 * SCSI-3 devices should be scanned via the REPORT LUNS. 1343 */ 1344 if (bflags & BLIST_SPARSELUN) { 1345 max_dev_lun = shost->max_lun; 1346 sparse_lun = 1; 1347 } else 1348 sparse_lun = 0; 1349 1350 /* 1351 * If less than SCSI_1_CCS, and no special lun scanning, stop 1352 * scanning; this matches 2.4 behaviour, but could just be a bug 1353 * (to continue scanning a SCSI_1_CCS device). 1354 * 1355 * This test is broken. We might not have any device on lun0 for 1356 * a sparselun device, and if that's the case then how would we 1357 * know the real scsi_level, eh? It might make sense to just not 1358 * scan any SCSI_1 device for non-0 luns, but that check would best 1359 * go into scsi_alloc_sdev() and just have it return null when asked 1360 * to alloc an sdev for lun > 0 on an already found SCSI_1 device. 1361 * 1362 if ((sdevscan->scsi_level < SCSI_1_CCS) && 1363 ((bflags & (BLIST_FORCELUN | BLIST_SPARSELUN | BLIST_MAX5LUN)) 1364 == 0)) 1365 return; 1366 */ 1367 /* 1368 * If this device is known to support multiple units, override 1369 * the other settings, and scan all of them. 1370 */ 1371 if (bflags & BLIST_FORCELUN) 1372 max_dev_lun = shost->max_lun; 1373 /* 1374 * REGAL CDC-4X: avoid hang after LUN 4 1375 */ 1376 if (bflags & BLIST_MAX5LUN) 1377 max_dev_lun = min(5U, max_dev_lun); 1378 /* 1379 * Do not scan SCSI-2 or lower device past LUN 7, unless 1380 * BLIST_LARGELUN. 1381 */ 1382 if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN)) 1383 max_dev_lun = min(8U, max_dev_lun); 1384 else 1385 max_dev_lun = min(256U, max_dev_lun); 1386 1387 /* 1388 * We have already scanned LUN 0, so start at LUN 1. Keep scanning 1389 * until we reach the max, or no LUN is found and we are not 1390 * sparse_lun. 1391 */ 1392 for (lun = 1; lun < max_dev_lun; ++lun) 1393 if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, 1394 NULL) != SCSI_SCAN_LUN_PRESENT) && 1395 !sparse_lun) 1396 return; 1397 } 1398 1399 /** 1400 * scsi_report_lun_scan - Scan using SCSI REPORT LUN results 1401 * @starget: which target 1402 * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN 1403 * @rescan: nonzero if we can skip code only needed on first scan 1404 * 1405 * Description: 1406 * Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command. 1407 * Scan the resulting list of LUNs by calling scsi_probe_and_add_lun. 1408 * 1409 * If BLINK_REPORTLUN2 is set, scan a target that supports more than 8 1410 * LUNs even if it's older than SCSI-3. 1411 * If BLIST_NOREPORTLUN is set, return 1 always. 1412 * If BLIST_NOLUN is set, return 0 always. 1413 * If starget->no_report_luns is set, return 1 always. 1414 * 1415 * Return: 1416 * 0: scan completed (or no memory, so further scanning is futile) 1417 * 1: could not scan with REPORT LUN 1418 **/ 1419 static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags, 1420 enum scsi_scan_mode rescan) 1421 { 1422 unsigned char scsi_cmd[MAX_COMMAND_SIZE]; 1423 unsigned int length; 1424 u64 lun; 1425 unsigned int num_luns; 1426 int result; 1427 struct scsi_lun *lunp, *lun_data; 1428 struct scsi_device *sdev; 1429 struct Scsi_Host *shost = dev_to_shost(&starget->dev); 1430 struct scsi_failure failure_defs[] = { 1431 { 1432 .sense = UNIT_ATTENTION, 1433 .asc = SCMD_FAILURE_ASC_ANY, 1434 .ascq = SCMD_FAILURE_ASCQ_ANY, 1435 .result = SAM_STAT_CHECK_CONDITION, 1436 }, 1437 /* Fail all CCs except the UA above */ 1438 { 1439 .sense = SCMD_FAILURE_SENSE_ANY, 1440 .result = SAM_STAT_CHECK_CONDITION, 1441 }, 1442 /* Retry any other errors not listed above */ 1443 { 1444 .result = SCMD_FAILURE_RESULT_ANY, 1445 }, 1446 {} 1447 }; 1448 struct scsi_failures failures = { 1449 .total_allowed = 3, 1450 .failure_definitions = failure_defs, 1451 }; 1452 const struct scsi_exec_args exec_args = { 1453 .failures = &failures, 1454 }; 1455 int ret = 0; 1456 1457 /* 1458 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set. 1459 * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does 1460 * support more than 8 LUNs. 1461 * Don't attempt if the target doesn't support REPORT LUNS. 1462 */ 1463 if (bflags & BLIST_NOREPORTLUN) 1464 return 1; 1465 if (starget->scsi_level < SCSI_2 && 1466 starget->scsi_level != SCSI_UNKNOWN) 1467 return 1; 1468 if (starget->scsi_level < SCSI_3 && 1469 (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8)) 1470 return 1; 1471 if (bflags & BLIST_NOLUN) 1472 return 0; 1473 if (starget->no_report_luns) 1474 return 1; 1475 1476 if (!(sdev = scsi_device_lookup_by_target(starget, 0))) { 1477 sdev = scsi_alloc_sdev(starget, 0, NULL); 1478 if (!sdev) 1479 return 0; 1480 if (scsi_device_get(sdev)) { 1481 __scsi_remove_device(sdev); 1482 return 0; 1483 } 1484 } 1485 1486 /* 1487 * Allocate enough to hold the header (the same size as one scsi_lun) 1488 * plus the number of luns we are requesting. 511 was the default 1489 * value of the now removed max_report_luns parameter. 1490 */ 1491 length = (511 + 1) * sizeof(struct scsi_lun); 1492 retry: 1493 lun_data = kmalloc(length, GFP_KERNEL); 1494 if (!lun_data) { 1495 printk(ALLOC_FAILURE_MSG, __func__); 1496 goto out; 1497 } 1498 1499 scsi_cmd[0] = REPORT_LUNS; 1500 1501 /* 1502 * bytes 1 - 5: reserved, set to zero. 1503 */ 1504 memset(&scsi_cmd[1], 0, 5); 1505 1506 /* 1507 * bytes 6 - 9: length of the command. 1508 */ 1509 put_unaligned_be32(length, &scsi_cmd[6]); 1510 1511 scsi_cmd[10] = 0; /* reserved */ 1512 scsi_cmd[11] = 0; /* control */ 1513 1514 /* 1515 * We can get a UNIT ATTENTION, for example a power on/reset, so 1516 * retry a few times (like sd.c does for TEST UNIT READY). 1517 * Experience shows some combinations of adapter/devices get at 1518 * least two power on/resets. 1519 * 1520 * Illegal requests (for devices that do not support REPORT LUNS) 1521 * should come through as a check condition, and will not generate 1522 * a retry. 1523 */ 1524 scsi_failures_reset_retries(&failures); 1525 1526 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, 1527 "scsi scan: Sending REPORT LUNS\n")); 1528 1529 result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, lun_data, 1530 length, SCSI_REPORT_LUNS_TIMEOUT, 3, 1531 &exec_args); 1532 1533 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, 1534 "scsi scan: REPORT LUNS %s result 0x%x\n", 1535 result ? "failed" : "successful", result)); 1536 if (result) { 1537 /* 1538 * The device probably does not support a REPORT LUN command 1539 */ 1540 ret = 1; 1541 goto out_err; 1542 } 1543 1544 /* 1545 * Get the length from the first four bytes of lun_data. 1546 */ 1547 if (get_unaligned_be32(lun_data->scsi_lun) + 1548 sizeof(struct scsi_lun) > length) { 1549 length = get_unaligned_be32(lun_data->scsi_lun) + 1550 sizeof(struct scsi_lun); 1551 kfree(lun_data); 1552 goto retry; 1553 } 1554 length = get_unaligned_be32(lun_data->scsi_lun); 1555 1556 num_luns = (length / sizeof(struct scsi_lun)); 1557 1558 SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev, 1559 "scsi scan: REPORT LUN scan\n")); 1560 1561 /* 1562 * Scan the luns in lun_data. The entry at offset 0 is really 1563 * the header, so start at 1 and go up to and including num_luns. 1564 */ 1565 for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) { 1566 lun = scsilun_to_int(lunp); 1567 1568 if (lun > sdev->host->max_lun) { 1569 sdev_printk(KERN_WARNING, sdev, 1570 "lun%llu has a LUN larger than" 1571 " allowed by the host adapter\n", lun); 1572 } else { 1573 int res; 1574 1575 res = scsi_probe_and_add_lun(starget, 1576 lun, NULL, NULL, rescan, NULL); 1577 if (res == SCSI_SCAN_NO_RESPONSE) { 1578 /* 1579 * Got some results, but now none, abort. 1580 */ 1581 sdev_printk(KERN_ERR, sdev, 1582 "Unexpected response" 1583 " from lun %llu while scanning, scan" 1584 " aborted\n", (unsigned long long)lun); 1585 break; 1586 } 1587 } 1588 } 1589 1590 out_err: 1591 kfree(lun_data); 1592 out: 1593 if (scsi_device_created(sdev)) 1594 /* 1595 * the sdev we used didn't appear in the report luns scan 1596 */ 1597 __scsi_remove_device(sdev); 1598 scsi_device_put(sdev); 1599 return ret; 1600 } 1601 1602 struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel, 1603 uint id, u64 lun, void *hostdata) 1604 { 1605 struct scsi_device *sdev = ERR_PTR(-ENODEV); 1606 struct device *parent = &shost->shost_gendev; 1607 struct scsi_target *starget; 1608 1609 if (strncmp(scsi_scan_type, "none", 4) == 0) 1610 return ERR_PTR(-ENODEV); 1611 1612 starget = scsi_alloc_target(parent, channel, id); 1613 if (!starget) 1614 return ERR_PTR(-ENOMEM); 1615 scsi_autopm_get_target(starget); 1616 1617 mutex_lock(&shost->scan_mutex); 1618 if (!shost->async_scan) 1619 scsi_complete_async_scans(); 1620 1621 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) { 1622 scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1623 SCSI_SCAN_RESCAN, hostdata); 1624 scsi_autopm_put_host(shost); 1625 } 1626 mutex_unlock(&shost->scan_mutex); 1627 scsi_autopm_put_target(starget); 1628 /* 1629 * paired with scsi_alloc_target(). Target will be destroyed unless 1630 * scsi_probe_and_add_lun made an underlying device visible 1631 */ 1632 scsi_target_reap(starget); 1633 put_device(&starget->dev); 1634 1635 return sdev; 1636 } 1637 EXPORT_SYMBOL(__scsi_add_device); 1638 1639 /** 1640 * scsi_add_device - creates a new SCSI (LU) instance 1641 * @host: the &Scsi_Host instance where the device is located 1642 * @channel: target channel number (rarely other than %0) 1643 * @target: target id number 1644 * @lun: LUN of target device 1645 * 1646 * Probe for a specific LUN and add it if found. 1647 * 1648 * Notes: This call is usually performed internally during a SCSI 1649 * bus scan when an HBA is added (i.e. scsi_scan_host()). So it 1650 * should only be called if the HBA becomes aware of a new SCSI 1651 * device (LU) after scsi_scan_host() has completed. If successful 1652 * this call can lead to sdev_init() and sdev_configure() callbacks 1653 * into the LLD. 1654 * 1655 * Return: %0 on success or negative error code on failure 1656 */ 1657 int scsi_add_device(struct Scsi_Host *host, uint channel, 1658 uint target, u64 lun) 1659 { 1660 struct scsi_device *sdev = 1661 __scsi_add_device(host, channel, target, lun, NULL); 1662 if (IS_ERR(sdev)) 1663 return PTR_ERR(sdev); 1664 1665 scsi_device_put(sdev); 1666 return 0; 1667 } 1668 EXPORT_SYMBOL(scsi_add_device); 1669 1670 int scsi_resume_device(struct scsi_device *sdev) 1671 { 1672 struct device *dev = &sdev->sdev_gendev; 1673 int ret = 0; 1674 1675 device_lock(dev); 1676 1677 /* 1678 * Bail out if the device or its queue are not running. Otherwise, 1679 * the rescan may block waiting for commands to be executed, with us 1680 * holding the device lock. This can result in a potential deadlock 1681 * in the power management core code when system resume is on-going. 1682 */ 1683 if (sdev->sdev_state != SDEV_RUNNING || 1684 blk_queue_pm_only(sdev->request_queue)) { 1685 ret = -EWOULDBLOCK; 1686 goto unlock; 1687 } 1688 1689 if (dev->driver && try_module_get(dev->driver->owner)) { 1690 struct scsi_driver *drv = to_scsi_driver(dev->driver); 1691 1692 if (drv->resume) 1693 ret = drv->resume(dev); 1694 module_put(dev->driver->owner); 1695 } 1696 1697 unlock: 1698 device_unlock(dev); 1699 1700 return ret; 1701 } 1702 EXPORT_SYMBOL(scsi_resume_device); 1703 1704 int scsi_rescan_device(struct scsi_device *sdev) 1705 { 1706 struct device *dev = &sdev->sdev_gendev; 1707 int ret = 0; 1708 1709 device_lock(dev); 1710 1711 /* 1712 * Bail out if the device or its queue are not running. Otherwise, 1713 * the rescan may block waiting for commands to be executed, with us 1714 * holding the device lock. This can result in a potential deadlock 1715 * in the power management core code when system resume is on-going. 1716 */ 1717 if (sdev->sdev_state != SDEV_RUNNING || 1718 blk_queue_pm_only(sdev->request_queue)) { 1719 ret = -EWOULDBLOCK; 1720 goto unlock; 1721 } 1722 1723 scsi_attach_vpd(sdev); 1724 scsi_cdl_check(sdev); 1725 1726 if (sdev->handler && sdev->handler->rescan) 1727 sdev->handler->rescan(sdev); 1728 1729 if (dev->driver && try_module_get(dev->driver->owner)) { 1730 struct scsi_driver *drv = to_scsi_driver(dev->driver); 1731 1732 if (drv->rescan) 1733 drv->rescan(dev); 1734 module_put(dev->driver->owner); 1735 } 1736 1737 unlock: 1738 device_unlock(dev); 1739 1740 return ret; 1741 } 1742 EXPORT_SYMBOL(scsi_rescan_device); 1743 1744 static void __scsi_scan_target(struct device *parent, unsigned int channel, 1745 unsigned int id, u64 lun, enum scsi_scan_mode rescan) 1746 { 1747 struct Scsi_Host *shost = dev_to_shost(parent); 1748 blist_flags_t bflags = 0; 1749 int res; 1750 struct scsi_target *starget; 1751 1752 if (shost->this_id == id) 1753 /* 1754 * Don't scan the host adapter 1755 */ 1756 return; 1757 1758 starget = scsi_alloc_target(parent, channel, id); 1759 if (!starget) 1760 return; 1761 scsi_autopm_get_target(starget); 1762 1763 if (lun != SCAN_WILD_CARD) { 1764 /* 1765 * Scan for a specific host/chan/id/lun. 1766 */ 1767 scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL); 1768 goto out_reap; 1769 } 1770 1771 /* 1772 * Scan LUN 0, if there is some response, scan further. Ideally, we 1773 * would not configure LUN 0 until all LUNs are scanned. 1774 */ 1775 res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL); 1776 if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) { 1777 if (scsi_report_lun_scan(starget, bflags, rescan) != 0) 1778 /* 1779 * The REPORT LUN did not scan the target, 1780 * do a sequential scan. 1781 */ 1782 scsi_sequential_lun_scan(starget, bflags, 1783 starget->scsi_level, rescan); 1784 } 1785 1786 out_reap: 1787 scsi_autopm_put_target(starget); 1788 /* 1789 * paired with scsi_alloc_target(): determine if the target has 1790 * any children at all and if not, nuke it 1791 */ 1792 scsi_target_reap(starget); 1793 1794 put_device(&starget->dev); 1795 } 1796 1797 /** 1798 * scsi_scan_target - scan a target id, possibly including all LUNs on the target. 1799 * @parent: host to scan 1800 * @channel: channel to scan 1801 * @id: target id to scan 1802 * @lun: Specific LUN to scan or SCAN_WILD_CARD 1803 * @rescan: passed to LUN scanning routines; SCSI_SCAN_INITIAL for 1804 * no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs, 1805 * and SCSI_SCAN_MANUAL to force scanning even if 1806 * 'scan=manual' is set. 1807 * 1808 * Description: 1809 * Scan the target id on @parent, @channel, and @id. Scan at least LUN 0, 1810 * and possibly all LUNs on the target id. 1811 * 1812 * First try a REPORT LUN scan, if that does not scan the target, do a 1813 * sequential scan of LUNs on the target id. 1814 **/ 1815 void scsi_scan_target(struct device *parent, unsigned int channel, 1816 unsigned int id, u64 lun, enum scsi_scan_mode rescan) 1817 { 1818 struct Scsi_Host *shost = dev_to_shost(parent); 1819 1820 if (strncmp(scsi_scan_type, "none", 4) == 0) 1821 return; 1822 1823 if (rescan != SCSI_SCAN_MANUAL && 1824 strncmp(scsi_scan_type, "manual", 6) == 0) 1825 return; 1826 1827 mutex_lock(&shost->scan_mutex); 1828 if (!shost->async_scan) 1829 scsi_complete_async_scans(); 1830 1831 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) { 1832 __scsi_scan_target(parent, channel, id, lun, rescan); 1833 scsi_autopm_put_host(shost); 1834 } 1835 mutex_unlock(&shost->scan_mutex); 1836 } 1837 EXPORT_SYMBOL(scsi_scan_target); 1838 1839 static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel, 1840 unsigned int id, u64 lun, 1841 enum scsi_scan_mode rescan) 1842 { 1843 uint order_id; 1844 1845 if (id == SCAN_WILD_CARD) 1846 for (id = 0; id < shost->max_id; ++id) { 1847 /* 1848 * XXX adapter drivers when possible (FCP, iSCSI) 1849 * could modify max_id to match the current max, 1850 * not the absolute max. 1851 * 1852 * XXX add a shost id iterator, so for example, 1853 * the FC ID can be the same as a target id 1854 * without a huge overhead of sparse id's. 1855 */ 1856 if (shost->reverse_ordering) 1857 /* 1858 * Scan from high to low id. 1859 */ 1860 order_id = shost->max_id - id - 1; 1861 else 1862 order_id = id; 1863 __scsi_scan_target(&shost->shost_gendev, channel, 1864 order_id, lun, rescan); 1865 } 1866 else 1867 __scsi_scan_target(&shost->shost_gendev, channel, 1868 id, lun, rescan); 1869 } 1870 1871 int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel, 1872 unsigned int id, u64 lun, 1873 enum scsi_scan_mode rescan) 1874 { 1875 SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost, 1876 "%s: <%u:%u:%llu>\n", 1877 __func__, channel, id, lun)); 1878 1879 if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) || 1880 ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) || 1881 ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun))) 1882 return -EINVAL; 1883 1884 mutex_lock(&shost->scan_mutex); 1885 if (!shost->async_scan) 1886 scsi_complete_async_scans(); 1887 1888 if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) { 1889 if (channel == SCAN_WILD_CARD) 1890 for (channel = 0; channel <= shost->max_channel; 1891 channel++) 1892 scsi_scan_channel(shost, channel, id, lun, 1893 rescan); 1894 else 1895 scsi_scan_channel(shost, channel, id, lun, rescan); 1896 scsi_autopm_put_host(shost); 1897 } 1898 mutex_unlock(&shost->scan_mutex); 1899 1900 return 0; 1901 } 1902 1903 static void scsi_sysfs_add_devices(struct Scsi_Host *shost) 1904 { 1905 struct scsi_device *sdev; 1906 shost_for_each_device(sdev, shost) { 1907 /* target removed before the device could be added */ 1908 if (sdev->sdev_state == SDEV_DEL) 1909 continue; 1910 /* If device is already visible, skip adding it to sysfs */ 1911 if (sdev->is_visible) 1912 continue; 1913 if (!scsi_host_scan_allowed(shost) || 1914 scsi_sysfs_add_sdev(sdev) != 0) 1915 __scsi_remove_device(sdev); 1916 } 1917 } 1918 1919 /** 1920 * scsi_prep_async_scan - prepare for an async scan 1921 * @shost: the host which will be scanned 1922 * Returns: a cookie to be passed to scsi_finish_async_scan() 1923 * 1924 * Tells the midlayer this host is going to do an asynchronous scan. 1925 * It reserves the host's position in the scanning list and ensures 1926 * that other asynchronous scans started after this one won't affect the 1927 * ordering of the discovered devices. 1928 */ 1929 static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost) 1930 { 1931 struct async_scan_data *data = NULL; 1932 unsigned long flags; 1933 1934 if (strncmp(scsi_scan_type, "sync", 4) == 0) 1935 return NULL; 1936 1937 mutex_lock(&shost->scan_mutex); 1938 if (shost->async_scan) { 1939 shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__); 1940 goto err; 1941 } 1942 1943 data = kmalloc(sizeof(*data), GFP_KERNEL); 1944 if (!data) 1945 goto err; 1946 data->shost = scsi_host_get(shost); 1947 if (!data->shost) 1948 goto err; 1949 init_completion(&data->prev_finished); 1950 1951 spin_lock_irqsave(shost->host_lock, flags); 1952 shost->async_scan = 1; 1953 spin_unlock_irqrestore(shost->host_lock, flags); 1954 mutex_unlock(&shost->scan_mutex); 1955 1956 spin_lock(&async_scan_lock); 1957 if (list_empty(&scanning_hosts)) 1958 complete(&data->prev_finished); 1959 list_add_tail(&data->list, &scanning_hosts); 1960 spin_unlock(&async_scan_lock); 1961 1962 return data; 1963 1964 err: 1965 mutex_unlock(&shost->scan_mutex); 1966 kfree(data); 1967 return NULL; 1968 } 1969 1970 /** 1971 * scsi_finish_async_scan - asynchronous scan has finished 1972 * @data: cookie returned from earlier call to scsi_prep_async_scan() 1973 * 1974 * All the devices currently attached to this host have been found. 1975 * This function announces all the devices it has found to the rest 1976 * of the system. 1977 */ 1978 static void scsi_finish_async_scan(struct async_scan_data *data) 1979 { 1980 struct Scsi_Host *shost; 1981 unsigned long flags; 1982 1983 if (!data) 1984 return; 1985 1986 shost = data->shost; 1987 1988 mutex_lock(&shost->scan_mutex); 1989 1990 if (!shost->async_scan) { 1991 shost_printk(KERN_INFO, shost, "%s called twice\n", __func__); 1992 dump_stack(); 1993 mutex_unlock(&shost->scan_mutex); 1994 return; 1995 } 1996 1997 wait_for_completion(&data->prev_finished); 1998 1999 scsi_sysfs_add_devices(shost); 2000 2001 spin_lock_irqsave(shost->host_lock, flags); 2002 shost->async_scan = 0; 2003 spin_unlock_irqrestore(shost->host_lock, flags); 2004 2005 mutex_unlock(&shost->scan_mutex); 2006 2007 spin_lock(&async_scan_lock); 2008 list_del(&data->list); 2009 if (!list_empty(&scanning_hosts)) { 2010 struct async_scan_data *next = list_entry(scanning_hosts.next, 2011 struct async_scan_data, list); 2012 complete(&next->prev_finished); 2013 } 2014 spin_unlock(&async_scan_lock); 2015 2016 scsi_autopm_put_host(shost); 2017 scsi_host_put(shost); 2018 kfree(data); 2019 } 2020 2021 static void do_scsi_scan_host(struct Scsi_Host *shost) 2022 { 2023 if (shost->hostt->scan_finished) { 2024 unsigned long start = jiffies; 2025 if (shost->hostt->scan_start) 2026 shost->hostt->scan_start(shost); 2027 2028 while (!shost->hostt->scan_finished(shost, jiffies - start)) 2029 msleep(10); 2030 } else { 2031 scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD, 2032 SCAN_WILD_CARD, SCSI_SCAN_INITIAL); 2033 } 2034 } 2035 2036 static void do_scan_async(void *_data, async_cookie_t c) 2037 { 2038 struct async_scan_data *data = _data; 2039 struct Scsi_Host *shost = data->shost; 2040 2041 do_scsi_scan_host(shost); 2042 scsi_finish_async_scan(data); 2043 } 2044 2045 /** 2046 * scsi_scan_host - scan the given adapter 2047 * @shost: adapter to scan 2048 * 2049 * Notes: Should be called after scsi_add_host() 2050 **/ 2051 void scsi_scan_host(struct Scsi_Host *shost) 2052 { 2053 struct async_scan_data *data; 2054 2055 if (strncmp(scsi_scan_type, "none", 4) == 0 || 2056 strncmp(scsi_scan_type, "manual", 6) == 0) 2057 return; 2058 if (scsi_autopm_get_host(shost) < 0) 2059 return; 2060 2061 data = scsi_prep_async_scan(shost); 2062 if (!data) { 2063 do_scsi_scan_host(shost); 2064 scsi_autopm_put_host(shost); 2065 return; 2066 } 2067 2068 /* register with the async subsystem so wait_for_device_probe() 2069 * will flush this work 2070 */ 2071 async_schedule(do_scan_async, data); 2072 2073 /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */ 2074 } 2075 EXPORT_SYMBOL(scsi_scan_host); 2076 2077 void scsi_forget_host(struct Scsi_Host *shost) 2078 { 2079 struct scsi_device *sdev; 2080 unsigned long flags; 2081 2082 restart: 2083 spin_lock_irqsave(shost->host_lock, flags); 2084 list_for_each_entry(sdev, &shost->__devices, siblings) { 2085 if (sdev->sdev_state == SDEV_DEL) 2086 continue; 2087 spin_unlock_irqrestore(shost->host_lock, flags); 2088 __scsi_remove_device(sdev); 2089 goto restart; 2090 } 2091 spin_unlock_irqrestore(shost->host_lock, flags); 2092 } 2093 2094