1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * scsi_scan.c
4   *
5   * Copyright (C) 2000 Eric Youngdale,
6   * Copyright (C) 2002 Patrick Mansfield
7   *
8   * The general scanning/probing algorithm is as follows, exceptions are
9   * made to it depending on device-specific flags, compilation options, and
10   * global variable (boot or module load time) settings.
11   *
12   * A specific LUN is scanned via an INQUIRY command; if the LUN has a
13   * device attached, a scsi_device is allocated and set up for it.
14   *
15   * For every id of every channel on the given host:
16   *
17   * 	Scan LUN 0; if the target responds to LUN 0 (even if there is no
18   * 	device or storage attached to LUN 0):
19   *
20   * 		If LUN 0 has a device attached, allocate and setup a
21   * 		scsi_device for it.
22   *
23   * 		If target is SCSI-3 or up, issue a REPORT LUNS, and scan
24   * 		all of the LUNs returned by REPORT LUNS; else,
25   * 		sequentially scan LUNs up until some maximum is reached,
26   * 		or a LUN is seen that cannot have a device attached to it.
27   */
28  
29  #include <linux/module.h>
30  #include <linux/moduleparam.h>
31  #include <linux/init.h>
32  #include <linux/blkdev.h>
33  #include <linux/delay.h>
34  #include <linux/kthread.h>
35  #include <linux/spinlock.h>
36  #include <linux/async.h>
37  #include <linux/slab.h>
38  #include <linux/unaligned.h>
39  
40  #include <scsi/scsi.h>
41  #include <scsi/scsi_cmnd.h>
42  #include <scsi/scsi_device.h>
43  #include <scsi/scsi_driver.h>
44  #include <scsi/scsi_devinfo.h>
45  #include <scsi/scsi_host.h>
46  #include <scsi/scsi_transport.h>
47  #include <scsi/scsi_dh.h>
48  #include <scsi/scsi_eh.h>
49  
50  #include "scsi_priv.h"
51  #include "scsi_logging.h"
52  
53  #define ALLOC_FAILURE_MSG	KERN_ERR "%s: Allocation failure during" \
54  	" SCSI scanning, some SCSI devices might not be configured\n"
55  
56  /*
57   * Default timeout
58   */
59  #define SCSI_TIMEOUT (2*HZ)
60  #define SCSI_REPORT_LUNS_TIMEOUT (30*HZ)
61  
62  /*
63   * Prefix values for the SCSI id's (stored in sysfs name field)
64   */
65  #define SCSI_UID_SER_NUM 'S'
66  #define SCSI_UID_UNKNOWN 'Z'
67  
68  /*
69   * Return values of some of the scanning functions.
70   *
71   * SCSI_SCAN_NO_RESPONSE: no valid response received from the target, this
72   * includes allocation or general failures preventing IO from being sent.
73   *
74   * SCSI_SCAN_TARGET_PRESENT: target responded, but no device is available
75   * on the given LUN.
76   *
77   * SCSI_SCAN_LUN_PRESENT: target responded, and a device is available on a
78   * given LUN.
79   */
80  #define SCSI_SCAN_NO_RESPONSE		0
81  #define SCSI_SCAN_TARGET_PRESENT	1
82  #define SCSI_SCAN_LUN_PRESENT		2
83  
84  static const char *scsi_null_device_strs = "nullnullnullnull";
85  
86  #define MAX_SCSI_LUNS	512
87  
88  static u64 max_scsi_luns = MAX_SCSI_LUNS;
89  
90  module_param_named(max_luns, max_scsi_luns, ullong, S_IRUGO|S_IWUSR);
91  MODULE_PARM_DESC(max_luns,
92  		 "last scsi LUN (should be between 1 and 2^64-1)");
93  
94  #ifdef CONFIG_SCSI_SCAN_ASYNC
95  #define SCSI_SCAN_TYPE_DEFAULT "async"
96  #else
97  #define SCSI_SCAN_TYPE_DEFAULT "sync"
98  #endif
99  
100  static char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
101  
102  module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
103  		    S_IRUGO|S_IWUSR);
104  MODULE_PARM_DESC(scan, "sync, async, manual, or none. "
105  		 "Setting to 'manual' disables automatic scanning, but allows "
106  		 "for manual device scan via the 'scan' sysfs attribute.");
107  
108  static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
109  
110  module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
111  MODULE_PARM_DESC(inq_timeout,
112  		 "Timeout (in seconds) waiting for devices to answer INQUIRY."
113  		 " Default is 20. Some devices may need more; most need less.");
114  
115  /* This lock protects only this list */
116  static DEFINE_SPINLOCK(async_scan_lock);
117  static LIST_HEAD(scanning_hosts);
118  
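/*
 * Entries on the scanning_hosts list: one per host with an asynchronous scan
 * in flight, plus temporary entries added by scsi_complete_async_scans().
 * An entry's prev_finished is completed once every entry added before it has
 * finished and been removed from the list.
 */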
119  struct async_scan_data {
120  	struct list_head list;
121  	struct Scsi_Host *shost;
122  	struct completion prev_finished;
123  };
124  
125  /*
126   * scsi_enable_async_suspend - Enable async suspend and resume
127   */
128  void scsi_enable_async_suspend(struct device *dev)
129  {
130  	/*
131  	 * If a user has disabled async probing, a likely reason is a
132  	 * storage enclosure that does not inject staggered spin-ups. For
133  	 * safety, make resume synchronous as well in that case.
134  	 */
135  	if (strncmp(scsi_scan_type, "async", 5) != 0)
136  		return;
137  	/* Enable asynchronous suspend and resume. */
138  	device_enable_async_suspend(dev);
139  }
140  
141  /**
142   * scsi_complete_async_scans - Wait for asynchronous scans to complete
143   *
144   * When this function returns, any host which started scanning before
145   * this function was called will have finished its scan.  Hosts which
146   * started scanning after this function was called may or may not have
147   * finished.
148   */
149  int scsi_complete_async_scans(void)
150  {
151  	struct async_scan_data *data;
152  
153  	do {
154  		if (list_empty(&scanning_hosts))
155  			return 0;
156  		/* If we can't get memory immediately, that's OK.  Just
157  		 * sleep a little.  Even if we never get memory, the async
158  		 * scans will finish eventually.
159  		 */
160  		data = kmalloc(sizeof(*data), GFP_KERNEL);
161  		if (!data)
162  			msleep(1);
163  	} while (!data);
164  
165  	data->shost = NULL;
166  	init_completion(&data->prev_finished);
167  
168  	spin_lock(&async_scan_lock);
169  	/* Check that there's still somebody else on the list */
170  	if (list_empty(&scanning_hosts))
171  		goto done;
172  	list_add_tail(&data->list, &scanning_hosts);
173  	spin_unlock(&async_scan_lock);
174  
175  	printk(KERN_INFO "scsi: waiting for bus probes to complete ...\n");
176  	wait_for_completion(&data->prev_finished);
177  
178  	spin_lock(&async_scan_lock);
179  	list_del(&data->list);
180  	if (!list_empty(&scanning_hosts)) {
181  		struct async_scan_data *next = list_entry(scanning_hosts.next,
182  				struct async_scan_data, list);
183  		complete(&next->prev_finished);
184  	}
185   done:
186  	spin_unlock(&async_scan_lock);
187  
188  	kfree(data);
189  	return 0;
190  }
191  
192  /**
193   * scsi_unlock_floptical - unlock device via a special MODE SENSE command
194   * @sdev:	scsi device to send command to
195   * @result:	area to store the result of the MODE SENSE
196   *
197   * Description:
198   *     Send a vendor specific MODE SENSE (not a MODE SELECT) command.
199   *     Called for BLIST_KEY devices.
200   **/
201  static void scsi_unlock_floptical(struct scsi_device *sdev,
202  				  unsigned char *result)
203  {
204  	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
205  
206  	sdev_printk(KERN_NOTICE, sdev, "unlocking floptical drive\n");
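	/* Vendor-specific MODE SENSE(6): page code 0x2e, allocation length 0x2a bytes. */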
207  	scsi_cmd[0] = MODE_SENSE;
208  	scsi_cmd[1] = 0;
209  	scsi_cmd[2] = 0x2e;
210  	scsi_cmd[3] = 0;
211  	scsi_cmd[4] = 0x2a;     /* size */
212  	scsi_cmd[5] = 0;
213  	scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, result, 0x2a,
214  			 SCSI_TIMEOUT, 3, NULL);
215  }
216  
217  static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
218  					unsigned int depth)
219  {
220  	int new_shift = sbitmap_calculate_shift(depth);
221  	bool need_alloc = !sdev->budget_map.map;
222  	bool need_free = false;
223  	int ret;
224  	struct sbitmap sb_backup;
225  
226  	depth = min_t(unsigned int, depth, scsi_device_max_queue_depth(sdev));
227  
228  	/*
229  	 * Reallocate if a new shift is calculated, which happens when a new
230  	 * default queue depth is set up after calling ->device_configure
231  	 */
232  	if (!need_alloc && new_shift != sdev->budget_map.shift)
233  		need_alloc = need_free = true;
234  
235  	if (!need_alloc)
236  		return 0;
237  
238  	/*
239  	 * The request queue has to be frozen while reallocating the budget map;
240  	 * the disk isn't added yet at this point, so freezing is fast.
241  	 */
242  	if (need_free) {
243  		blk_mq_freeze_queue(sdev->request_queue);
244  		sb_backup = sdev->budget_map;
245  	}
246  	ret = sbitmap_init_node(&sdev->budget_map,
247  				scsi_device_max_queue_depth(sdev),
248  				new_shift, GFP_KERNEL,
249  				sdev->request_queue->node, false, true);
250  	if (!ret)
251  		sbitmap_resize(&sdev->budget_map, depth);
252  
253  	if (need_free) {
254  		if (ret)
255  			sdev->budget_map = sb_backup;
256  		else
257  			sbitmap_free(&sb_backup);
258  		ret = 0;
259  		blk_mq_unfreeze_queue(sdev->request_queue);
260  	}
261  	return ret;
262  }
263  
264  /**
265   * scsi_alloc_sdev - allocate and setup a scsi_Device
266   * @starget: which target to allocate a &scsi_device for
267   * @lun: which lun
268   * @hostdata: usually NULL and set by ->slave_alloc instead
269   *
270   * Description:
271   *     Allocate, initialize for io, and return a pointer to a scsi_Device.
272   *     Stores the @shost, @channel, @id, and @lun in the scsi_Device, and
273   *     adds scsi_Device to the appropriate list.
274   *
275   * Return value:
276   *     scsi_Device pointer, or NULL on failure.
277   **/
278  static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
279  					   u64 lun, void *hostdata)
280  {
281  	unsigned int depth;
282  	struct scsi_device *sdev;
283  	struct request_queue *q;
284  	int display_failure_msg = 1, ret;
285  	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
286  	struct queue_limits lim;
287  
288  	sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
289  		       GFP_KERNEL);
290  	if (!sdev)
291  		goto out;
292  
293  	sdev->vendor = scsi_null_device_strs;
294  	sdev->model = scsi_null_device_strs;
295  	sdev->rev = scsi_null_device_strs;
296  	sdev->host = shost;
297  	sdev->queue_ramp_up_period = SCSI_DEFAULT_RAMP_UP_PERIOD;
298  	sdev->id = starget->id;
299  	sdev->lun = lun;
300  	sdev->channel = starget->channel;
301  	mutex_init(&sdev->state_mutex);
302  	sdev->sdev_state = SDEV_CREATED;
303  	INIT_LIST_HEAD(&sdev->siblings);
304  	INIT_LIST_HEAD(&sdev->same_target_siblings);
305  	INIT_LIST_HEAD(&sdev->starved_entry);
306  	INIT_LIST_HEAD(&sdev->event_list);
307  	spin_lock_init(&sdev->list_lock);
308  	mutex_init(&sdev->inquiry_mutex);
309  	INIT_WORK(&sdev->event_work, scsi_evt_thread);
310  	INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue);
311  
312  	sdev->sdev_gendev.parent = get_device(&starget->dev);
313  	sdev->sdev_target = starget;
314  
315  	/* usually NULL and set by ->slave_alloc instead */
316  	sdev->hostdata = hostdata;
317  
318  	/* if the device needs this changing, it may do so in the
319  	 * slave_configure function */
320  	sdev->max_device_blocked = SCSI_DEFAULT_DEVICE_BLOCKED;
321  
322  	/*
323  	 * Some low-level drivers may use device->type.
324  	 */
325  	sdev->type = -1;
326  
327  	/*
328  	 * Assume that the device will have handshaking problems,
329  	 * and then fix this field later if it turns out it
330  	 * doesn't
331  	 */
332  	sdev->borken = 1;
333  
334  	sdev->sg_reserved_size = INT_MAX;
335  
336  	scsi_init_limits(shost, &lim);
337  	q = blk_mq_alloc_queue(&sdev->host->tag_set, &lim, sdev);
338  	if (IS_ERR(q)) {
339  		/* release fn is set up in scsi_sysfs_device_initialize(), so
340  		 * have to free and put manually here */
341  		put_device(&starget->dev);
342  		kfree(sdev);
343  		goto out;
344  	}
345  	kref_get(&sdev->host->tagset_refcnt);
346  	sdev->request_queue = q;
347  
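	/* Default queue depth: the host's cmd_per_lun, or 1 if the driver left it at zero. */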
348  	depth = sdev->host->cmd_per_lun ?: 1;
349  
350  	/*
351  	 * Use .can_queue as budget map's depth because we have to
352  	 * support adjusting queue depth from sysfs. Meanwhile, use the
353  	 * default device queue depth to figure out the sbitmap shift,
354  	 * since this queue depth is used most of the time.
355  	 */
356  	if (scsi_realloc_sdev_budget_map(sdev, depth)) {
357  		put_device(&starget->dev);
358  		kfree(sdev);
359  		goto out;
360  	}
361  
362  	scsi_change_queue_depth(sdev, depth);
363  
364  	scsi_sysfs_device_initialize(sdev);
365  
366  	if (shost->hostt->slave_alloc) {
367  		ret = shost->hostt->slave_alloc(sdev);
368  		if (ret) {
369  			/*
370  			 * if LLDD reports slave not present, don't clutter
371  			 * console with alloc failure messages
372  			 */
373  			if (ret == -ENXIO)
374  				display_failure_msg = 0;
375  			goto out_device_destroy;
376  		}
377  	}
378  
379  	return sdev;
380  
381  out_device_destroy:
382  	__scsi_remove_device(sdev);
383  out:
384  	if (display_failure_msg)
385  		printk(ALLOC_FAILURE_MSG, __func__);
386  	return NULL;
387  }
388  
389  static void scsi_target_destroy(struct scsi_target *starget)
390  {
391  	struct device *dev = &starget->dev;
392  	struct Scsi_Host *shost = dev_to_shost(dev->parent);
393  	unsigned long flags;
394  
395  	BUG_ON(starget->state == STARGET_DEL);
396  	starget->state = STARGET_DEL;
397  	transport_destroy_device(dev);
398  	spin_lock_irqsave(shost->host_lock, flags);
399  	if (shost->hostt->target_destroy)
400  		shost->hostt->target_destroy(starget);
401  	list_del_init(&starget->siblings);
402  	spin_unlock_irqrestore(shost->host_lock, flags);
403  	put_device(dev);
404  }
405  
406  static void scsi_target_dev_release(struct device *dev)
407  {
408  	struct device *parent = dev->parent;
409  	struct scsi_target *starget = to_scsi_target(dev);
410  
411  	kfree(starget);
412  	put_device(parent);
413  }
414  
415  static const struct device_type scsi_target_type = {
416  	.name =		"scsi_target",
417  	.release =	scsi_target_dev_release,
418  };
419  
420  int scsi_is_target_device(const struct device *dev)
421  {
422  	return dev->type == &scsi_target_type;
423  }
424  EXPORT_SYMBOL(scsi_is_target_device);
425  
426  static struct scsi_target *__scsi_find_target(struct device *parent,
427  					      int channel, uint id)
428  {
429  	struct scsi_target *starget, *found_starget = NULL;
430  	struct Scsi_Host *shost = dev_to_shost(parent);
431  	/*
432  	 * Search for an existing target for this sdev.
433  	 */
434  	list_for_each_entry(starget, &shost->__targets, siblings) {
435  		if (starget->id == id &&
436  		    starget->channel == channel) {
437  			found_starget = starget;
438  			break;
439  		}
440  	}
441  	if (found_starget)
442  		get_device(&found_starget->dev);
443  
444  	return found_starget;
445  }
446  
447  /**
448   * scsi_target_reap_ref_release - remove target from visibility
449   * @kref: the reap_ref in the target being released
450   *
451   * Called on last put of reap_ref, which is the indication that no device
452   * under this target is visible anymore, so render the target invisible in
453   * sysfs.  Note: we have to be in user context here because the target reaps
454   * should be done in places where the scsi device visibility is being removed.
455   */
456  static void scsi_target_reap_ref_release(struct kref *kref)
457  {
458  	struct scsi_target *starget
459  		= container_of(kref, struct scsi_target, reap_ref);
460  
461  	/*
462  	 * if we get here and the target is still in a CREATED state that
463  	 * means it was allocated but never made visible (because a scan
464  	 * turned up no LUNs), so don't call device_del() on it.
465  	 */
466  	if ((starget->state != STARGET_CREATED) &&
467  	    (starget->state != STARGET_CREATED_REMOVE)) {
468  		transport_remove_device(&starget->dev);
469  		device_del(&starget->dev);
470  	}
471  	scsi_target_destroy(starget);
472  }
473  
474  static void scsi_target_reap_ref_put(struct scsi_target *starget)
475  {
476  	kref_put(&starget->reap_ref, scsi_target_reap_ref_release);
477  }
478  
479  /**
480   * scsi_alloc_target - allocate a new or find an existing target
481   * @parent:	parent of the target (need not be a scsi host)
482   * @channel:	target channel number (zero if no channels)
483   * @id:		target id number
484   *
485   * Return an existing target if one exists, provided it hasn't already
486   * gone into STARGET_DEL state, otherwise allocate a new target.
487   *
488   * The target is returned with an incremented reference, so the caller
489   * is responsible for both reaping and doing a last put
490   */
491  static struct scsi_target *scsi_alloc_target(struct device *parent,
492  					     int channel, uint id)
493  {
494  	struct Scsi_Host *shost = dev_to_shost(parent);
495  	struct device *dev = NULL;
496  	unsigned long flags;
497  	const int size = sizeof(struct scsi_target)
498  		+ shost->transportt->target_size;
499  	struct scsi_target *starget;
500  	struct scsi_target *found_target;
501  	int error, ref_got;
502  
503  	starget = kzalloc(size, GFP_KERNEL);
504  	if (!starget) {
505  		printk(KERN_ERR "%s: allocation failure\n", __func__);
506  		return NULL;
507  	}
508  	dev = &starget->dev;
509  	device_initialize(dev);
510  	kref_init(&starget->reap_ref);
511  	dev->parent = get_device(parent);
512  	dev_set_name(dev, "target%d:%d:%d", shost->host_no, channel, id);
513  	dev->bus = &scsi_bus_type;
514  	dev->type = &scsi_target_type;
515  	scsi_enable_async_suspend(dev);
516  	starget->id = id;
517  	starget->channel = channel;
518  	starget->can_queue = 0;
519  	INIT_LIST_HEAD(&starget->siblings);
520  	INIT_LIST_HEAD(&starget->devices);
521  	starget->state = STARGET_CREATED;
522  	starget->scsi_level = SCSI_2;
523  	starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
524   retry:
525  	spin_lock_irqsave(shost->host_lock, flags);
526  
527  	found_target = __scsi_find_target(parent, channel, id);
528  	if (found_target)
529  		goto found;
530  
531  	list_add_tail(&starget->siblings, &shost->__targets);
532  	spin_unlock_irqrestore(shost->host_lock, flags);
533  	/* allocate and add */
534  	transport_setup_device(dev);
535  	if (shost->hostt->target_alloc) {
536  		error = shost->hostt->target_alloc(starget);
537  
538  		if(error) {
539  			if (error != -ENXIO)
540  				dev_err(dev, "target allocation failed, error %d\n", error);
541  			/* don't want scsi_target_reap to do the final
542  			 * put because it will be under the host lock */
543  			scsi_target_destroy(starget);
544  			return NULL;
545  		}
546  	}
547  	get_device(dev);
548  
549  	return starget;
550  
551   found:
552  	/*
553  	 * release routine already fired if kref is zero, so if we can still
554  	 * take the reference, the target must be alive.  If we can't, it must
555  	 * be dying and we need to wait for a new target
556  	 */
557  	ref_got = kref_get_unless_zero(&found_target->reap_ref);
558  
559  	spin_unlock_irqrestore(shost->host_lock, flags);
560  	if (ref_got) {
561  		put_device(dev);
562  		return found_target;
563  	}
564  	/*
565  	 * Unfortunately, we found a dying target; need to wait until it's
566  	 * dead before we can get a new one.  There is an anomaly here.  We
567  	 * *should* call scsi_target_reap() to balance the kref_get() of the
568  	 * reap_ref above.  However, since the target is being released, it's
569  	 * already invisible and the reap_ref is irrelevant.  If we call
570  	 * scsi_target_reap() we might spuriously do another device_del() on
571  	 * an already invisible target.
572  	 */
573  	put_device(&found_target->dev);
574  	/*
575  	 * length of time is irrelevant here, we just want to yield the CPU
576  	 * for a tick to avoid busy waiting for the target to die.
577  	 */
578  	msleep(1);
579  	goto retry;
580  }
581  
582  /**
583   * scsi_target_reap - check to see if target is in use and destroy if not
584   * @starget: target to be checked
585   *
586   * This is used after removing a LUN or doing a last put of the target
587   * it checks atomically that nothing is using the target and removes
588   * it if so.
589   */
590  void scsi_target_reap(struct scsi_target *starget)
591  {
592  	/*
593  	 * serious problem if this triggers: STARGET_DEL is only set if
594  	 * the reap_ref drops to zero, so we're trying to do another final put
595  	 * on an already released kref
596  	 */
597  	BUG_ON(starget->state == STARGET_DEL);
598  	scsi_target_reap_ref_put(starget);
599  }
600  
601  /**
602   * scsi_sanitize_inquiry_string - remove non-graphical chars from an
603   *                                INQUIRY result string
604   * @s: INQUIRY result string to sanitize
605   * @len: length of the string
606   *
607   * Description:
608   *	The SCSI spec says that INQUIRY vendor, product, and revision
609   *	strings must consist entirely of graphic ASCII characters,
610   *	padded on the right with spaces.  Since not all devices obey
611   *	this rule, we will replace non-graphic or non-ASCII characters
612   *	with spaces.  Exception: a NUL character is interpreted as a
613   *	string terminator, so all the following characters are set to
614   *	spaces.
615   **/
616  void scsi_sanitize_inquiry_string(unsigned char *s, int len)
617  {
618  	int terminated = 0;
619  
620  	for (; len > 0; (--len, ++s)) {
621  		if (*s == 0)
622  			terminated = 1;
623  		if (terminated || *s < 0x20 || *s > 0x7e)
624  			*s = ' ';
625  	}
626  }
627  EXPORT_SYMBOL(scsi_sanitize_inquiry_string);
628  
629  
630  /**
631   * scsi_probe_lun - probe a single LUN using a SCSI INQUIRY
632   * @sdev:	scsi_device to probe
633   * @inq_result:	area to store the INQUIRY result
634   * @result_len: len of inq_result
635   * @bflags:	store any bflags found here
636   *
637   * Description:
638   *     Probe the LUN associated with @sdev using a standard SCSI INQUIRY;
639   *
640   *     If the INQUIRY is successful, zero is returned and the
641   *     INQUIRY data is in @inq_result; the scsi_level and INQUIRY length
642   *     are copied to the scsi_device, and any flags value is stored in *@bflags.
643   **/
644  static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
645  			  int result_len, blist_flags_t *bflags)
646  {
647  	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
648  	int first_inquiry_len, try_inquiry_len, next_inquiry_len;
649  	int response_len = 0;
650  	int pass, count, result, resid;
651  	struct scsi_failure failure_defs[] = {
652  		/*
653  		 * not-ready to ready transition [asc/ascq=0x28/0x0] or
654  		 * power-on, reset [asc/ascq=0x29/0x0], continue. INQUIRY
655  		 * should not yield UNIT_ATTENTION but many buggy devices do
656  		 * so anyway.
657  		 */
658  		{
659  			.sense = UNIT_ATTENTION,
660  			.asc = 0x28,
661  			.result = SAM_STAT_CHECK_CONDITION,
662  		},
663  		{
664  			.sense = UNIT_ATTENTION,
665  			.asc = 0x29,
666  			.result = SAM_STAT_CHECK_CONDITION,
667  		},
668  		{
669  			.allowed = 1,
670  			.result = DID_TIME_OUT << 16,
671  		},
672  		{}
673  	};
674  	struct scsi_failures failures = {
675  		.total_allowed = 3,
676  		.failure_definitions = failure_defs,
677  	};
678  	const struct scsi_exec_args exec_args = {
679  		.resid = &resid,
680  		.failures = &failures,
681  	};
682  
683  	*bflags = 0;
684  
685  	/* Perform up to 3 passes.  The first pass uses a conservative
686  	 * transfer length of 36 unless sdev->inquiry_len specifies a
687  	 * different value. */
688  	first_inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36;
689  	try_inquiry_len = first_inquiry_len;
690  	pass = 1;
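	/*
	 * Pass 1 uses the conservative length, pass 2 retries with the larger
	 * length the device reported, and pass 3 falls back to the length
	 * that worked in pass 1 if pass 2 failed.
	 */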
691  
692   next_pass:
693  	SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
694  				"scsi scan: INQUIRY pass %d length %d\n",
695  				pass, try_inquiry_len));
696  
697  	/* Each pass gets up to three chances to ignore Unit Attention */
698  	scsi_failures_reset_retries(&failures);
699  
700  	for (count = 0; count < 3; ++count) {
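		/* Build a 6-byte INQUIRY CDB; byte 4 is the allocation length. */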
701  		memset(scsi_cmd, 0, 6);
702  		scsi_cmd[0] = INQUIRY;
703  		scsi_cmd[4] = (unsigned char) try_inquiry_len;
704  
705  		memset(inq_result, 0, try_inquiry_len);
706  
707  		result = scsi_execute_cmd(sdev,  scsi_cmd, REQ_OP_DRV_IN,
708  					  inq_result, try_inquiry_len,
709  					  HZ / 2 + HZ * scsi_inq_timeout, 3,
710  					  &exec_args);
711  
712  		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
713  				"scsi scan: INQUIRY %s with code 0x%x\n",
714  				result ? "failed" : "successful", result));
715  
716  		if (result == 0) {
717  			/*
718  			 * if nothing was transferred, we try
719  			 * again. It's a workaround for some USB
720  			 * devices.
721  			 */
722  			if (resid == try_inquiry_len)
723  				continue;
724  		}
725  		break;
726  	}
727  
728  	if (result == 0) {
729  		scsi_sanitize_inquiry_string(&inq_result[8], 8);
730  		scsi_sanitize_inquiry_string(&inq_result[16], 16);
731  		scsi_sanitize_inquiry_string(&inq_result[32], 4);
732  
733  		response_len = inq_result[4] + 5;
734  		if (response_len > 255)
735  			response_len = first_inquiry_len;	/* sanity */
736  
737  		/*
738  		 * Get any flags for this device.
739  		 *
740  		 * XXX add a bflags to scsi_device, and replace the
741  		 * corresponding bit fields in scsi_device, so bflags
742  		 * need not be passed as an argument.
743  		 */
744  		*bflags = scsi_get_device_flags(sdev, &inq_result[8],
745  				&inq_result[16]);
746  
747  		/* When the first pass succeeds we gain information about
748  		 * what larger transfer lengths might work. */
749  		if (pass == 1) {
750  			if (BLIST_INQUIRY_36 & *bflags)
751  				next_inquiry_len = 36;
752  			/*
753  			 * LLD specified a maximum sdev->inquiry_len
754  			 * but device claims it has more data. Capping
755  			 * the length only makes sense for legacy
756  			 * devices. If a device supports SPC-4 (2014)
757  			 * or newer, assume that it is safe to ask for
758  			 * as much as the device says it supports.
759  			 */
760  			else if (sdev->inquiry_len &&
761  				 response_len > sdev->inquiry_len &&
762  				 (inq_result[2] & 0x7) < 6) /* SPC-4 */
763  				next_inquiry_len = sdev->inquiry_len;
764  			else
765  				next_inquiry_len = response_len;
766  
767  			/* If more data is available perform the second pass */
768  			if (next_inquiry_len > try_inquiry_len) {
769  				try_inquiry_len = next_inquiry_len;
770  				pass = 2;
771  				goto next_pass;
772  			}
773  		}
774  
775  	} else if (pass == 2) {
776  		sdev_printk(KERN_INFO, sdev,
777  			    "scsi scan: %d byte inquiry failed.  "
778  			    "Consider BLIST_INQUIRY_36 for this device\n",
779  			    try_inquiry_len);
780  
781  		/* If this pass failed, the third pass goes back and transfers
782  		 * the same amount as we successfully got in the first pass. */
783  		try_inquiry_len = first_inquiry_len;
784  		pass = 3;
785  		goto next_pass;
786  	}
787  
788  	/* If the last transfer attempt got an error, assume the
789  	 * peripheral doesn't exist or is dead. */
790  	if (result)
791  		return -EIO;
792  
793  	/* Don't report any more data than the device says is valid */
794  	sdev->inquiry_len = min(try_inquiry_len, response_len);
795  
796  	/*
797  	 * XXX Abort if the response length is less than 36? If less than
798  	 * 32, the lookup of the device flags (above) could be invalid,
799  	 * and it would be possible to take an incorrect action - we do
800  	 * not want to hang because of a short INQUIRY. On the flip side,
801  	 * if the device is spun down or becoming ready (and so it gives a
802  	 * short INQUIRY), an abort here prevents any further use of the
803  	 * device, including spin up.
804  	 *
805  	 * On the whole, the best approach seems to be to assume the first
806  	 * 36 bytes are valid no matter what the device says.  That's
807  	 * better than copying < 36 bytes to the inquiry-result buffer
808  	 * and displaying garbage for the Vendor, Product, or Revision
809  	 * strings.
810  	 */
811  	if (sdev->inquiry_len < 36) {
812  		if (!sdev->host->short_inquiry) {
813  			shost_printk(KERN_INFO, sdev->host,
814  				    "scsi scan: INQUIRY result too short (%d),"
815  				    " using 36\n", sdev->inquiry_len);
816  			sdev->host->short_inquiry = 1;
817  		}
818  		sdev->inquiry_len = 36;
819  	}
820  
821  	/*
822  	 * Related to the above issue:
823  	 *
824  	 * XXX Devices (disk or all?) should be sent a TEST UNIT READY,
825  	 * and if not ready, sent a START_STOP to start (maybe spin up) and
826  	 * then send the INQUIRY again, since the INQUIRY can change after
827  	 * a device is initialized.
828  	 *
829  	 * Ideally, start a device if explicitly asked to do so.  This
830  	 * assumes that a device is spun up on power on, spun down on
831  	 * request, and then spun up on request.
832  	 */
833  
834  	/*
835  	 * The scanning code needs to know the scsi_level, even if no
836  	 * device is attached at LUN 0 (SCSI_SCAN_TARGET_PRESENT) so
837  	 * non-zero LUNs can be scanned.
838  	 */
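	/*
	 * Map the ANSI version field (byte 2) to the kernel's SCSI_* level
	 * constants, which are offset by one from the reported version for
	 * SCSI-2 and later (and for SCSI-1 CCS devices).
	 */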
839  	sdev->scsi_level = inq_result[2] & 0x0f;
840  	if (sdev->scsi_level >= 2 ||
841  	    (sdev->scsi_level == 1 && (inq_result[3] & 0x0f) == 1))
842  		sdev->scsi_level++;
843  	sdev->sdev_target->scsi_level = sdev->scsi_level;
844  
845  	/*
846  	 * If SCSI-2 or lower, and if the transport requires it,
847  	 * store the LUN value in CDB[1].
848  	 */
849  	sdev->lun_in_cdb = 0;
850  	if (sdev->scsi_level <= SCSI_2 &&
851  	    sdev->scsi_level != SCSI_UNKNOWN &&
852  	    !sdev->host->no_scsi2_lun_in_cdb)
853  		sdev->lun_in_cdb = 1;
854  
855  	return 0;
856  }
857  
858  /**
859   * scsi_add_lun - allocate and fully initialize a scsi_device
860   * @sdev:	holds information to be stored in the new scsi_device
861   * @inq_result:	holds the result of a previous INQUIRY to the LUN
862   * @bflags:	black/white list flag
863   * @async:	1 if this device is being scanned asynchronously
864   *
865   * Description:
866   *     Initialize the scsi_device @sdev.  Optionally set fields based
867   *     on values in *@bflags.
868   *
869   * Return:
870   *     SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
871   *     SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
872   **/
873  static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
874  		blist_flags_t *bflags, int async)
875  {
876  	const struct scsi_host_template *hostt = sdev->host->hostt;
877  	struct queue_limits lim;
878  	int ret;
879  
880  	/*
881  	 * XXX do not save the inquiry, since it can change underneath us,
882  	 * save just vendor/model/rev.
883  	 *
884  	 * Rather than save it and have an ioctl that retrieves the saved
885  	 * value, have an ioctl that executes the same INQUIRY code used
886  	 * in scsi_probe_lun, let user level programs doing INQUIRY
887  	 * scanning run at their own risk, or supply a user level program
888  	 * that can correctly scan.
889  	 */
890  
891  	/*
892  	 * Copy at least 36 bytes of INQUIRY data, so that we don't
893  	 * dereference unallocated memory when accessing the Vendor,
894  	 * Product, and Revision strings.  Badly behaved devices may set
895  	 * the INQUIRY Additional Length byte to a small value, indicating
896  	 * these strings are invalid, but often they contain plausible data
897  	 * nonetheless.  It doesn't matter if the device sent < 36 bytes
898  	 * total, since scsi_probe_lun() initializes inq_result with 0s.
899  	 */
900  	sdev->inquiry = kmemdup(inq_result,
901  				max_t(size_t, sdev->inquiry_len, 36),
902  				GFP_KERNEL);
903  	if (sdev->inquiry == NULL)
904  		return SCSI_SCAN_NO_RESPONSE;
905  
906  	sdev->vendor = (char *) (sdev->inquiry + 8);
907  	sdev->model = (char *) (sdev->inquiry + 16);
908  	sdev->rev = (char *) (sdev->inquiry + 32);
909  
910  	if (strncmp(sdev->vendor, "ATA     ", 8) == 0) {
911  		/*
912  		 * sata emulation layer device.  This is a hack to work around
913  		 * the SATL power management specifications which state that
914  		 * when the SATL detects the device has gone into standby
915  		 * mode, it shall respond with NOT READY.
916  		 */
917  		sdev->allow_restart = 1;
918  	}
919  
920  	if (*bflags & BLIST_ISROM) {
921  		sdev->type = TYPE_ROM;
922  		sdev->removable = 1;
923  	} else {
924  		sdev->type = (inq_result[0] & 0x1f);
925  		sdev->removable = (inq_result[1] & 0x80) >> 7;
926  
927  		/*
928  		 * some devices may respond with wrong type for
929  		 * well-known logical units. Force well-known type
930  		 * to enumerate them correctly.
931  		 */
932  		if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
933  			sdev_printk(KERN_WARNING, sdev,
934  				"%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
935  				__func__, sdev->type, (unsigned int)sdev->lun);
936  			sdev->type = TYPE_WLUN;
937  		}
938  
939  	}
940  
941  	if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
942  		/* RBC and MMC devices can return SCSI-3 compliance and yet
943  		 * still not support REPORT LUNS, so make them act as
944  		 * BLIST_NOREPORTLUN unless BLIST_REPORTLUN2 is
945  		 * specifically set */
946  		if ((*bflags & BLIST_REPORTLUN2) == 0)
947  			*bflags |= BLIST_NOREPORTLUN;
948  	}
949  
950  	/*
951  	 * For a peripheral qualifier (PQ) value of 1 (001b), the SCSI
952  	 * spec says: The device server is capable of supporting the
953  	 * specified peripheral device type on this logical unit. However,
954  	 * the physical device is not currently connected to this logical
955  	 * unit.
956  	 *
957  	 * The above is vague, as it implies that we could treat 001 and
958  	 * 011 the same. Stay compatible with previous code, and create a
959  	 * scsi_device for a PQ of 1
960  	 *
961  	 * Don't set the device offline here; rather let the upper
962  	 * level drivers eval the PQ to decide whether they should
963  	 * attach. So remove ((inq_result[0] >> 5) & 7) == 1 check.
964  	 */
965  
966  	sdev->inq_periph_qual = (inq_result[0] >> 5) & 7;
967  	sdev->lockable = sdev->removable;
968  	sdev->soft_reset = (inq_result[7] & 1) && ((inq_result[3] & 7) == 2);
969  
970  	if (sdev->scsi_level >= SCSI_3 ||
971  			(sdev->inquiry_len > 56 && inq_result[56] & 0x04))
972  		sdev->ppr = 1;
973  	if (inq_result[7] & 0x60)
974  		sdev->wdtr = 1;
975  	if (inq_result[7] & 0x10)
976  		sdev->sdtr = 1;
977  
978  	sdev_printk(KERN_NOTICE, sdev, "%s %.8s %.16s %.4s PQ: %d "
979  			"ANSI: %d%s\n", scsi_device_type(sdev->type),
980  			sdev->vendor, sdev->model, sdev->rev,
981  			sdev->inq_periph_qual, inq_result[2] & 0x07,
982  			(inq_result[3] & 0x0f) == 1 ? " CCS" : "");
983  
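	/* INQUIRY byte 7, bit 1 (CmdQue) indicates command queueing support. */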
984  	if ((sdev->scsi_level >= SCSI_2) && (inq_result[7] & 2) &&
985  	    !(*bflags & BLIST_NOTQ)) {
986  		sdev->tagged_supported = 1;
987  		sdev->simple_tags = 1;
988  	}
989  
990  	/*
991  	 * Some devices (Texel CD ROM drives) have handshaking problems
992  	 * when used with the Seagate controllers. borken is initialized
993  	 * to 1, and is cleared here unless BLIST_BORKEN is set.
994  	 */
995  	if ((*bflags & BLIST_BORKEN) == 0)
996  		sdev->borken = 0;
997  
998  	if (*bflags & BLIST_NO_ULD_ATTACH)
999  		sdev->no_uld_attach = 1;
1000  
1001  	/*
1002  	 * Apparently some really broken devices (contrary to the SCSI
1003  	 * standards) need to be selected without asserting ATN
1004  	 */
1005  	if (*bflags & BLIST_SELECT_NO_ATN)
1006  		sdev->select_no_atn = 1;
1007  
1008  	/*
1009  	 * Some devices may not want to have a start command automatically
1010  	 * issued when a device is added.
1011  	 */
1012  	if (*bflags & BLIST_NOSTARTONADD)
1013  		sdev->no_start_on_add = 1;
1014  
1015  	if (*bflags & BLIST_SINGLELUN)
1016  		scsi_target(sdev)->single_lun = 1;
1017  
1018  	sdev->use_10_for_rw = 1;
1019  
1020  	/* some devices don't like REPORT SUPPORTED OPERATION CODES
1021  	 * and will simply time out, causing sd_mod init to take a very
1022  	 * very long time */
1023  	if (*bflags & BLIST_NO_RSOC)
1024  		sdev->no_report_opcodes = 1;
1025  
1026  	/* set the device running here so that slave configure
1027  	 * may do I/O */
1028  	mutex_lock(&sdev->state_mutex);
1029  	ret = scsi_device_set_state(sdev, SDEV_RUNNING);
1030  	if (ret)
1031  		ret = scsi_device_set_state(sdev, SDEV_BLOCK);
1032  	mutex_unlock(&sdev->state_mutex);
1033  
1034  	if (ret) {
1035  		sdev_printk(KERN_ERR, sdev,
1036  			    "in wrong state %s to complete scan\n",
1037  			    scsi_device_state_name(sdev->sdev_state));
1038  		return SCSI_SCAN_NO_RESPONSE;
1039  	}
1040  
1041  	if (*bflags & BLIST_NOT_LOCKABLE)
1042  		sdev->lockable = 0;
1043  
1044  	if (*bflags & BLIST_RETRY_HWERROR)
1045  		sdev->retry_hwerror = 1;
1046  
1047  	if (*bflags & BLIST_NO_DIF)
1048  		sdev->no_dif = 1;
1049  
1050  	if (*bflags & BLIST_UNMAP_LIMIT_WS)
1051  		sdev->unmap_limit_for_ws = 1;
1052  
1053  	if (*bflags & BLIST_IGN_MEDIA_CHANGE)
1054  		sdev->ignore_media_change = 1;
1055  
1056  	sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
1057  
1058  	if (*bflags & BLIST_TRY_VPD_PAGES)
1059  		sdev->try_vpd_pages = 1;
1060  	else if (*bflags & BLIST_SKIP_VPD_PAGES)
1061  		sdev->skip_vpd_pages = 1;
1062  
1063  	if (*bflags & BLIST_NO_VPD_SIZE)
1064  		sdev->no_vpd_size = 1;
1065  
1066  	transport_configure_device(&sdev->sdev_gendev);
1067  
1068  	/*
1069  	 * No need to freeze the queue as it isn't reachable by anyone else yet.
1070  	 */
1071  	lim = queue_limits_start_update(sdev->request_queue);
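	/* max_hw_sectors is specified in units of 512-byte sectors. */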
1072  	if (*bflags & BLIST_MAX_512)
1073  		lim.max_hw_sectors = 512;
1074  	else if (*bflags & BLIST_MAX_1024)
1075  		lim.max_hw_sectors = 1024;
1076  
1077  	if (hostt->device_configure)
1078  		ret = hostt->device_configure(sdev, &lim);
1079  	else if (hostt->slave_configure)
1080  		ret = hostt->slave_configure(sdev);
1081  	if (ret) {
1082  		queue_limits_cancel_update(sdev->request_queue);
1083  		/*
1084  		 * If the LLDD reports device not present, don't clutter the
1085  		 * console with failure messages.
1086  		 */
1087  		if (ret != -ENXIO)
1088  			sdev_printk(KERN_ERR, sdev,
1089  				"failed to configure device\n");
1090  		return SCSI_SCAN_NO_RESPONSE;
1091  	}
1092  
1093  	ret = queue_limits_commit_update(sdev->request_queue, &lim);
1094  	if (ret) {
1095  		sdev_printk(KERN_ERR, sdev, "failed to apply queue limits.\n");
1096  		return SCSI_SCAN_NO_RESPONSE;
1097  	}
1098  
1099  	/*
1100  	 * The queue_depth is often changed in ->device_configure.
1101  	 *
1102  	 * Set up budget map again since memory consumption of the map depends
1103  	 * on actual queue depth.
1104  	 */
1105  	if (hostt->device_configure || hostt->slave_configure)
1106  		scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
1107  
1108  	if (sdev->scsi_level >= SCSI_3)
1109  		scsi_attach_vpd(sdev);
1110  
1111  	scsi_cdl_check(sdev);
1112  
1113  	sdev->max_queue_depth = sdev->queue_depth;
1114  	WARN_ON_ONCE(sdev->max_queue_depth > sdev->budget_map.depth);
1115  	sdev->sdev_bflags = *bflags;
1116  
1117  	/*
1118  	 * Ok, the device is now all set up, we can
1119  	 * register it and tell the rest of the kernel
1120  	 * about it.
1121  	 */
1122  	if (!async && scsi_sysfs_add_sdev(sdev) != 0)
1123  		return SCSI_SCAN_NO_RESPONSE;
1124  
1125  	return SCSI_SCAN_LUN_PRESENT;
1126  }
1127  
1128  #ifdef CONFIG_SCSI_LOGGING
1129  /**
1130   * scsi_inq_str - copy INQUIRY data from first to end offset, stripping trailing whitespace
1131   * @buf:   Output buffer with at least end-first+1 bytes of space
1132   * @inq:   Inquiry buffer (input)
1133   * @first: Offset of string into inq
1134   * @end:   Index after last character in inq
1135   */
1136  static unsigned char *scsi_inq_str(unsigned char *buf, unsigned char *inq,
1137  				   unsigned first, unsigned end)
1138  {
1139  	unsigned term = 0, idx;
1140  
1141  	for (idx = 0; idx + first < end && idx + first < inq[4] + 5; idx++) {
1142  		if (inq[idx+first] > ' ') {
1143  			buf[idx] = inq[idx+first];
1144  			term = idx+1;
1145  		} else {
1146  			buf[idx] = ' ';
1147  		}
1148  	}
1149  	buf[term] = 0;
1150  	return buf;
1151  }
1152  #endif
1153  
1154  /**
1155   * scsi_probe_and_add_lun - probe a LUN, if a LUN is found add it
1156   * @starget:	pointer to target device structure
1157   * @lun:	LUN of target device
1158   * @bflagsp:	store bflags here if not NULL
1159   * @sdevp:	probe the LUN corresponding to this scsi_device
1160   * @rescan:     if not equal to SCSI_SCAN_INITIAL skip some code only
1161   *              needed on first scan
1162   * @hostdata:	passed to scsi_alloc_sdev()
1163   *
1164   * Description:
1165   *     Call scsi_probe_lun, if a LUN with an attached device is found,
1166   *     allocate and set it up by calling scsi_add_lun.
1167   *
1168   * Return:
1169   *
1170   *   - SCSI_SCAN_NO_RESPONSE: could not allocate or setup a scsi_device
1171   *   - SCSI_SCAN_TARGET_PRESENT: target responded, but no device is
1172   *         attached at the LUN
1173   *   - SCSI_SCAN_LUN_PRESENT: a new scsi_device was allocated and initialized
1174   **/
1175  static int scsi_probe_and_add_lun(struct scsi_target *starget,
1176  				  u64 lun, blist_flags_t *bflagsp,
1177  				  struct scsi_device **sdevp,
1178  				  enum scsi_scan_mode rescan,
1179  				  void *hostdata)
1180  {
1181  	struct scsi_device *sdev;
1182  	unsigned char *result;
1183  	blist_flags_t bflags;
1184  	int res = SCSI_SCAN_NO_RESPONSE, result_len = 256;
1185  	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1186  
1187  	/*
1188  	 * The rescan flag is used as an optimization; the first scan of a
1189  	 * host adapter calls into here with rescan == SCSI_SCAN_INITIAL.
1190  	 */
1191  	sdev = scsi_device_lookup_by_target(starget, lun);
1192  	if (sdev) {
1193  		if (rescan != SCSI_SCAN_INITIAL || !scsi_device_created(sdev)) {
1194  			SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
1195  				"scsi scan: device exists on %s\n",
1196  				dev_name(&sdev->sdev_gendev)));
1197  			if (sdevp)
1198  				*sdevp = sdev;
1199  			else
1200  				scsi_device_put(sdev);
1201  
1202  			if (bflagsp)
1203  				*bflagsp = scsi_get_device_flags(sdev,
1204  								 sdev->vendor,
1205  								 sdev->model);
1206  			return SCSI_SCAN_LUN_PRESENT;
1207  		}
1208  		scsi_device_put(sdev);
1209  	} else
1210  		sdev = scsi_alloc_sdev(starget, lun, hostdata);
1211  	if (!sdev)
1212  		goto out;
1213  
1214  	result = kmalloc(result_len, GFP_KERNEL);
1215  	if (!result)
1216  		goto out_free_sdev;
1217  
1218  	if (scsi_probe_lun(sdev, result, result_len, &bflags))
1219  		goto out_free_result;
1220  
1221  	if (bflagsp)
1222  		*bflagsp = bflags;
1223  	/*
1224  	 * result contains valid SCSI INQUIRY data.
1225  	 */
1226  	if ((result[0] >> 5) == 3) {
1227  		/*
1228  		 * For a Peripheral qualifier 3 (011b), the SCSI
1229  		 * spec says: The device server is not capable of
1230  		 * supporting a physical device on this logical
1231  		 * unit.
1232  		 *
1233  		 * For disks, this implies that there is no
1234  		 * logical disk configured at sdev->lun, but there
1235  		 * is a target id responding.
1236  		 */
1237  		SCSI_LOG_SCAN_BUS(2, sdev_printk(KERN_INFO, sdev, "scsi scan:"
1238  				   " peripheral qualifier of 3, device not"
1239  				   " added\n"));
1240  		if (lun == 0) {
1241  			SCSI_LOG_SCAN_BUS(1, {
1242  				unsigned char vend[9];
1243  				unsigned char mod[17];
1244  
1245  				sdev_printk(KERN_INFO, sdev,
1246  					"scsi scan: consider passing scsi_mod."
1247  					"dev_flags=%s:%s:0x240 or 0x1000240\n",
1248  					scsi_inq_str(vend, result, 8, 16),
1249  					scsi_inq_str(mod, result, 16, 32));
1250  			});
1251  
1252  		}
1253  
1254  		res = SCSI_SCAN_TARGET_PRESENT;
1255  		goto out_free_result;
1256  	}
1257  
1258  	/*
1259  	 * Some targets may set slight variations of PQ and PDT to signal
1260  	 * that no LUN is present, so don't add sdev in these cases.
1261  	 * Two specific examples are:
1262  	 * 1) NetApp targets: return PQ=1, PDT=0x1f
1263  	 * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
1264  	 *    in the UFI 1.0 spec (we cannot rely on reserved bits).
1265  	 *
1266  	 * References:
1267  	 * 1) SCSI SPC-3, pp. 145-146
1268  	 * PQ=1: "A peripheral device having the specified peripheral
1269  	 * device type is not connected to this logical unit. However, the
1270  	 * device server is capable of supporting the specified peripheral
1271  	 * device type on this logical unit."
1272  	 * PDT=0x1f: "Unknown or no device type"
1273  	 * 2) USB UFI 1.0, p. 20
1274  	 * PDT=00h Direct-access device (floppy)
1275  	 * PDT=1Fh none (no FDD connected to the requested logical unit)
1276  	 */
1277  	if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
1278  	    (result[0] & 0x1f) == 0x1f &&
1279  	    !scsi_is_wlun(lun)) {
1280  		SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
1281  					"scsi scan: peripheral device type"
1282  					" of 31, no device added\n"));
1283  		res = SCSI_SCAN_TARGET_PRESENT;
1284  		goto out_free_result;
1285  	}
1286  
1287  	res = scsi_add_lun(sdev, result, &bflags, shost->async_scan);
1288  	if (res == SCSI_SCAN_LUN_PRESENT) {
1289  		if (bflags & BLIST_KEY) {
1290  			sdev->lockable = 0;
1291  			scsi_unlock_floptical(sdev, result);
1292  		}
1293  	}
1294  
1295   out_free_result:
1296  	kfree(result);
1297   out_free_sdev:
1298  	if (res == SCSI_SCAN_LUN_PRESENT) {
1299  		if (sdevp) {
1300  			if (scsi_device_get(sdev) == 0) {
1301  				*sdevp = sdev;
1302  			} else {
1303  				__scsi_remove_device(sdev);
1304  				res = SCSI_SCAN_NO_RESPONSE;
1305  			}
1306  		}
1307  	} else
1308  		__scsi_remove_device(sdev);
1309   out:
1310  	return res;
1311  }
1312  
1313  /**
1314   * scsi_sequential_lun_scan - sequentially scan a SCSI target
1315   * @starget:	pointer to target structure to scan
1316   * @bflags:	black/white list flag for LUN 0
1317   * @scsi_level: Which version of the standard does this device adhere to
1318   * @rescan:     passed to scsi_probe_add_lun()
1319   *
1320   * Description:
1321   *     Generally, scan from LUN 1 (LUN 0 is assumed to already have been
1322   *     scanned) to some maximum lun until a LUN is found with no device
1323   *     attached. Use the bflags to figure out any oddities.
1324   *
1325   *     Modifies sdevscan->lun.
1326   **/
1327  static void scsi_sequential_lun_scan(struct scsi_target *starget,
1328  				     blist_flags_t bflags, int scsi_level,
1329  				     enum scsi_scan_mode rescan)
1330  {
1331  	uint max_dev_lun;
1332  	u64 sparse_lun, lun;
1333  	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
1334  
1335  	SCSI_LOG_SCAN_BUS(3, starget_printk(KERN_INFO, starget,
1336  		"scsi scan: Sequential scan\n"));
1337  
1338  	max_dev_lun = min(max_scsi_luns, shost->max_lun);
1339  	/*
1340  	 * If this device is known to support sparse multiple units,
1341  	 * override the other settings, and scan all of them. Normally,
1342  	 * SCSI-3 devices should be scanned via the REPORT LUNS.
1343  	 */
1344  	if (bflags & BLIST_SPARSELUN) {
1345  		max_dev_lun = shost->max_lun;
1346  		sparse_lun = 1;
1347  	} else
1348  		sparse_lun = 0;
1349  
1350  	/*
1351  	 * If less than SCSI_1_CCS, and no special lun scanning, stop
1352  	 * scanning; this matches 2.4 behaviour, but could just be a bug
1353  	 * (to continue scanning a SCSI_1_CCS device).
1354  	 *
1355  	 * This test is broken.  We might not have any device on lun0 for
1356  	 * a sparselun device, and if that's the case then how would we
1357  	 * know the real scsi_level, eh?  It might make sense to just not
1358  	 * scan any SCSI_1 device for non-0 luns, but that check would best
1359  	 * go into scsi_alloc_sdev() and just have it return null when asked
1360  	 * to alloc an sdev for lun > 0 on an already found SCSI_1 device.
1361  	 *
1362  	if ((sdevscan->scsi_level < SCSI_1_CCS) &&
1363  	    ((bflags & (BLIST_FORCELUN | BLIST_SPARSELUN | BLIST_MAX5LUN))
1364  	     == 0))
1365  		return;
1366  	 */
1367  	/*
1368  	 * If this device is known to support multiple units, override
1369  	 * the other settings, and scan all of them.
1370  	 */
1371  	if (bflags & BLIST_FORCELUN)
1372  		max_dev_lun = shost->max_lun;
1373  	/*
1374  	 * REGAL CDC-4X: avoid hang after LUN 4
1375  	 */
1376  	if (bflags & BLIST_MAX5LUN)
1377  		max_dev_lun = min(5U, max_dev_lun);
1378  	/*
1379  	 * Do not scan SCSI-2 or lower device past LUN 7, unless
1380  	 * BLIST_LARGELUN.
1381  	 */
1382  	if (scsi_level < SCSI_3 && !(bflags & BLIST_LARGELUN))
1383  		max_dev_lun = min(8U, max_dev_lun);
1384  	else
1385  		max_dev_lun = min(256U, max_dev_lun);
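	/* A sequential scan therefore never probes past LUN 255. */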
1386  
1387  	/*
1388  	 * We have already scanned LUN 0, so start at LUN 1. Keep scanning
1389  	 * until we reach the max, or no LUN is found and we are not
1390  	 * sparse_lun.
1391  	 */
1392  	for (lun = 1; lun < max_dev_lun; ++lun)
1393  		if ((scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan,
1394  					    NULL) != SCSI_SCAN_LUN_PRESENT) &&
1395  		    !sparse_lun)
1396  			return;
1397  }
1398  
1399  /**
1400   * scsi_report_lun_scan - Scan using SCSI REPORT LUN results
1401   * @starget: which target
1402   * @bflags: Zero or a mix of BLIST_NOLUN, BLIST_REPORTLUN2, or BLIST_NOREPORTLUN
1403   * @rescan: nonzero if we can skip code only needed on first scan
1404   *
1405   * Description:
1406   *   Fast scanning for modern (SCSI-3) devices by sending a REPORT LUN command.
1407   *   Scan the resulting list of LUNs by calling scsi_probe_and_add_lun.
1408   *
1409   *   If BLIST_REPORTLUN2 is set, scan a target that supports more than 8
1410   *   LUNs even if it's older than SCSI-3.
1411   *   If BLIST_NOREPORTLUN is set, return 1 always.
1412   *   If BLIST_NOLUN is set, return 0 always.
1413   *   If starget->no_report_luns is set, return 1 always.
1414   *
1415   * Return:
1416   *     0: scan completed (or no memory, so further scanning is futile)
1417   *     1: could not scan with REPORT LUN
1418   **/
1419  static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflags,
1420  				enum scsi_scan_mode rescan)
1421  {
1422  	unsigned char scsi_cmd[MAX_COMMAND_SIZE];
1423  	unsigned int length;
1424  	u64 lun;
1425  	unsigned int num_luns;
1426  	int result;
1427  	struct scsi_lun *lunp, *lun_data;
1428  	struct scsi_device *sdev;
1429  	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1430  	struct scsi_failure failure_defs[] = {
1431  		{
1432  			.sense = UNIT_ATTENTION,
1433  			.asc = SCMD_FAILURE_ASC_ANY,
1434  			.ascq = SCMD_FAILURE_ASCQ_ANY,
1435  			.result = SAM_STAT_CHECK_CONDITION,
1436  		},
1437  		/* Fail all CCs except the UA above */
1438  		{
1439  			.sense = SCMD_FAILURE_SENSE_ANY,
1440  			.result = SAM_STAT_CHECK_CONDITION,
1441  		},
1442  		/* Retry any other errors not listed above */
1443  		{
1444  			.result = SCMD_FAILURE_RESULT_ANY,
1445  		},
1446  		{}
1447  	};
1448  	struct scsi_failures failures = {
1449  		.total_allowed = 3,
1450  		.failure_definitions = failure_defs,
1451  	};
1452  	const struct scsi_exec_args exec_args = {
1453  		.failures = &failures,
1454  	};
1455  	int ret = 0;
1456  
1457  	/*
1458  	 * Only support SCSI-3 and up devices if BLIST_NOREPORTLUN is not set.
1459  	 * Also allow SCSI-2 if BLIST_REPORTLUN2 is set and host adapter does
1460  	 * support more than 8 LUNs.
1461  	 * Don't attempt if the target doesn't support REPORT LUNS.
1462  	 */
1463  	if (bflags & BLIST_NOREPORTLUN)
1464  		return 1;
1465  	if (starget->scsi_level < SCSI_2 &&
1466  	    starget->scsi_level != SCSI_UNKNOWN)
1467  		return 1;
1468  	if (starget->scsi_level < SCSI_3 &&
1469  	    (!(bflags & BLIST_REPORTLUN2) || shost->max_lun <= 8))
1470  		return 1;
1471  	if (bflags & BLIST_NOLUN)
1472  		return 0;
1473  	if (starget->no_report_luns)
1474  		return 1;
1475  
1476  	if (!(sdev = scsi_device_lookup_by_target(starget, 0))) {
1477  		sdev = scsi_alloc_sdev(starget, 0, NULL);
1478  		if (!sdev)
1479  			return 0;
1480  		if (scsi_device_get(sdev)) {
1481  			__scsi_remove_device(sdev);
1482  			return 0;
1483  		}
1484  	}
1485  
1486  	/*
1487  	 * Allocate enough to hold the header (the same size as one scsi_lun)
1488  	 * plus the number of luns we are requesting.  511 was the default
1489  	 * value of the now removed max_report_luns parameter.
1490  	 */
1491  	length = (511 + 1) * sizeof(struct scsi_lun);
1492  retry:
1493  	lun_data = kmalloc(length, GFP_KERNEL);
1494  	if (!lun_data) {
1495  		printk(ALLOC_FAILURE_MSG, __func__);
1496  		goto out;
1497  	}
1498  
1499  	scsi_cmd[0] = REPORT_LUNS;
1500  
1501  	/*
1502  	 * bytes 1 - 5: reserved, set to zero.
1503  	 */
1504  	memset(&scsi_cmd[1], 0, 5);
1505  
1506  	/*
1507  	 * bytes 6 - 9: allocation length (size of the data-in buffer).
1508  	 */
1509  	put_unaligned_be32(length, &scsi_cmd[6]);
1510  
1511  	scsi_cmd[10] = 0;	/* reserved */
1512  	scsi_cmd[11] = 0;	/* control */
1513  
1514  	/*
1515  	 * We can get a UNIT ATTENTION, for example a power on/reset, so
1516  	 * retry a few times (like sd.c does for TEST UNIT READY).
1517  	 * Experience shows some combinations of adapter/devices get at
1518  	 * least two power on/resets.
1519  	 *
1520  	 * Illegal requests (for devices that do not support REPORT LUNS)
1521  	 * should come through as a check condition, and will not generate
1522  	 * a retry.
1523  	 */
1524  	scsi_failures_reset_retries(&failures);
1525  
1526  	SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1527  			  "scsi scan: Sending REPORT LUNS\n"));
1528  
1529  	result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, lun_data,
1530  				  length, SCSI_REPORT_LUNS_TIMEOUT, 3,
1531  				  &exec_args);
1532  
1533  	SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1534  			  "scsi scan: REPORT LUNS  %s result 0x%x\n",
1535  			  result ?  "failed" : "successful", result));
1536  	if (result) {
1537  		/*
1538  		 * The device probably does not support a REPORT LUN command
1539  		 */
1540  		ret = 1;
1541  		goto out_err;
1542  	}
1543  
1544  	/*
1545  	 * Get the length from the first four bytes of lun_data.
1546  	 */
1547  	if (get_unaligned_be32(lun_data->scsi_lun) +
1548  	    sizeof(struct scsi_lun) > length) {
1549  		length = get_unaligned_be32(lun_data->scsi_lun) +
1550  			 sizeof(struct scsi_lun);
1551  		kfree(lun_data);
1552  		goto retry;
1553  	}
1554  	length = get_unaligned_be32(lun_data->scsi_lun);
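	/* The LUN LIST LENGTH field counts only the 8-byte LUN entries, not the header. */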
1555  
1556  	num_luns = (length / sizeof(struct scsi_lun));
1557  
1558  	SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
1559  		"scsi scan: REPORT LUN scan\n"));
1560  
1561  	/*
1562  	 * Scan the luns in lun_data. The entry at offset 0 is really
1563  	 * the header, so start at 1 and go up to and including num_luns.
1564  	 */
1565  	for (lunp = &lun_data[1]; lunp <= &lun_data[num_luns]; lunp++) {
1566  		lun = scsilun_to_int(lunp);
1567  
1568  		if (lun > sdev->host->max_lun) {
1569  			sdev_printk(KERN_WARNING, sdev,
1570  				    "lun%llu has a LUN larger than"
1571  				    " allowed by the host adapter\n", lun);
1572  		} else {
1573  			int res;
1574  
1575  			res = scsi_probe_and_add_lun(starget,
1576  				lun, NULL, NULL, rescan, NULL);
1577  			if (res == SCSI_SCAN_NO_RESPONSE) {
1578  				/*
1579  				 * Got some results, but now none, abort.
1580  				 */
1581  				sdev_printk(KERN_ERR, sdev,
1582  					"Unexpected response"
1583  					" from lun %llu while scanning, scan"
1584  					" aborted\n", (unsigned long long)lun);
1585  				break;
1586  			}
1587  		}
1588  	}
1589  
1590   out_err:
1591  	kfree(lun_data);
1592   out:
1593  	if (scsi_device_created(sdev))
1594  		/*
1595  		 * the sdev we used didn't appear in the report luns scan
1596  		 */
1597  		__scsi_remove_device(sdev);
1598  	scsi_device_put(sdev);
1599  	return ret;
1600  }
1601  
1602  struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
1603  				      uint id, u64 lun, void *hostdata)
1604  {
1605  	struct scsi_device *sdev = ERR_PTR(-ENODEV);
1606  	struct device *parent = &shost->shost_gendev;
1607  	struct scsi_target *starget;
1608  
1609  	if (strncmp(scsi_scan_type, "none", 4) == 0)
1610  		return ERR_PTR(-ENODEV);
1611  
1612  	starget = scsi_alloc_target(parent, channel, id);
1613  	if (!starget)
1614  		return ERR_PTR(-ENOMEM);
1615  	scsi_autopm_get_target(starget);
1616  
1617  	mutex_lock(&shost->scan_mutex);
1618  	if (!shost->async_scan)
1619  		scsi_complete_async_scans();
1620  
1621  	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1622  		scsi_probe_and_add_lun(starget, lun, NULL, &sdev,
1623  				       SCSI_SCAN_RESCAN, hostdata);
1624  		scsi_autopm_put_host(shost);
1625  	}
1626  	mutex_unlock(&shost->scan_mutex);
1627  	scsi_autopm_put_target(starget);
1628  	/*
1629  	 * Paired with scsi_alloc_target(). The target will be destroyed unless
1630  	 * scsi_probe_and_add_lun() made an underlying device visible.
1631  	 */
1632  	scsi_target_reap(starget);
1633  	put_device(&starget->dev);
1634  
1635  	return sdev;
1636  }
1637  EXPORT_SYMBOL(__scsi_add_device);
1638  
1639  int scsi_add_device(struct Scsi_Host *host, uint channel,
1640  		    uint target, u64 lun)
1641  {
1642  	struct scsi_device *sdev =
1643  		__scsi_add_device(host, channel, target, lun, NULL);
1644  	if (IS_ERR(sdev))
1645  		return PTR_ERR(sdev);
1646  
1647  	scsi_device_put(sdev);
1648  	return 0;
1649  }
1650  EXPORT_SYMBOL(scsi_add_device);
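
/*
 * Illustrative only (not a reference to an in-tree caller): a host driver
 * that has detected a device at channel 0, id 1, LUN 0 could register it
 * with the midlayer like this:
 *
 *	if (scsi_add_device(shost, 0, 1, 0))
 *		shost_printk(KERN_WARNING, shost, "device not added\n");
 */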
1651  
1652  int scsi_resume_device(struct scsi_device *sdev)
1653  {
1654  	struct device *dev = &sdev->sdev_gendev;
1655  	int ret = 0;
1656  
1657  	device_lock(dev);
1658  
1659  	/*
1660  	 * Bail out if the device or its queue are not running. Otherwise,
1661  	 * the rescan may block waiting for commands to be executed, with us
1662  	 * holding the device lock. This can deadlock against the power
1663  	 * management core code while a system resume is ongoing.
1664  	 */
1665  	if (sdev->sdev_state != SDEV_RUNNING ||
1666  	    blk_queue_pm_only(sdev->request_queue)) {
1667  		ret = -EWOULDBLOCK;
1668  		goto unlock;
1669  	}
1670  
1671  	if (dev->driver && try_module_get(dev->driver->owner)) {
1672  		struct scsi_driver *drv = to_scsi_driver(dev->driver);
1673  
1674  		if (drv->resume)
1675  			ret = drv->resume(dev);
1676  		module_put(dev->driver->owner);
1677  	}
1678  
1679  unlock:
1680  	device_unlock(dev);
1681  
1682  	return ret;
1683  }
1684  EXPORT_SYMBOL(scsi_resume_device);
1685  
1686  int scsi_rescan_device(struct scsi_device *sdev)
1687  {
1688  	struct device *dev = &sdev->sdev_gendev;
1689  	int ret = 0;
1690  
1691  	device_lock(dev);
1692  
1693  	/*
1694  	 * Bail out if the device or its queue are not running. Otherwise,
1695  	 * the rescan may block waiting for commands to be executed, with us
1696  	 * holding the device lock. This can deadlock against the power
1697  	 * management core code while a system resume is ongoing.
1698  	 */
1699  	if (sdev->sdev_state != SDEV_RUNNING ||
1700  	    blk_queue_pm_only(sdev->request_queue)) {
1701  		ret = -EWOULDBLOCK;
1702  		goto unlock;
1703  	}
1704  
1705  	scsi_attach_vpd(sdev);
1706  	scsi_cdl_check(sdev);
1707  
1708  	if (sdev->handler && sdev->handler->rescan)
1709  		sdev->handler->rescan(sdev);
1710  
1711  	if (dev->driver && try_module_get(dev->driver->owner)) {
1712  		struct scsi_driver *drv = to_scsi_driver(dev->driver);
1713  
1714  		if (drv->rescan)
1715  			drv->rescan(dev);
1716  		module_put(dev->driver->owner);
1717  	}
1718  
1719  unlock:
1720  	device_unlock(dev);
1721  
1722  	return ret;
1723  }
1724  EXPORT_SYMBOL(scsi_rescan_device);
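
/*
 * Sketch of a typical caller (illustrative; assumes the caller holds a
 * reference on @sdev): after an event that may have changed device
 * geometry or provisioning, re-read the device state with
 *
 *	ret = scsi_rescan_device(sdev);
 *
 * and treat -EWOULDBLOCK as "device or queue not running, retry later".
 */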
1725  
1726  static void __scsi_scan_target(struct device *parent, unsigned int channel,
1727  		unsigned int id, u64 lun, enum scsi_scan_mode rescan)
1728  {
1729  	struct Scsi_Host *shost = dev_to_shost(parent);
1730  	blist_flags_t bflags = 0;
1731  	int res;
1732  	struct scsi_target *starget;
1733  
1734  	if (shost->this_id == id)
1735  		/*
1736  		 * Don't scan the host adapter
1737  		 */
1738  		return;
1739  
1740  	starget = scsi_alloc_target(parent, channel, id);
1741  	if (!starget)
1742  		return;
1743  	scsi_autopm_get_target(starget);
1744  
1745  	if (lun != SCAN_WILD_CARD) {
1746  		/*
1747  		 * Scan for a specific host/chan/id/lun.
1748  		 */
1749  		scsi_probe_and_add_lun(starget, lun, NULL, NULL, rescan, NULL);
1750  		goto out_reap;
1751  	}
1752  
1753  	/*
1754  	 * Scan LUN 0; if there is some response, scan further. Ideally, we
1755  	 * would not configure LUN 0 until all LUNs are scanned.
1756  	 */
1757  	res = scsi_probe_and_add_lun(starget, 0, &bflags, NULL, rescan, NULL);
1758  	if (res == SCSI_SCAN_LUN_PRESENT || res == SCSI_SCAN_TARGET_PRESENT) {
1759  		if (scsi_report_lun_scan(starget, bflags, rescan) != 0)
1760  			/*
1761  			 * The REPORT LUN did not scan the target,
1762  			 * do a sequential scan.
1763  			 */
1764  			scsi_sequential_lun_scan(starget, bflags,
1765  						 starget->scsi_level, rescan);
1766  	}
1767  
1768   out_reap:
1769  	scsi_autopm_put_target(starget);
1770  	/*
1771  	 * paired with scsi_alloc_target(): determine if the target has
1772  	 * any children at all and if not, nuke it
1773  	 */
1774  	scsi_target_reap(starget);
1775  
1776  	put_device(&starget->dev);
1777  }
1778  
1779  /**
1780   * scsi_scan_target - scan a target id, possibly including all LUNs on the target.
1781   * @parent:	host to scan
1782   * @channel:	channel to scan
1783   * @id:		target id to scan
1784   * @lun:	Specific LUN to scan or SCAN_WILD_CARD
1785   * @rescan:	passed to LUN scanning routines; SCSI_SCAN_INITIAL for
1786   *              no rescan, SCSI_SCAN_RESCAN to rescan existing LUNs,
1787   *              and SCSI_SCAN_MANUAL to force scanning even if
1788   *              'scan=manual' is set.
1789   *
1790   * Description:
1791   *     Scan the target id on @parent, @channel, and @id. Scan at least LUN 0,
1792   *     and possibly all LUNs on the target id.
1793   *
1794   *     First try a REPORT LUN scan, if that does not scan the target, do a
1795   *     sequential scan of LUNs on the target id.
1796   **/
1797  void scsi_scan_target(struct device *parent, unsigned int channel,
1798  		      unsigned int id, u64 lun, enum scsi_scan_mode rescan)
1799  {
1800  	struct Scsi_Host *shost = dev_to_shost(parent);
1801  
1802  	if (strncmp(scsi_scan_type, "none", 4) == 0)
1803  		return;
1804  
1805  	if (rescan != SCSI_SCAN_MANUAL &&
1806  	    strncmp(scsi_scan_type, "manual", 6) == 0)
1807  		return;
1808  
1809  	mutex_lock(&shost->scan_mutex);
1810  	if (!shost->async_scan)
1811  		scsi_complete_async_scans();
1812  
1813  	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1814  		__scsi_scan_target(parent, channel, id, lun, rescan);
1815  		scsi_autopm_put_host(shost);
1816  	}
1817  	mutex_unlock(&shost->scan_mutex);
1818  }
1819  EXPORT_SYMBOL(scsi_scan_target);
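
/*
 * Illustrative call, as a transport class might issue it after discovering
 * a remote port (the channel and id shown are hypothetical):
 *
 *	scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
 *			 SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
 */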
1820  
1821  static void scsi_scan_channel(struct Scsi_Host *shost, unsigned int channel,
1822  			      unsigned int id, u64 lun,
1823  			      enum scsi_scan_mode rescan)
1824  {
1825  	uint order_id;
1826  
1827  	if (id == SCAN_WILD_CARD)
1828  		for (id = 0; id < shost->max_id; ++id) {
1829  			/*
1830  			 * XXX adapter drivers when possible (FCP, iSCSI)
1831  			 * could modify max_id to match the current max,
1832  			 * not the absolute max.
1833  			 *
1834  			 * XXX add a shost id iterator, so for example,
1835  			 * the FC ID can be the same as a target id
1836  			 * without a huge overhead of sparse id's.
1837  			 */
1838  			if (shost->reverse_ordering)
1839  				/*
1840  				 * Scan from high to low id.
1841  				 */
1842  				order_id = shost->max_id - id - 1;
1843  			else
1844  				order_id = id;
1845  			__scsi_scan_target(&shost->shost_gendev, channel,
1846  					order_id, lun, rescan);
1847  		}
1848  	else
1849  		__scsi_scan_target(&shost->shost_gendev, channel,
1850  				id, lun, rescan);
1851  }
1852  
1853  int scsi_scan_host_selected(struct Scsi_Host *shost, unsigned int channel,
1854  			    unsigned int id, u64 lun,
1855  			    enum scsi_scan_mode rescan)
1856  {
1857  	SCSI_LOG_SCAN_BUS(3, shost_printk (KERN_INFO, shost,
1858  		"%s: <%u:%u:%llu>\n",
1859  		__func__, channel, id, lun));
1860  
1861  	if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
1862  	    ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
1863  	    ((lun != SCAN_WILD_CARD) && (lun >= shost->max_lun)))
1864  		return -EINVAL;
1865  
1866  	mutex_lock(&shost->scan_mutex);
1867  	if (!shost->async_scan)
1868  		scsi_complete_async_scans();
1869  
1870  	if (scsi_host_scan_allowed(shost) && scsi_autopm_get_host(shost) == 0) {
1871  		if (channel == SCAN_WILD_CARD)
1872  			for (channel = 0; channel <= shost->max_channel;
1873  			     channel++)
1874  				scsi_scan_channel(shost, channel, id, lun,
1875  						  rescan);
1876  		else
1877  			scsi_scan_channel(shost, channel, id, lun, rescan);
1878  		scsi_autopm_put_host(shost);
1879  	}
1880  	mutex_unlock(&shost->scan_mutex);
1881  
1882  	return 0;
1883  }
1884  
1885  static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
1886  {
1887  	struct scsi_device *sdev;
1888  	shost_for_each_device(sdev, shost) {
1889  		/* target removed before the device could be added */
1890  		if (sdev->sdev_state == SDEV_DEL)
1891  			continue;
1892  		/* If device is already visible, skip adding it to sysfs */
1893  		if (sdev->is_visible)
1894  			continue;
1895  		if (!scsi_host_scan_allowed(shost) ||
1896  		    scsi_sysfs_add_sdev(sdev) != 0)
1897  			__scsi_remove_device(sdev);
1898  	}
1899  }
1900  
1901  /**
1902   * scsi_prep_async_scan - prepare for an async scan
1903   * @shost: the host which will be scanned
1904   * Returns: a cookie to be passed to scsi_finish_async_scan()
1905   *
1906   * Tells the midlayer this host is going to do an asynchronous scan.
1907   * It reserves the host's position in the scanning list and ensures
1908   * that other asynchronous scans started after this one won't affect the
1909   * ordering of the discovered devices.
1910   */
1911  static struct async_scan_data *scsi_prep_async_scan(struct Scsi_Host *shost)
1912  {
1913  	struct async_scan_data *data = NULL;
1914  	unsigned long flags;
1915  
1916  	if (strncmp(scsi_scan_type, "sync", 4) == 0)
1917  		return NULL;
1918  
1919  	mutex_lock(&shost->scan_mutex);
1920  	if (shost->async_scan) {
1921  		shost_printk(KERN_DEBUG, shost, "%s called twice\n", __func__);
1922  		goto err;
1923  	}
1924  
1925  	data = kmalloc(sizeof(*data), GFP_KERNEL);
1926  	if (!data)
1927  		goto err;
1928  	data->shost = scsi_host_get(shost);
1929  	if (!data->shost)
1930  		goto err;
1931  	init_completion(&data->prev_finished);
1932  
1933  	spin_lock_irqsave(shost->host_lock, flags);
1934  	shost->async_scan = 1;
1935  	spin_unlock_irqrestore(shost->host_lock, flags);
1936  	mutex_unlock(&shost->scan_mutex);
1937  
1938  	spin_lock(&async_scan_lock);
1939  	if (list_empty(&scanning_hosts))
1940  		complete(&data->prev_finished);
1941  	list_add_tail(&data->list, &scanning_hosts);
1942  	spin_unlock(&async_scan_lock);
1943  
1944  	return data;
1945  
1946   err:
1947  	mutex_unlock(&shost->scan_mutex);
1948  	kfree(data);
1949  	return NULL;
1950  }
1951  
1952  /**
1953   * scsi_finish_async_scan - asynchronous scan has finished
1954   * @data: cookie returned from earlier call to scsi_prep_async_scan()
1955   *
1956   * All the devices currently attached to this host have been found.
1957   * This function announces all the devices it has found to the rest
1958   * of the system.
1959   */
1960  static void scsi_finish_async_scan(struct async_scan_data *data)
1961  {
1962  	struct Scsi_Host *shost;
1963  	unsigned long flags;
1964  
1965  	if (!data)
1966  		return;
1967  
1968  	shost = data->shost;
1969  
1970  	mutex_lock(&shost->scan_mutex);
1971  
1972  	if (!shost->async_scan) {
1973  		shost_printk(KERN_INFO, shost, "%s called twice\n", __func__);
1974  		dump_stack();
1975  		mutex_unlock(&shost->scan_mutex);
1976  		return;
1977  	}
1978  
1979  	wait_for_completion(&data->prev_finished);
1980  
1981  	scsi_sysfs_add_devices(shost);
1982  
1983  	spin_lock_irqsave(shost->host_lock, flags);
1984  	shost->async_scan = 0;
1985  	spin_unlock_irqrestore(shost->host_lock, flags);
1986  
1987  	mutex_unlock(&shost->scan_mutex);
1988  
1989  	spin_lock(&async_scan_lock);
1990  	list_del(&data->list);
1991  	if (!list_empty(&scanning_hosts)) {
1992  		struct async_scan_data *next = list_entry(scanning_hosts.next,
1993  				struct async_scan_data, list);
1994  		complete(&next->prev_finished);
1995  	}
1996  	spin_unlock(&async_scan_lock);
1997  
1998  	scsi_autopm_put_host(shost);
1999  	scsi_host_put(shost);
2000  	kfree(data);
2001  }
2002  
2003  static void do_scsi_scan_host(struct Scsi_Host *shost)
2004  {
2005  	if (shost->hostt->scan_finished) {
2006  		unsigned long start = jiffies;
2007  		if (shost->hostt->scan_start)
2008  			shost->hostt->scan_start(shost);
2009  
2010  		while (!shost->hostt->scan_finished(shost, jiffies - start))
2011  			msleep(10);
2012  	} else {
2013  		scsi_scan_host_selected(shost, SCAN_WILD_CARD, SCAN_WILD_CARD,
2014  				SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
2015  	}
2016  }
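
/*
 * Hosts that drive their own discovery provide ->scan_start() and
 * ->scan_finished() in their host template. A minimal, illustrative
 * ->scan_finished() that also bounds the wait to ten seconds could be:
 *
 *	static int my_scan_finished(struct Scsi_Host *shost,
 *				    unsigned long elapsed)
 *	{
 *		return my_discovery_done(shost) || elapsed > 10 * HZ;
 *	}
 *
 * where my_scan_finished and my_discovery_done are hypothetical,
 * driver-private names.
 */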
2017  
2018  static void do_scan_async(void *_data, async_cookie_t c)
2019  {
2020  	struct async_scan_data *data = _data;
2021  	struct Scsi_Host *shost = data->shost;
2022  
2023  	do_scsi_scan_host(shost);
2024  	scsi_finish_async_scan(data);
2025  }
2026  
2027  /**
2028   * scsi_scan_host - scan the given adapter
2029   * @shost:	adapter to scan
2030   **/
2031  void scsi_scan_host(struct Scsi_Host *shost)
2032  {
2033  	struct async_scan_data *data;
2034  
2035  	if (strncmp(scsi_scan_type, "none", 4) == 0 ||
2036  	    strncmp(scsi_scan_type, "manual", 6) == 0)
2037  		return;
2038  	if (scsi_autopm_get_host(shost) < 0)
2039  		return;
2040  
2041  	data = scsi_prep_async_scan(shost);
2042  	if (!data) {
2043  		do_scsi_scan_host(shost);
2044  		scsi_autopm_put_host(shost);
2045  		return;
2046  	}
2047  
2048  	/* Register with the async subsystem so wait_for_device_probe()
2049  	 * will flush this work.
2050  	 */
2051  	async_schedule(do_scan_async, data);
2052  
2053  	/* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
2054  }
2055  EXPORT_SYMBOL(scsi_scan_host);
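
/*
 * Typical low-level driver probe sequence (illustrative):
 *
 *	error = scsi_add_host(shost, &pdev->dev);
 *	if (error)
 *		goto out_put_host;
 *	scsi_scan_host(shost);
 *
 * scsi_scan_host() returns immediately when asynchronous scanning is in
 * effect; discovery then proceeds from the async work scheduled above.
 */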
2056  
2057  void scsi_forget_host(struct Scsi_Host *shost)
2058  {
2059  	struct scsi_device *sdev;
2060  	unsigned long flags;
2061  
2062   restart:
2063  	spin_lock_irqsave(shost->host_lock, flags);
2064  	list_for_each_entry(sdev, &shost->__devices, siblings) {
2065  		if (sdev->sdev_state == SDEV_DEL)
2066  			continue;
2067  		spin_unlock_irqrestore(shost->host_lock, flags);
2068  		__scsi_remove_device(sdev);
2069  		goto restart;
2070  	}
2071  	spin_unlock_irqrestore(shost->host_lock, flags);
2072  }
2073  
2074