/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SCSI_SCSI_HOST_H
#define _SCSI_SCSI_HOST_H

#include <linux/device.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/blk-mq.h>
#include <scsi/scsi.h>

struct block_device;
struct completion;
struct module;
struct scsi_cmnd;
struct scsi_device;
struct scsi_target;
struct Scsi_Host;
struct scsi_transport_template;


#define SG_ALL	SG_CHUNK_SIZE

#define MODE_UNKNOWN 0x00
#define MODE_INITIATOR 0x01
#define MODE_TARGET 0x02

/**
 * enum scsi_timeout_action - How to handle a command that timed out.
 * @SCSI_EH_DONE: The command has already been completed.
 * @SCSI_EH_RESET_TIMER: Reset the timer and continue waiting for completion.
 * @SCSI_EH_NOT_HANDLED: The command has not yet finished. Abort the command.
 */
enum scsi_timeout_action {
	SCSI_EH_DONE,
	SCSI_EH_RESET_TIMER,
	SCSI_EH_NOT_HANDLED,
};
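
/*
 * Example: a minimal sketch of an eh_timed_out callback that maps these
 * values onto a hypothetical driver's firmware state.  The foo_* names
 * are illustrative only and not part of this interface.
 *
 *	static enum scsi_timeout_action foo_eh_timed_out(struct scsi_cmnd *scmd)
 *	{
 *		struct foo_host *fh = shost_priv(scmd->device->host);
 *
 *		if (foo_cmd_completed_in_fw(fh, scmd))
 *			return SCSI_EH_DONE;		// already done, no EH needed
 *		if (foo_fw_reset_in_progress(fh))
 *			return SCSI_EH_RESET_TIMER;	// give the command more time
 *		return SCSI_EH_NOT_HANDLED;		// let the midlayer abort it
 *	}
 */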

struct scsi_host_template {
	/*
	 * Put fields referenced in IO submission path together in
	 * same cacheline
	 */

	/*
	 * Additional per-command data allocated for the driver.
	 */
	unsigned int cmd_size;

	/*
	 * The queuecommand function is used to queue up a scsi
	 * command block to the LLDD.  When the driver has finished
	 * processing the command, the done callback is invoked.
	 *
	 * If queuecommand returns 0, then the driver has accepted the
	 * command.  It must also push it to the HBA if the scsi_cmnd
	 * flag SCMD_LAST is set, or if the driver does not implement
	 * commit_rqs.  The done() function must be called on the command
	 * when the driver has finished with it. (you may call done on the
	 * command before queuecommand returns, but in this case you
	 * *must* return 0 from queuecommand).
	 *
	 * Queuecommand may also reject the command, in which case it may
	 * not touch the command and must not call done() for it.
	 *
	 * There are two possible rejection returns:
	 *
	 *   SCSI_MLQUEUE_DEVICE_BUSY: Block this device temporarily, but
	 *   allow commands to other devices serviced by this host.
	 *
	 *   SCSI_MLQUEUE_HOST_BUSY: Block all devices served by this
	 *   host temporarily.
	 *
	 * For compatibility, any other non-zero return is treated the
	 * same as SCSI_MLQUEUE_HOST_BUSY.
	 *
	 * NOTE: "temporarily" means either until the next command for
	 * this device/host completes, or a period of time determined by
	 * I/O pressure in the system if there are no other outstanding
	 * commands.
	 *
	 * STATUS: REQUIRED
	 */
	int (* queuecommand)(struct Scsi_Host *, struct scsi_cmnd *);
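
	/*
	 * Example: a minimal queuecommand sketch for a hypothetical "foo"
	 * driver.  It accepts the command, hands it to the hardware and
	 * returns 0, or rejects it with SCSI_MLQUEUE_HOST_BUSY when no
	 * hardware slot is free.  The foo_* helpers are assumptions, not
	 * part of the SCSI midlayer.
	 *
	 *	static int foo_queuecommand(struct Scsi_Host *shost,
	 *				    struct scsi_cmnd *scmd)
	 *	{
	 *		struct foo_host *fh = shost_priv(shost);
	 *
	 *		if (!foo_hw_slot_available(fh))
	 *			return SCSI_MLQUEUE_HOST_BUSY;	// retried later
	 *
	 *		foo_submit_to_hw(fh, scmd);	// scsi_done() is called
	 *						// from the IRQ handler
	 *		return 0;
	 *	}
	 */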

	/*
	 * The commit_rqs function is used to trigger a hardware
	 * doorbell after some requests have been queued with
	 * queuecommand, when an error is encountered before sending
	 * the request with SCMD_LAST set.
	 *
	 * STATUS: OPTIONAL
	 */
	void (*commit_rqs)(struct Scsi_Host *, u16);

	struct module *module;
	const char *name;

	/*
	 * The info function will return whatever useful information the
	 * developer sees fit.  If not provided, then the name field will
	 * be used instead.
	 *
	 * Status: OPTIONAL
	 */
	const char *(*info)(struct Scsi_Host *);

	/*
	 * Ioctl interface
	 *
	 * Status: OPTIONAL
	 */
	int (*ioctl)(struct scsi_device *dev, unsigned int cmd,
		     void __user *arg);


#ifdef CONFIG_COMPAT
	/*
	 * Compat handler. Handle 32bit ABI.
	 * When an unknown ioctl is passed, return -ENOIOCTLCMD.
	 *
	 * Status: OPTIONAL
	 */
	int (*compat_ioctl)(struct scsi_device *dev, unsigned int cmd,
			    void __user *arg);
#endif

	int (*init_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);
	int (*exit_cmd_priv)(struct Scsi_Host *shost, struct scsi_cmnd *cmd);

	/*
	 * This is an error handling strategy routine.  You don't need to
	 * define one of these if you don't want to - there is a default
	 * routine present that should work in most cases.  For those
	 * driver authors that have the inclination and ability to write their
	 * own strategy routine, this is where it is specified.  Note - the
	 * strategy routine is *ALWAYS* run in the context of the kernel eh
	 * thread.  Thus you are guaranteed to *NOT* be in an interrupt
	 * handler when you execute this, and you are also guaranteed to
	 * *NOT* have any other commands being queued while you are in the
	 * strategy routine. When you return from this function, operations
	 * return to normal.
	 *
	 * See scsi_error.c scsi_unjam_host for additional comments about
	 * what this function should and should not be attempting to do.
	 *
	 * Status: REQUIRED	(at least one of them)
	 */
	int (* eh_abort_handler)(struct scsi_cmnd *);
	int (* eh_device_reset_handler)(struct scsi_cmnd *);
	int (* eh_target_reset_handler)(struct scsi_cmnd *);
	int (* eh_bus_reset_handler)(struct scsi_cmnd *);
	int (* eh_host_reset_handler)(struct scsi_cmnd *);
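
	/*
	 * Example: a minimal eh_abort_handler sketch.  It asks the
	 * (hypothetical) firmware to abort the command and reports the
	 * outcome to the midlayer with SUCCESS or FAILED.  The foo_*
	 * helpers are assumptions used for illustration only.
	 *
	 *	static int foo_eh_abort_handler(struct scsi_cmnd *scmd)
	 *	{
	 *		struct foo_host *fh = shost_priv(scmd->device->host);
	 *
	 *		if (foo_abort_in_fw(fh, scmd))	// command no longer
	 *			return SUCCESS;		// owned by the hardware
	 *
	 *		return FAILED;	// escalate to device/target/host reset
	 *	}
	 */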

	/*
	 * Before the mid layer attempts to scan for a new device where none
	 * currently exists, it will call this entry in your driver.  Should
	 * your driver need to allocate any structs or perform any other init
	 * items in order to send commands to a currently unused target/lun
	 * combo, then this is where you can perform those allocations.  This
	 * is specifically so that drivers won't have to perform any kind of
	 * "is this a new device" checks in their queuecommand routine,
	 * thereby making the hot path a bit quicker.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Deallocation:  If we didn't find any devices at this ID, you will
	 * get an immediate call to slave_destroy().  If we find something
	 * here then you will get a call to slave_configure(), then the
	 * device will be used for however long it is kept around, then when
	 * the device is removed from the system (or possibly at reboot
	 * time), you will then get a call to slave_destroy().  This is
	 * assuming you implement slave_configure and slave_destroy.
	 * However, if you allocate memory and hang it off the device struct,
	 * then you must implement the slave_destroy() routine at a minimum
	 * in order to avoid leaking memory each time a device is torn down.
	 *
	 * Status: OPTIONAL
	 */
	int (* slave_alloc)(struct scsi_device *);
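
	/*
	 * Example: a minimal slave_alloc sketch that attaches per-LUN
	 * driver data to the scsi_device.  The struct foo_lun type is a
	 * hypothetical per-device structure, not something defined by the
	 * midlayer; the matching kfree() would live in slave_destroy().
	 *
	 *	static int foo_slave_alloc(struct scsi_device *sdev)
	 *	{
	 *		struct foo_lun *lun;
	 *
	 *		lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	 *		if (!lun)
	 *			return -ENOMEM;
	 *
	 *		sdev->hostdata = lun;	// freed in foo_slave_destroy()
	 *		return 0;
	 *	}
	 */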

	/*
	 * Once the device has responded to an INQUIRY and we know the
	 * device is online, we call into the low level driver with the
	 * struct scsi_device *.  If the low level device driver implements
	 * this function, it *must* perform the task of setting the queue
	 * depth on the device.  All other tasks are optional and depend
	 * on what the driver supports and various implementation details.
	 *
	 * Things currently recommended to be handled at this time include:
	 *
	 * 1.  Setting the device queue depth.  Proper setting of this is
	 *     described in the comments for scsi_change_queue_depth.
	 * 2.  Determining if the device supports the various synchronous
	 *     negotiation protocols.  The device struct will already have
	 *     responded to INQUIRY and the results of the standard items
	 *     will have been shoved into the various device flag bits, eg.
	 *     device->sdtr will be true if the device supports SDTR messages.
	 * 3.  Allocating command structs that the device will need.
	 * 4.  Setting the default timeout on this device (if needed).
	 * 5.  Anything else the low level driver might want to do on a device
	 *     specific setup basis...
	 * 6.  Return 0 on success, non-0 on error.  The device will be marked
	 *     as offline on error so that no access will occur.  If you return
	 *     non-0, your slave_destroy routine will never get called for this
	 *     device, so don't leave any loose memory hanging around; clean
	 *     up after yourself before returning non-0.
	 *
	 * Status: OPTIONAL
	 *
	 * Note: slave_configure is the legacy version, use device_configure for
	 * all new code.  A driver must never define both.
	 */
	int (* device_configure)(struct scsi_device *, struct queue_limits *lim);
	int (* slave_configure)(struct scsi_device *);
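
	/*
	 * Example: a minimal slave_configure sketch that sets the queue
	 * depth and a default command timeout.  The depth of 32 and the
	 * 30 second timeout are made-up values for illustration only.
	 *
	 *	static int foo_slave_configure(struct scsi_device *sdev)
	 *	{
	 *		scsi_change_queue_depth(sdev, 32);
	 *		blk_queue_rq_timeout(sdev->request_queue, 30 * HZ);
	 *		return 0;
	 *	}
	 */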

	/*
	 * Immediately prior to deallocating the device and after all activity
	 * has ceased the mid layer calls this point so that the low level
	 * driver may completely detach itself from the scsi device and vice
	 * versa.  The low level driver is responsible for freeing any memory
	 * it allocated in the slave_alloc or slave_configure calls.
	 *
	 * Status: OPTIONAL
	 */
	void (* slave_destroy)(struct scsi_device *);

	/*
	 * Before the mid layer attempts to scan for a new device attached
	 * to a target where no target currently exists, it will call this
	 * entry in your driver.  Should your driver need to allocate any
	 * structs or perform any other init items in order to send commands
	 * to a currently unused target, then this is where you can perform
	 * those allocations.
	 *
	 * Return values: 0 on success, non-0 on failure
	 *
	 * Status: OPTIONAL
	 */
	int (* target_alloc)(struct scsi_target *);

	/*
	 * Immediately prior to deallocating the target structure, and
	 * after all activity to attached scsi devices has ceased, the
	 * midlayer calls this point so that the driver may deallocate
	 * and terminate any references to the target.
	 *
	 * Note: This callback is called with the host lock held and hence
	 * must not sleep.
	 *
	 * Status: OPTIONAL
	 */
	void (* target_destroy)(struct scsi_target *);

	/*
	 * If a host has the ability to discover targets on its own instead
	 * of scanning the entire bus, it can fill in this function and
	 * call scsi_scan_host().  This function will be called periodically
	 * until it returns 1 with the scsi_host and the elapsed time of
	 * the scan in jiffies.
	 *
	 * Status: OPTIONAL
	 */
	int (* scan_finished)(struct Scsi_Host *, unsigned long);

	/*
	 * If the host wants to be called before the scan starts, but
	 * after the midlayer has been set up ready for the scan, it can
	 * fill in this function.
	 *
	 * Status: OPTIONAL
	 */
	void (* scan_start)(struct Scsi_Host *);

	/*
	 * Fill in this function to allow the queue depth of this host
	 * to be changeable (on a per device basis).  Returns either
	 * the current queue depth setting (may be different from what
	 * was passed in) or an error.  An error should only be
	 * returned if the requested depth is legal but the driver was
	 * unable to set it.  If the requested depth is illegal, the
	 * driver should set and return the closest legal queue depth.
	 *
	 * Status: OPTIONAL
	 */
	int (* change_queue_depth)(struct scsi_device *, int);
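
	/*
	 * Example: a minimal change_queue_depth sketch that clamps the
	 * requested depth to a hypothetical per-host limit before handing
	 * it to scsi_change_queue_depth(), which returns the depth that
	 * was actually set.  The max_lun_depth field is illustrative only.
	 *
	 *	static int foo_change_queue_depth(struct scsi_device *sdev,
	 *					  int depth)
	 *	{
	 *		struct foo_host *fh = shost_priv(sdev->host);
	 *
	 *		if (depth > fh->max_lun_depth)
	 *			depth = fh->max_lun_depth;
	 *
	 *		return scsi_change_queue_depth(sdev, depth);
	 *	}
	 */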

	/*
	 * This function lets the driver expose the queue mapping
	 * to the block layer.
	 *
	 * Status: OPTIONAL
	 */
	void (* map_queues)(struct Scsi_Host *shost);
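
	/*
	 * Example: a minimal map_queues sketch.  A driver with no special
	 * interrupt affinity requirements can simply let the block layer
	 * spread CPUs over the default hardware queue map; drivers with
	 * per-queue MSI-X vectors would use an affinity-aware helper
	 * instead.
	 *
	 *	static void foo_map_queues(struct Scsi_Host *shost)
	 *	{
	 *		blk_mq_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT]);
	 *	}
	 */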

	/*
	 * SCSI interface of blk_poll - poll for IO completions.
	 * Only applicable if SCSI LLD exposes multiple h/w queues.
	 *
	 * Return value: Number of completed entries found.
	 *
	 * Status: OPTIONAL
	 */
	int (* mq_poll)(struct Scsi_Host *shost, unsigned int queue_num);

	/*
	 * Check if scatterlists need to be padded for DMA draining.
	 *
	 * Status: OPTIONAL
	 */
	bool (* dma_need_drain)(struct request *rq);

	/*
	 * This function determines the BIOS parameters for a given
	 * hard disk.  These tend to be numbers that are made up by
	 * the host adapter.  Parameters:
	 * size, device, list (heads, sectors, cylinders)
	 *
	 * Status: OPTIONAL
	 */
	int (* bios_param)(struct scsi_device *, struct block_device *,
			sector_t, int []);

	/*
	 * This function is called when one or more partitions on the
	 * device reach beyond the end of the device.
	 *
	 * Status: OPTIONAL
	 */
	void (*unlock_native_capacity)(struct scsi_device *);

	/*
	 * Can be used to export driver statistics and other infos to the
	 * world outside the kernel, i.e. userspace, and it also provides an
	 * interface to feed the driver with information.
	 *
	 * Status: OBSOLETE
	 */
	int (*show_info)(struct seq_file *, struct Scsi_Host *);
	int (*write_info)(struct Scsi_Host *, char *, int);

	/*
	 * This is an optional routine that allows the transport to become
	 * involved when a scsi io timer fires. The return value tells the
	 * timer routine how to finish the io timeout handling.
	 *
	 * Status: OPTIONAL
	 */
	enum scsi_timeout_action (*eh_timed_out)(struct scsi_cmnd *);
	/*
	 * Optional routine that allows the transport to decide if a cmd
	 * is retryable. Return true if the transport is in a state the
	 * cmd should be retried on.
	 */
	bool (*eh_should_retry_cmd)(struct scsi_cmnd *scmd);

	/* This is an optional routine that allows the transport to initiate
	 * LLD adapter or firmware reset using sysfs attribute.
	 *
	 * Return values: 0 on success, -ve value on failure.
	 *
	 * Status: OPTIONAL
	 */
	int (*host_reset)(struct Scsi_Host *shost, int reset_type);
#define SCSI_ADAPTER_RESET	1
#define SCSI_FIRMWARE_RESET	2


	/*
	 * Name of proc directory
	 */
	const char *proc_name;

	/*
	 * This determines if we will use a non-interrupt driven
	 * or an interrupt driven scheme.  It is set to the maximum number
	 * of simultaneous commands a single hw queue in HBA will accept.
	 */
	int can_queue;

	/*
	 * In many instances, especially where disconnect / reconnect are
	 * supported, our host also has an ID on the SCSI bus.  If this is
	 * the case, then it must be reserved.  Please set this_id to -1 if
	 * your setup is in single initiator mode, and the host lacks an
	 * ID.
	 */
	int this_id;

	/*
	 * This determines the degree to which the host adapter is capable
	 * of scatter-gather.
	 */
	unsigned short sg_tablesize;
	unsigned short sg_prot_tablesize;

	/*
	 * Set this if the host adapter has limitations besides the segment
	 * count.
	 */
	unsigned int max_sectors;

	/*
	 * Maximum size in bytes of a single segment.
	 */
	unsigned int max_segment_size;

	unsigned int dma_alignment;

	/*
	 * DMA scatter gather segment boundary limit. A segment crossing this
	 * boundary will be split in two.
	 */
	unsigned long dma_boundary;

	unsigned long virt_boundary_mask;

	/*
	 * This specifies "machine infinity" for host templates which don't
	 * limit the transfer size.  Note this limit represents an absolute
	 * maximum, and may be over the transfer limits allowed for
	 * individual devices (e.g. 256 for SCSI-1).
	 */
#define SCSI_DEFAULT_MAX_SECTORS	1024

	/*
	 * True if this host adapter can make good use of linked commands.
	 * This will allow more than one command to be queued to a given
	 * unit on a given host.  Set this to the maximum number of command
	 * blocks to be provided for each device.  Set this to 1 for one
	 * command block per lun, 2 for two, etc.  Do not set this to 0.
	 * You should make sure that the host adapter will do the right thing
	 * before you try setting this above 1.
	 */
	short cmd_per_lun;

	/* If the block layer is used to manage tags, this is the tag
	 * allocation policy. */
	int tag_alloc_policy;

	/*
	 * Track QUEUE_FULL events and reduce queue depth on demand.
	 */
	unsigned track_queue_depth:1;

	/*
	 * This specifies the mode that a LLD supports.
	 */
	unsigned supported_mode:2;

	/*
	 * True for emulated SCSI host adapters (e.g. ATAPI).
	 */
	unsigned emulated:1;

	/*
	 * True if the low-level driver performs its own reset-settle delays.
	 */
	unsigned skip_settle_delay:1;

	/* True if the controller does not support WRITE SAME */
	unsigned no_write_same:1;

	/* True if the host uses host-wide tagspace */
	unsigned host_tagset:1;

	/* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */
	unsigned queuecommand_may_block:1;

	/*
	 * Countdown for host blocking with no commands outstanding.
	 */
	unsigned int max_host_blocked;

	/*
	 * Default value for the blocking.  If the queue is empty,
	 * host_blocked counts down in the request_fn until it restarts
	 * host operations as zero is reached.
	 *
	 * FIXME: This should probably be a value in the template
	 */
#define SCSI_DEFAULT_HOST_BLOCKED	7

	/*
	 * Pointer to the SCSI host sysfs attribute groups, NULL terminated.
	 */
	const struct attribute_group **shost_groups;

	/*
	 * Pointer to the SCSI device attribute groups for this host,
	 * NULL terminated.
	 */
	const struct attribute_group **sdev_groups;

	/*
	 * Vendor Identifier associated with the host
	 *
	 * Note: When specifying vendor_id, be sure to read the
	 *   Vendor Type and ID formatting requirements specified in
	 *   scsi_netlink.h
	 */
	u64 vendor_id;
};

/*
 * Temporary #define for host lock push down. Can be removed when all
 * drivers have been updated to take advantage of unlocked
 * queuecommand.
 *
 */
#define DEF_SCSI_QCMD(func_name) \
	int func_name(struct Scsi_Host *shost, struct scsi_cmnd *cmd)	\
	{								\
		unsigned long irq_flags;				\
		int rc;							\
		spin_lock_irqsave(shost->host_lock, irq_flags);		\
		rc = func_name##_lck(cmd);				\
		spin_unlock_irqrestore(shost->host_lock, irq_flags);	\
		return rc;						\
	}
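
/*
 * Example: a minimal sketch of how a legacy driver uses DEF_SCSI_QCMD.
 * The driver implements a foo_queuecommand_lck() function (the name and
 * body are hypothetical) and the macro generates the foo_queuecommand()
 * wrapper that takes the host lock around it:
 *
 *	static int foo_queuecommand_lck(struct scsi_cmnd *scmd)
 *	{
 *		// runs with shost->host_lock held and interrupts disabled
 *		return foo_start_cmd(scmd);
 *	}
 *
 *	static DEF_SCSI_QCMD(foo_queuecommand)
 *
 * The host template then points at the generated wrapper:
 *
 *	.queuecommand = foo_queuecommand,
 */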


/*
 * shost state: If you alter this, you also need to alter scsi_sysfs.c
 * (for the ascii descriptions) and the state model enforcer:
 * scsi_host_set_state()
 */
enum scsi_host_state {
	SHOST_CREATED = 1,
	SHOST_RUNNING,
	SHOST_CANCEL,
	SHOST_DEL,
	SHOST_RECOVERY,
	SHOST_CANCEL_RECOVERY,
	SHOST_DEL_RECOVERY,
};

struct Scsi_Host {
	/*
	 * __devices is protected by the host_lock, but you should
	 * usually use scsi_device_lookup / shost_for_each_device
	 * to access it and don't care about locking yourself.
	 * In the rare case of being in irq context you can use
	 * their __ prefixed variants with the lock held. NEVER
	 * access this list directly from a driver.
	 */
	struct list_head	__devices;
	struct list_head	__targets;

	struct list_head	starved_list;

	spinlock_t		default_lock;
	spinlock_t		*host_lock;

	struct mutex		scan_mutex;/* serialize scanning activity */

	struct list_head	eh_abort_list;
	struct list_head	eh_cmd_q;
	struct task_struct    * ehandler;  /* Error recovery thread. */
	struct completion     * eh_action; /* Wait for specific actions on the
					      host. */
	wait_queue_head_t       host_wait;
	const struct scsi_host_template *hostt;
	struct scsi_transport_template *transportt;

	struct kref		tagset_refcnt;
	struct completion	tagset_freed;
	/* Area to keep a shared tag map */
	struct blk_mq_tag_set	tag_set;

	atomic_t host_blocked;

	unsigned int host_failed;	   /* commands that failed.
					      protected by host_lock */
	unsigned int host_eh_scheduled;    /* EH scheduled without command */

	unsigned int host_no;  /* Used for IOCTL_GET_IDLUN, /proc/scsi et al. */

	/* next two fields are used to bound the time spent in error handling */
	int eh_deadline;
	unsigned long last_reset;


	/*
	 * These three parameters can be used to allow for wide scsi,
	 * and for host adapters that support multiple busses
	 * The last two should be set to 1 more than the actual max id
	 * or lun (e.g. 8 for SCSI parallel systems).
	 */
	unsigned int max_channel;
	unsigned int max_id;
	u64 max_lun;

	/*
	 * This is a unique identifier that must be assigned so that we
	 * have some way of identifying each detected host adapter properly
	 * and uniquely.  For hosts that do not support more than one card
	 * in the system at one time, this does not need to be set.  It is
	 * initialized to 0 in scsi_register.
	 */
	unsigned int unique_id;

	/*
	 * The maximum length of SCSI commands that this host can accept.
	 * Probably 12 for most host adapters, but could be 16 for others,
	 * or 260 if the driver supports variable length cdbs.
	 * For drivers that don't set this field, a value of 12 is
	 * assumed.
	 */
	unsigned short max_cmd_len;

	int this_id;
	int can_queue;
	short cmd_per_lun;
	short unsigned int sg_tablesize;
	short unsigned int sg_prot_tablesize;
	unsigned int max_sectors;
	unsigned int opt_sectors;
	unsigned int max_segment_size;
	unsigned int dma_alignment;
	unsigned long dma_boundary;
	unsigned long virt_boundary_mask;
	/*
	 * In scsi-mq mode, the number of hardware queues supported by the LLD.
	 *
	 * Note: it is assumed that each hardware queue has a queue depth of
	 * can_queue. In other words, the total queue depth per host
	 * is nr_hw_queues * can_queue. However, when host_tagset is set,
	 * the total queue depth is can_queue.
	 */
	unsigned nr_hw_queues;
	unsigned nr_maps;
	unsigned active_mode:2;

	/*
	 * Host has requested that no further requests come through for the
	 * time being.
	 */
	unsigned host_self_blocked:1;

	/*
	 * Host uses correct SCSI ordering not PC ordering. The bit is
	 * set for the minority of drivers whose authors actually read
	 * the spec ;).
	 */
	unsigned reverse_ordering:1;

	/* Task mgmt function in progress */
	unsigned tmf_in_progress:1;

	/* Asynchronous scan in progress */
	unsigned async_scan:1;

	/* Don't resume host in EH */
	unsigned eh_noresume:1;

	/* The controller does not support WRITE SAME */
	unsigned no_write_same:1;

	/* True if the host uses host-wide tagspace */
	unsigned host_tagset:1;

	/* The queuecommand callback may block. See also BLK_MQ_F_BLOCKING. */
	unsigned queuecommand_may_block:1;

	/* Host responded with short (<36 bytes) INQUIRY result */
	unsigned short_inquiry:1;

	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
	unsigned no_scsi2_lun_in_cdb:1;

	unsigned no_highmem:1;

	/*
	 * Optional work queue to be utilized by the transport
	 */
	struct workqueue_struct *work_q;

	/*
	 * Task management function work queue
	 */
	struct workqueue_struct *tmf_work_q;

	/*
	 * Value host_blocked counts down from
	 */
	unsigned int max_host_blocked;

	/* Protection Information */
	unsigned int prot_capabilities;
	unsigned char prot_guard_type;

	/* legacy crap */
	unsigned long base;
	unsigned long io_port;
	unsigned char n_io_port;
	unsigned char dma_channel;
	unsigned int  irq;


	enum scsi_host_state shost_state;

	/* ldm bits */
	struct device		shost_gendev, shost_dev;

	/*
	 * Points to the transport data (if any) which is allocated
	 * separately
	 */
	void *shost_data;

	/*
	 * Points to the physical bus device we'd use to do DMA
	 * Needed just in case we have virtual hosts.
	 */
	struct device *dma_dev;

	/* Delay for runtime autosuspend */
	int rpm_autosuspend_delay;

	/*
	 * We should ensure that this is aligned, both for better performance
	 * and also because some compilers (m68k) don't automatically force
	 * alignment to a long boundary.
	 */
	unsigned long hostdata[]  /* Used for storage of host specific stuff */
		__attribute__ ((aligned (sizeof(unsigned long))));
};

#define		class_to_shost(d)	\
	container_of(d, struct Scsi_Host, shost_dev)

#define shost_printk(prefix, shost, fmt, a...)	\
	dev_printk(prefix, &(shost)->shost_gendev, fmt, ##a)

static inline void *shost_priv(struct Scsi_Host *shost)
{
	return (void *)shost->hostdata;
}
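
/*
 * Example: a minimal sketch of the usual hostdata pattern.  A driver
 * passes the size of its private structure to scsi_host_alloc() and then
 * uses shost_priv() to reach that area; struct foo_host is hypothetical.
 *
 *	struct foo_host {
 *		void __iomem *regs;
 *		spinlock_t lock;
 *	};
 *
 *	shost = scsi_host_alloc(&foo_template, sizeof(struct foo_host));
 *	if (shost) {
 *		struct foo_host *fh = shost_priv(shost);
 *
 *		spin_lock_init(&fh->lock);
 *	}
 */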

int scsi_is_host_device(const struct device *);

static inline struct Scsi_Host *dev_to_shost(struct device *dev)
{
	while (!scsi_is_host_device(dev)) {
		if (!dev->parent)
			return NULL;
		dev = dev->parent;
	}
	return container_of(dev, struct Scsi_Host, shost_gendev);
}

static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RECOVERY ||
		shost->shost_state == SHOST_CANCEL_RECOVERY ||
		shost->shost_state == SHOST_DEL_RECOVERY ||
		shost->tmf_in_progress;
}

extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
extern void scsi_flush_work(struct Scsi_Host *);

extern struct Scsi_Host *scsi_host_alloc(const struct scsi_host_template *, int);
extern int __must_check scsi_add_host_with_dma(struct Scsi_Host *,
					       struct device *,
					       struct device *);
#if defined(CONFIG_SCSI_PROC_FS)
struct proc_dir_entry *
scsi_template_proc_dir(const struct scsi_host_template *sht);
#else
#define scsi_template_proc_dir(sht) NULL
#endif
extern void scsi_scan_host(struct Scsi_Host *);
extern int scsi_resume_device(struct scsi_device *sdev);
extern int scsi_rescan_device(struct scsi_device *sdev);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned int hostnum);
extern const char *scsi_host_state_name(enum scsi_host_state);
extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
					    enum scsi_host_status status);

static inline int __must_check scsi_add_host(struct Scsi_Host *host,
					     struct device *dev)
{
	return scsi_add_host_with_dma(host, dev, dev);
}
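
/*
 * Example: a minimal sketch of the usual host registration sequence in a
 * probe routine.  foo_template, the parent pci_dev and the limits chosen
 * here are hypothetical; the matching teardown is scsi_remove_host()
 * followed by scsi_host_put().
 *
 *	shost = scsi_host_alloc(&foo_template, sizeof(struct foo_host));
 *	if (!shost)
 *		return -ENOMEM;
 *
 *	shost->max_id = 16;
 *	shost->max_lun = 8;
 *
 *	error = scsi_add_host(shost, &pdev->dev);
 *	if (error)
 *		goto put_host;
 *
 *	scsi_scan_host(shost);	// asynchronous device discovery
 *	return 0;
 *
 *  put_host:
 *	scsi_host_put(shost);
 *	return error;
 */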

static inline struct device *scsi_get_device(struct Scsi_Host *shost)
{
	return shost->shost_gendev.parent;
}

/**
 * scsi_host_scan_allowed - Is scanning of this host allowed
 * @shost:	Pointer to Scsi_Host.
 **/
static inline int scsi_host_scan_allowed(struct Scsi_Host *shost)
{
	return shost->shost_state == SHOST_RUNNING ||
	       shost->shost_state == SHOST_RECOVERY;
}

extern void scsi_unblock_requests(struct Scsi_Host *);
extern void scsi_block_requests(struct Scsi_Host *);
extern int scsi_host_block(struct Scsi_Host *shost);
extern int scsi_host_unblock(struct Scsi_Host *shost, int new_state);

void scsi_host_busy_iter(struct Scsi_Host *,
			 bool (*fn)(struct scsi_cmnd *, void *), void *priv);

struct class_container;

/*
 * DIF defines the exchange of protection information between
 * initiator and SBC block device.
 *
 * DIX defines the exchange of protection information between OS and
 * initiator.
 */
enum scsi_host_prot_capabilities {
	SHOST_DIF_TYPE1_PROTECTION = 1 << 0, /* T10 DIF Type 1 */
	SHOST_DIF_TYPE2_PROTECTION = 1 << 1, /* T10 DIF Type 2 */
	SHOST_DIF_TYPE3_PROTECTION = 1 << 2, /* T10 DIF Type 3 */

	SHOST_DIX_TYPE0_PROTECTION = 1 << 3, /* DIX between OS and HBA only */
	SHOST_DIX_TYPE1_PROTECTION = 1 << 4, /* DIX with DIF Type 1 */
	SHOST_DIX_TYPE2_PROTECTION = 1 << 5, /* DIX with DIF Type 2 */
	SHOST_DIX_TYPE3_PROTECTION = 1 << 6, /* DIX with DIF Type 3 */
};

/*
 * SCSI hosts which support the Data Integrity Extensions must
 * indicate their capabilities by setting the prot_capabilities using
 * this call.
 */
static inline void scsi_host_set_prot(struct Scsi_Host *shost, unsigned int mask)
{
	shost->prot_capabilities = mask;
}
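
/*
 * Example: a minimal sketch of how a DIF/DIX-capable driver advertises
 * its protection support at probe time.  The chosen mask and guard type
 * are illustrative; a real driver reports whatever its hardware handles.
 *
 *	scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION |
 *				  SHOST_DIX_TYPE0_PROTECTION |
 *				  SHOST_DIX_TYPE1_PROTECTION);
 *	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
 */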

static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
{
	return shost->prot_capabilities;
}

static inline int scsi_host_prot_dma(struct Scsi_Host *shost)
{
	return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION;
}

static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
{
	static unsigned char cap[] = { 0,
				       SHOST_DIF_TYPE1_PROTECTION,
				       SHOST_DIF_TYPE2_PROTECTION,
				       SHOST_DIF_TYPE3_PROTECTION };

	if (target_type >= ARRAY_SIZE(cap))
		return 0;

	return shost->prot_capabilities & cap[target_type] ? target_type : 0;
}

static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
{
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
				       SHOST_DIX_TYPE1_PROTECTION,
				       SHOST_DIX_TYPE2_PROTECTION,
				       SHOST_DIX_TYPE3_PROTECTION };

	if (target_type >= ARRAY_SIZE(cap))
		return 0;

	return shost->prot_capabilities & cap[target_type];
#endif
	return 0;
}

/*
 * All DIX-capable initiators must support the T10-mandated CRC
 * checksum.  Controllers can optionally implement the IP checksum
 * scheme which has much lower impact on system performance.  Note
 * that the main rationale for the checksum is to match integrity
 * metadata with data.  Detecting bit errors is a job for ECC memory
 * and buses.
 */

enum scsi_host_guard_type {
	SHOST_DIX_GUARD_CRC = 1 << 0,
	SHOST_DIX_GUARD_IP  = 1 << 1,
};

static inline void scsi_host_set_guard(struct Scsi_Host *shost, unsigned char type)
{
	shost->prot_guard_type = type;
}

static inline unsigned char scsi_host_get_guard(struct Scsi_Host *shost)
{
	return shost->prot_guard_type;
}

extern int scsi_host_set_state(struct Scsi_Host *, enum scsi_host_state);

#endif /* _SCSI_SCSI_HOST_H */