1  /*
2   * Scsi Host Layer for MPT (Message Passing Technology) based controllers
3   *
4   * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5   * Copyright (C) 2012-2014  LSI Corporation
6   * Copyright (C) 2013-2014 Avago Technologies
7   *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
8   *
9   * This program is free software; you can redistribute it and/or
10   * modify it under the terms of the GNU General Public License
11   * as published by the Free Software Foundation; either version 2
12   * of the License, or (at your option) any later version.
13   *
14   * This program is distributed in the hope that it will be useful,
15   * but WITHOUT ANY WARRANTY; without even the implied warranty of
16   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17   * GNU General Public License for more details.
18   *
19   * NO WARRANTY
20   * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21   * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22   * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23   * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24   * solely responsible for determining the appropriateness of using and
25   * distributing the Program and assumes all risks associated with its
26   * exercise of rights under this Agreement, including but not limited to
27   * the risks and costs of program errors, damage to or loss of data,
28   * programs or equipment, and unavailability or interruption of operations.
29  
30   * DISCLAIMER OF LIABILITY
31   * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32   * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33   * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34   * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35   * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36   * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37   * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38  
39   * You should have received a copy of the GNU General Public License
40   * along with this program; if not, write to the Free Software
41   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42   * USA.
43   */
44  
45  #include <linux/module.h>
46  #include <linux/kernel.h>
47  #include <linux/init.h>
48  #include <linux/errno.h>
49  #include <linux/blkdev.h>
50  #include <linux/sched.h>
51  #include <linux/workqueue.h>
52  #include <linux/delay.h>
53  #include <linux/pci.h>
54  #include <linux/interrupt.h>
55  #include <linux/raid_class.h>
56  #include <linux/blk-mq-pci.h>
57  #include <linux/unaligned.h>
58  
59  #include "mpt3sas_base.h"
60  
61  #define RAID_CHANNEL 1
62  
63  #define PCIE_CHANNEL 2
64  
65  /* forward proto's */
66  static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
67  	struct _sas_node *sas_expander);
68  static void _firmware_event_work(struct work_struct *work);
69  
70  static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
71  	struct _sas_device *sas_device);
72  static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
73  	u8 retry_count, u8 is_pd);
74  static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
75  static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
76  	struct _pcie_device *pcie_device);
77  static void
78  _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
79  static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
80  static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc);
81  
82  /* global parameters */
83  LIST_HEAD(mpt3sas_ioc_list);
84  /* global ioc lock for list operations */
85  DEFINE_SPINLOCK(gioc_lock);
86  
87  MODULE_AUTHOR(MPT3SAS_AUTHOR);
88  MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
89  MODULE_LICENSE("GPL");
90  MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
91  MODULE_ALIAS("mpt2sas");
92  
/* local parameters */
/*
 * Callback index slots. Stored as u8, so the -1 initializers wrap to
 * 0xFF, which likely serves as the "not yet registered" sentinel
 * (presumably assigned when handlers are registered with the base
 * driver — confirm against mpt3sas_base).
 */
static u8 scsi_io_cb_idx = -1;
static u8 tm_cb_idx = -1;
static u8 ctl_cb_idx = -1;
static u8 base_cb_idx = -1;
static u8 port_enable_cb_idx = -1;
static u8 transport_cb_idx = -1;
static u8 scsih_cb_idx = -1;
static u8 config_cb_idx = -1;
/* NOTE(review): appear to count adapter instances per HBA generation */
static int mpt2_ids;
static int mpt3_ids;

static u8 tm_tr_cb_idx = -1 ;
static u8 tm_tr_volume_cb_idx = -1 ;
static u8 tm_sas_control_cb_idx = -1;
108  
109  /* command line options */
110  static u32 logging_level;
111  MODULE_PARM_DESC(logging_level,
112  	" bits for enabling additional logging info (default=0)");
113  
114  
115  static ushort max_sectors = 0xFFFF;
116  module_param(max_sectors, ushort, 0444);
117  MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767  default=32767");
118  
119  
120  static int missing_delay[2] = {-1, -1};
121  module_param_array(missing_delay, int, NULL, 0444);
122  MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");
123  
/* scsi-mid layer global parameter is max_report_luns, which is 511 */
125  #define MPT3SAS_MAX_LUN (16895)
126  static u64 max_lun = MPT3SAS_MAX_LUN;
127  module_param(max_lun, ullong, 0444);
128  MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
129  
130  static ushort hbas_to_enumerate;
131  module_param(hbas_to_enumerate, ushort, 0444);
132  MODULE_PARM_DESC(hbas_to_enumerate,
133  		" 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
134  		  1 - enumerates only SAS 2.0 generation HBAs\n \
135  		  2 - enumerates only SAS 3.0 generation HBAs (default=0)");
136  
137  /* diag_buffer_enable is bitwise
138   * bit 0 set = TRACE
139   * bit 1 set = SNAPSHOT
140   * bit 2 set = EXTENDED
141   *
142   * Either bit can be set, or both
143   */
144  static int diag_buffer_enable = -1;
145  module_param(diag_buffer_enable, int, 0444);
146  MODULE_PARM_DESC(diag_buffer_enable,
147  	" post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
148  static int disable_discovery = -1;
149  module_param(disable_discovery, int, 0444);
150  MODULE_PARM_DESC(disable_discovery, " disable discovery ");
151  
152  
153  /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
154  static int prot_mask = -1;
155  module_param(prot_mask, int, 0444);
156  MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
157  
158  static bool enable_sdev_max_qd;
159  module_param(enable_sdev_max_qd, bool, 0444);
160  MODULE_PARM_DESC(enable_sdev_max_qd,
161  	"Enable sdev max qd as can_queue, def=disabled(0)");
162  
163  static int multipath_on_hba = -1;
164  module_param(multipath_on_hba, int, 0);
165  MODULE_PARM_DESC(multipath_on_hba,
166  	"Multipath support to add same target device\n\t\t"
167  	"as many times as it is visible to HBA from various paths\n\t\t"
168  	"(by default:\n\t\t"
169  	"\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
170  	"\t SAS 3.5 HBA - This will be enabled)");
171  
172  static int host_tagset_enable = 1;
173  module_param(host_tagset_enable, int, 0444);
174  MODULE_PARM_DESC(host_tagset_enable,
175  	"Shared host tagset enable/disable Default: enable(1)");
176  
177  /* raid transport support */
178  static struct raid_template *mpt3sas_raid_template;
179  static struct raid_template *mpt2sas_raid_template;
180  
181  
/**
 * struct sense_info - common structure for obtaining sense keys
 * @skey: sense key
 * @asc: additional sense code
 * @ascq: additional sense code qualifier
 *
 * Holds the standard SCSI sense triplet (sense key / ASC / ASCQ) as a
 * convenient unit for callers that decode a command's sense buffer.
 */
struct sense_info {
	u8 skey;
	u8 asc;
	u8 ascq;
};
193  
194  #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
195  #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
196  #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
197  #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
198  #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
/**
 * struct fw_event_work - firmware event struct
 * @list: link list framework
 * @work: work object (ioc->fault_reset_work_q)
 * @ioc: per adapter object
 * @device_handle: device handle
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @ignore: flag meaning this event has been marked to ignore
 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h, or one of
 *	the driver-internal MPT3SAS_* event codes (0xFFFB-0xFFFF) above
 * @refcount: kref for this event; manipulated via fw_event_work_get()/
 *	fw_event_work_put(), freed by fw_event_work_free()
 * @event_data: reply event data payload follows (flexible array, sized
 *	by the len argument to alloc_fw_event_work())
 *
 * This object stored on ioc->fw_event_list.
 */
struct fw_event_work {
	struct list_head	list;
	struct work_struct	work;

	struct MPT3SAS_ADAPTER *ioc;
	u16			device_handle;
	u8			VF_ID;
	u8			VP_ID;
	u8			ignore;
	u16			event;
	struct kref		refcount;
	char			event_data[] __aligned(4);
};
227  
fw_event_work_free(struct kref * r)228  static void fw_event_work_free(struct kref *r)
229  {
230  	kfree(container_of(r, struct fw_event_work, refcount));
231  }
232  
fw_event_work_get(struct fw_event_work * fw_work)233  static void fw_event_work_get(struct fw_event_work *fw_work)
234  {
235  	kref_get(&fw_work->refcount);
236  }
237  
fw_event_work_put(struct fw_event_work * fw_work)238  static void fw_event_work_put(struct fw_event_work *fw_work)
239  {
240  	kref_put(&fw_work->refcount, fw_event_work_free);
241  }
242  
alloc_fw_event_work(int len)243  static struct fw_event_work *alloc_fw_event_work(int len)
244  {
245  	struct fw_event_work *fw_event;
246  
247  	fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
248  	if (!fw_event)
249  		return NULL;
250  
251  	kref_init(&fw_event->refcount);
252  	return fw_event;
253  }
254  
/**
 * struct _scsi_io_transfer - scsi io transfer
 * @handle: sas device handle (assigned by firmware)
 * @is_raid: flag set for hidden raid components
 * @dir: DMA_TO_DEVICE or DMA_FROM_DEVICE
 * @data_length: data transfer length
 * @data_dma: dma pointer to data
 * @sense: sense data
 * @lun: lun number
 * @cdb_length: cdb length
 * @cdb: cdb contents
 * @timeout: timeout for this command
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @valid_reply: flag set for reply message
 * @sense_length: sense length
 * @ioc_status: ioc status
 * @scsi_state: scsi state
 * @scsi_status: scsi status
 * @log_info: log information
 * @transfer_length: data length transfer when there is a reply message
 *
 * Used for sending internal scsi commands to devices within this module.
 * Refer to _scsi_send_scsi_io().
 */
struct _scsi_io_transfer {
	u16	handle;
	u8	is_raid;
	enum dma_data_direction dir;
	u32	data_length;
	dma_addr_t data_dma;
	u8	sense[SCSI_SENSE_BUFFERSIZE];
	u32	lun;
	u8	cdb_length;
	u8	cdb[32];
	u8	timeout;
	u8	VF_ID;
	u8	VP_ID;
	u8	valid_reply;
  /* the following bits are only valid when 'valid_reply = 1' */
	u32	sense_length;
	u16	ioc_status;
	u8	scsi_state;
	u8	scsi_status;
	u32	log_info;
	u32	transfer_length;
};
302  
303  /**
304   * _scsih_set_debug_level - global setting of ioc->logging_level.
305   * @val: value of the parameter to be set
306   * @kp: pointer to kernel_param structure
307   *
308   * Note: The logging levels are defined in mpt3sas_debug.h.
309   */
310  static int
_scsih_set_debug_level(const char * val,const struct kernel_param * kp)311  _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
312  {
313  	int ret = param_set_int(val, kp);
314  	struct MPT3SAS_ADAPTER *ioc;
315  
316  	if (ret)
317  		return ret;
318  
319  	pr_info("setting logging_level(0x%08x)\n", logging_level);
320  	spin_lock(&gioc_lock);
321  	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
322  		ioc->logging_level = logging_level;
323  	spin_unlock(&gioc_lock);
324  	return 0;
325  }
326  module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
327  	&logging_level, 0644);
328  
329  /**
330   * _scsih_srch_boot_sas_address - search based on sas_address
331   * @sas_address: sas address
332   * @boot_device: boot device object from bios page 2
333   *
334   * Return: 1 when there's a match, 0 means no match.
335   */
336  static inline int
_scsih_srch_boot_sas_address(u64 sas_address,Mpi2BootDeviceSasWwid_t * boot_device)337  _scsih_srch_boot_sas_address(u64 sas_address,
338  	Mpi2BootDeviceSasWwid_t *boot_device)
339  {
340  	return (sas_address == le64_to_cpu(boot_device->SASAddress)) ?  1 : 0;
341  }
342  
343  /**
344   * _scsih_srch_boot_device_name - search based on device name
345   * @device_name: device name specified in INDENTIFY fram
346   * @boot_device: boot device object from bios page 2
347   *
348   * Return: 1 when there's a match, 0 means no match.
349   */
350  static inline int
_scsih_srch_boot_device_name(u64 device_name,Mpi2BootDeviceDeviceName_t * boot_device)351  _scsih_srch_boot_device_name(u64 device_name,
352  	Mpi2BootDeviceDeviceName_t *boot_device)
353  {
354  	return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
355  }
356  
357  /**
358   * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
359   * @enclosure_logical_id: enclosure logical id
360   * @slot_number: slot number
361   * @boot_device: boot device object from bios page 2
362   *
363   * Return: 1 when there's a match, 0 means no match.
364   */
365  static inline int
_scsih_srch_boot_encl_slot(u64 enclosure_logical_id,u16 slot_number,Mpi2BootDeviceEnclosureSlot_t * boot_device)366  _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
367  	Mpi2BootDeviceEnclosureSlot_t *boot_device)
368  {
369  	return (enclosure_logical_id == le64_to_cpu(boot_device->
370  	    EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
371  	    SlotNumber)) ? 1 : 0;
372  }
373  
374  /**
375   * mpt3sas_get_port_by_id - get hba port entry corresponding to provided
376   *			  port number from port list
377   * @ioc: per adapter object
378   * @port_id: port number
379   * @bypass_dirty_port_flag: when set look the matching hba port entry even
380   *			if hba port entry is marked as dirty.
381   *
382   * Search for hba port entry corresponding to provided port number,
383   * if available return port object otherwise return NULL.
384   */
385  struct hba_port *
mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER * ioc,u8 port_id,u8 bypass_dirty_port_flag)386  mpt3sas_get_port_by_id(struct MPT3SAS_ADAPTER *ioc,
387  	u8 port_id, u8 bypass_dirty_port_flag)
388  {
389  	struct hba_port *port, *port_next;
390  
391  	/*
392  	 * When multipath_on_hba is disabled then
393  	 * search the hba_port entry using default
394  	 * port id i.e. 255
395  	 */
396  	if (!ioc->multipath_on_hba)
397  		port_id = MULTIPATH_DISABLED_PORT_ID;
398  
399  	list_for_each_entry_safe(port, port_next,
400  	    &ioc->port_table_list, list) {
401  		if (port->port_id != port_id)
402  			continue;
403  		if (bypass_dirty_port_flag)
404  			return port;
405  		if (port->flags & HBA_PORT_FLAG_DIRTY_PORT)
406  			continue;
407  		return port;
408  	}
409  
410  	/*
411  	 * Allocate hba_port object for default port id (i.e. 255)
412  	 * when multipath_on_hba is disabled for the HBA.
413  	 * And add this object to port_table_list.
414  	 */
415  	if (!ioc->multipath_on_hba) {
416  		port = kzalloc(sizeof(struct hba_port), GFP_ATOMIC);
417  		if (!port)
418  			return NULL;
419  
420  		port->port_id = port_id;
421  		ioc_info(ioc,
422  		   "hba_port entry: %p, port: %d is added to hba_port list\n",
423  		   port, port->port_id);
424  		list_add_tail(&port->list,
425  		    &ioc->port_table_list);
426  		return port;
427  	}
428  	return NULL;
429  }
430  
431  /**
432   * mpt3sas_get_vphy_by_phy - get virtual_phy object corresponding to phy number
433   * @ioc: per adapter object
434   * @port: hba_port object
435   * @phy: phy number
436   *
437   * Return virtual_phy object corresponding to phy number.
438   */
439  struct virtual_phy *
mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER * ioc,struct hba_port * port,u32 phy)440  mpt3sas_get_vphy_by_phy(struct MPT3SAS_ADAPTER *ioc,
441  	struct hba_port *port, u32 phy)
442  {
443  	struct virtual_phy *vphy, *vphy_next;
444  
445  	if (!port->vphys_mask)
446  		return NULL;
447  
448  	list_for_each_entry_safe(vphy, vphy_next, &port->vphys_list, list) {
449  		if (vphy->phy_mask & (1 << phy))
450  			return vphy;
451  	}
452  	return NULL;
453  }
454  
455  /**
456   * _scsih_is_boot_device - search for matching boot device.
457   * @sas_address: sas address
458   * @device_name: device name specified in INDENTIFY fram
459   * @enclosure_logical_id: enclosure logical id
460   * @slot: slot number
461   * @form: specifies boot device form
462   * @boot_device: boot device object from bios page 2
463   *
464   * Return: 1 when there's a match, 0 means no match.
465   */
466  static int
_scsih_is_boot_device(u64 sas_address,u64 device_name,u64 enclosure_logical_id,u16 slot,u8 form,Mpi2BiosPage2BootDevice_t * boot_device)467  _scsih_is_boot_device(u64 sas_address, u64 device_name,
468  	u64 enclosure_logical_id, u16 slot, u8 form,
469  	Mpi2BiosPage2BootDevice_t *boot_device)
470  {
471  	int rc = 0;
472  
473  	switch (form) {
474  	case MPI2_BIOSPAGE2_FORM_SAS_WWID:
475  		if (!sas_address)
476  			break;
477  		rc = _scsih_srch_boot_sas_address(
478  		    sas_address, &boot_device->SasWwid);
479  		break;
480  	case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
481  		if (!enclosure_logical_id)
482  			break;
483  		rc = _scsih_srch_boot_encl_slot(
484  		    enclosure_logical_id,
485  		    slot, &boot_device->EnclosureSlot);
486  		break;
487  	case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
488  		if (!device_name)
489  			break;
490  		rc = _scsih_srch_boot_device_name(
491  		    device_name, &boot_device->DeviceName);
492  		break;
493  	case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
494  		break;
495  	}
496  
497  	return rc;
498  }
499  
500  /**
501   * _scsih_get_sas_address - set the sas_address for given device handle
502   * @ioc: ?
503   * @handle: device handle
504   * @sas_address: sas address
505   *
506   * Return: 0 success, non-zero when failure
507   */
508  static int
_scsih_get_sas_address(struct MPT3SAS_ADAPTER * ioc,u16 handle,u64 * sas_address)509  _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
510  	u64 *sas_address)
511  {
512  	Mpi2SasDevicePage0_t sas_device_pg0;
513  	Mpi2ConfigReply_t mpi_reply;
514  	u32 ioc_status;
515  
516  	*sas_address = 0;
517  
518  	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
519  	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
520  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
521  			__FILE__, __LINE__, __func__);
522  		return -ENXIO;
523  	}
524  
525  	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
526  	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
527  		/* For HBA, vSES doesn't return HBA SAS address. Instead return
528  		 * vSES's sas address.
529  		 */
530  		if ((handle <= ioc->sas_hba.num_phys) &&
531  		   (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
532  		   MPI2_SAS_DEVICE_INFO_SEP)))
533  			*sas_address = ioc->sas_hba.sas_address;
534  		else
535  			*sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
536  		return 0;
537  	}
538  
539  	/* we hit this because the given parent handle doesn't exist */
540  	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
541  		return -ENXIO;
542  
543  	/* else error case */
544  	ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
545  		handle, ioc_status, __FILE__, __LINE__, __func__);
546  	return -EIO;
547  }
548  
/**
 * _scsih_determine_boot_device - determine boot device.
 * @ioc: per adapter object
 * @device: sas_device or pcie_device object
 * @channel: SAS or PCIe channel
 *
 * Determines whether this device should be the first reported device to
 * scsi-ml or sas transport, this purpose is for persistent boot device.
 * There are primary, alternate, and current entries in bios page 2. The order
 * priority is primary, alternate, then current.  This routine saves
 * the corresponding device object.
 * The saved data to be used later in _scsih_probe_boot_devices().
 */
static void
_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
	u32 channel)
{
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	u64 sas_address;
	u64 device_name;
	u64 enclosure_logical_id;
	u16 slot;

	 /* only process this function when driver loads */
	if (!ioc->is_driver_loading)
		return;

	 /* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/*
	 * @device is typeless; @channel tells us which struct it really
	 * is. RAID/PCIe devices carry only a wwid, so the remaining
	 * identifiers are zeroed (zero never matches in
	 * _scsih_is_boot_device()).
	 */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		sas_address = raid_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		sas_address = pcie_device->wwid;
		device_name = 0;
		enclosure_logical_id = 0;
		slot = 0;
	} else {
		sas_device = device;
		sas_address = sas_device->sas_address;
		device_name = sas_device->device_name;
		enclosure_logical_id = sas_device->enclosure_logical_id;
		slot = sas_device->slot;
	}

	/* requested (primary) boot device: first match wins, never replaced */
	if (!ioc->req_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_boot_device.device = device;
			ioc->req_boot_device.channel = channel;
		}
	}

	/* requested alternate boot device */
	if (!ioc->req_alt_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.ReqAltBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.RequestedAltBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->req_alt_boot_device.device = device;
			ioc->req_alt_boot_device.channel = channel;
		}
	}

	/* current boot device */
	if (!ioc->current_boot_device.device) {
		if (_scsih_is_boot_device(sas_address, device_name,
		    enclosure_logical_id, slot,
		    (ioc->bios_pg2.CurrentBootDeviceForm &
		    MPI2_BIOSPAGE2_FORM_MASK),
		    &ioc->bios_pg2.CurrentBootDevice)) {
			dinitprintk(ioc,
				    ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
					     __func__, (u64)sas_address));
			ioc->current_boot_device.device = device;
			ioc->current_boot_device.channel = channel;
		}
	}
}
644  
645  static struct _sas_device *
__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)646  __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
647  		struct MPT3SAS_TARGET *tgt_priv)
648  {
649  	struct _sas_device *ret;
650  
651  	assert_spin_locked(&ioc->sas_device_lock);
652  
653  	ret = tgt_priv->sas_dev;
654  	if (ret)
655  		sas_device_get(ret);
656  
657  	return ret;
658  }
659  
660  static struct _sas_device *
mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)661  mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
662  		struct MPT3SAS_TARGET *tgt_priv)
663  {
664  	struct _sas_device *ret;
665  	unsigned long flags;
666  
667  	spin_lock_irqsave(&ioc->sas_device_lock, flags);
668  	ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
669  	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
670  
671  	return ret;
672  }
673  
674  static struct _pcie_device *
__mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)675  __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
676  	struct MPT3SAS_TARGET *tgt_priv)
677  {
678  	struct _pcie_device *ret;
679  
680  	assert_spin_locked(&ioc->pcie_device_lock);
681  
682  	ret = tgt_priv->pcie_dev;
683  	if (ret)
684  		pcie_device_get(ret);
685  
686  	return ret;
687  }
688  
689  /**
690   * mpt3sas_get_pdev_from_target - pcie device search
691   * @ioc: per adapter object
692   * @tgt_priv: starget private object
693   *
694   * Context: This function will acquire ioc->pcie_device_lock and will release
695   * before returning the pcie_device object.
696   *
697   * This searches for pcie_device from target, then return pcie_device object.
698   */
699  static struct _pcie_device *
mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER * ioc,struct MPT3SAS_TARGET * tgt_priv)700  mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
701  	struct MPT3SAS_TARGET *tgt_priv)
702  {
703  	struct _pcie_device *ret;
704  	unsigned long flags;
705  
706  	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
707  	ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
708  	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
709  
710  	return ret;
711  }
712  
713  
714  /**
715   * __mpt3sas_get_sdev_by_rphy - sas device search
716   * @ioc: per adapter object
717   * @rphy: sas_rphy pointer
718   *
719   * Context: This function will acquire ioc->sas_device_lock and will release
720   * before returning the sas_device object.
721   *
722   * This searches for sas_device from rphy object
723   * then return sas_device object.
724   */
725  struct _sas_device *
__mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER * ioc,struct sas_rphy * rphy)726  __mpt3sas_get_sdev_by_rphy(struct MPT3SAS_ADAPTER *ioc,
727  	struct sas_rphy *rphy)
728  {
729  	struct _sas_device *sas_device;
730  
731  	assert_spin_locked(&ioc->sas_device_lock);
732  
733  	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
734  		if (sas_device->rphy != rphy)
735  			continue;
736  		sas_device_get(sas_device);
737  		return sas_device;
738  	}
739  
740  	sas_device = NULL;
741  	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
742  		if (sas_device->rphy != rphy)
743  			continue;
744  		sas_device_get(sas_device);
745  		return sas_device;
746  	}
747  
748  	return NULL;
749  }
750  
751  /**
752   * __mpt3sas_get_sdev_by_addr - get _sas_device object corresponding to provided
753   *				sas address from sas_device_list list
754   * @ioc: per adapter object
755   * @sas_address: device sas address
756   * @port: port number
757   *
758   * Search for _sas_device object corresponding to provided sas address,
759   * if available return _sas_device object address otherwise return NULL.
760   */
761  struct _sas_device *
__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)762  __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
763  	u64 sas_address, struct hba_port *port)
764  {
765  	struct _sas_device *sas_device;
766  
767  	if (!port)
768  		return NULL;
769  
770  	assert_spin_locked(&ioc->sas_device_lock);
771  
772  	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
773  		if (sas_device->sas_address != sas_address)
774  			continue;
775  		if (sas_device->port != port)
776  			continue;
777  		sas_device_get(sas_device);
778  		return sas_device;
779  	}
780  
781  	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) {
782  		if (sas_device->sas_address != sas_address)
783  			continue;
784  		if (sas_device->port != port)
785  			continue;
786  		sas_device_get(sas_device);
787  		return sas_device;
788  	}
789  
790  	return NULL;
791  }
792  
/**
 * mpt3sas_get_sdev_by_addr - sas device search
 * @ioc: per adapter object
 * @sas_address: sas address
 * @port: hba port entry
 * Context: This function acquires ioc->sas_device_lock internally and
 *	releases it before returning; the caller must NOT hold the lock.
 *
 * This searches for sas_device based on sas_address & port number,
 * then return sas_device object. A reference is taken on the returned
 * device (see __mpt3sas_get_sdev_by_addr()); the caller must release it.
 */
struct _sas_device *
mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, struct hba_port *port)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	    sas_address, port);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return sas_device;
}
817  
818  static struct _sas_device *
__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)819  __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
820  {
821  	struct _sas_device *sas_device;
822  
823  	assert_spin_locked(&ioc->sas_device_lock);
824  
825  	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
826  		if (sas_device->handle == handle)
827  			goto found_device;
828  
829  	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
830  		if (sas_device->handle == handle)
831  			goto found_device;
832  
833  	return NULL;
834  
835  found_device:
836  	sas_device_get(sas_device);
837  	return sas_device;
838  }
839  
/**
 * mpt3sas_get_sdev_by_handle - sas device search
 * @ioc: per adapter object
 * @handle: sas device handle (assigned by firmware)
 * Context: This function acquires ioc->sas_device_lock internally and
 *	releases it before returning; the caller must NOT hold the lock.
 *
 * This searches for sas_device based on handle, then return sas_device
 * object. A reference is taken on the returned device; the caller must
 * release it.
 */
struct _sas_device *
mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _sas_device *sas_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return sas_device;
}
861  
/**
 * _scsih_display_enclosure_chassis_info - display device location info
 * @ioc: per adapter object
 * @sas_device: per sas device object
 * @sdev: scsi device struct (may be NULL)
 * @starget: scsi target struct (may be NULL)
 *
 * Prints enclosure logical id/slot, enclosure level/connector name and
 * chassis slot when each is valid. The output sink is chosen in order:
 * @sdev if non-NULL, else @starget if non-NULL, else the adapter log.
 */
static void
_scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device, struct scsi_device *sdev,
	struct scsi_target *starget)
{
	if (sdev) {
		/* per-LUN context available: report via sdev_printk */
		if (sas_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			    "enclosure logical id (0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else if (starget) {
		/* only target context available: report via starget_printk */
		if (sas_device->enclosure_handle != 0)
			starget_printk(KERN_INFO, starget,
			    "enclosure logical id(0x%016llx), slot(%d) \n",
			    (unsigned long long)
			    sas_device->enclosure_logical_id,
			    sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			starget_printk(KERN_INFO, starget,
			    "enclosure level(0x%04x), connector name( %s)\n",
			    sas_device->enclosure_level,
			    sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			starget_printk(KERN_INFO, starget,
			    "chassis slot(0x%04x)\n",
			    sas_device->chassis_slot);
	} else {
		/* no scsi-ml object: fall back to adapter-level logging */
		if (sas_device->enclosure_handle != 0)
			ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
				 (u64)sas_device->enclosure_logical_id,
				 sas_device->slot);
		if (sas_device->connector_name[0] != '\0')
			ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
				 sas_device->enclosure_level,
				 sas_device->connector_name);
		if (sas_device->is_chassis_slot_valid)
			ioc_info(ioc, "chassis slot(0x%04x)\n",
				 sas_device->chassis_slot);
	}
}
919  
920  /**
921   * _scsih_sas_device_remove - remove sas_device from list.
922   * @ioc: per adapter object
923   * @sas_device: the sas_device object
924   * Context: This function will acquire ioc->sas_device_lock.
925   *
926   * If sas_device is on the list, remove it and decrement its reference count.
927   */
928  static void
_scsih_sas_device_remove(struct MPT3SAS_ADAPTER * ioc,struct _sas_device * sas_device)929  _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
930  	struct _sas_device *sas_device)
931  {
932  	unsigned long flags;
933  
934  	if (!sas_device)
935  		return;
936  	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
937  		 sas_device->handle, (u64)sas_device->sas_address);
938  
939  	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
940  
941  	/*
942  	 * The lock serializes access to the list, but we still need to verify
943  	 * that nobody removed the entry while we were waiting on the lock.
944  	 */
945  	spin_lock_irqsave(&ioc->sas_device_lock, flags);
946  	if (!list_empty(&sas_device->list)) {
947  		list_del_init(&sas_device->list);
948  		sas_device_put(sas_device);
949  	}
950  	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
951  }
952  
953  /**
954   * _scsih_device_remove_by_handle - removing device object by handle
955   * @ioc: per adapter object
956   * @handle: device handle
957   */
958  static void
_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)959  _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
960  {
961  	struct _sas_device *sas_device;
962  	unsigned long flags;
963  
964  	if (ioc->shost_recovery)
965  		return;
966  
967  	spin_lock_irqsave(&ioc->sas_device_lock, flags);
968  	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
969  	if (sas_device) {
970  		list_del_init(&sas_device->list);
971  		sas_device_put(sas_device);
972  	}
973  	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
974  	if (sas_device) {
975  		_scsih_remove_device(ioc, sas_device);
976  		sas_device_put(sas_device);
977  	}
978  }
979  
980  /**
981   * mpt3sas_device_remove_by_sas_address - removing device object by
982   *					sas address & port number
983   * @ioc: per adapter object
984   * @sas_address: device sas_address
985   * @port: hba port entry
986   *
987   * Return nothing.
988   */
989  void
mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)990  mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
991  	u64 sas_address, struct hba_port *port)
992  {
993  	struct _sas_device *sas_device;
994  	unsigned long flags;
995  
996  	if (ioc->shost_recovery)
997  		return;
998  
999  	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1000  	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address, port);
1001  	if (sas_device) {
1002  		list_del_init(&sas_device->list);
1003  		sas_device_put(sas_device);
1004  	}
1005  	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1006  	if (sas_device) {
1007  		_scsih_remove_device(ioc, sas_device);
1008  		sas_device_put(sas_device);
1009  	}
1010  }
1011  
1012  /**
1013   * _scsih_sas_device_add - insert sas_device to the list.
1014   * @ioc: per adapter object
1015   * @sas_device: the sas_device object
1016   * Context: This function will acquire ioc->sas_device_lock.
1017   *
1018   * Adding new object to the ioc->sas_device_list.
1019   */
static void
_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	/* Debug-only trace of the device being added. */
	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/* The list takes its own reference on the device. */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/* Hidden drives are tracked internally but not exposed to SML. */
	if (ioc->hide_drives) {
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
		return;
	}

	/* On transport port add failure, undo the list insertion above. */
	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
	     sas_device->sas_address_parent, sas_device->port)) {
		_scsih_sas_device_remove(ioc, sas_device);
	} else if (!sas_device->starget) {
		/*
		 * When async scanning is enabled, it's not possible to remove
		 * devices while scanning is turned on due to an oops in
		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
		 */
		if (!ioc->is_driver_loading) {
			/* Port was added but no target bound: roll back both
			 * the transport port and the list entry. */
			mpt3sas_transport_port_remove(ioc,
			    sas_device->sas_address,
			    sas_device->sas_address_parent,
			    sas_device->port);
			_scsih_sas_device_remove(ioc, sas_device);
		}
	} else
		clear_bit(sas_device->handle, ioc->pend_os_device_add);
}
1063  
1064  /**
1065   * _scsih_sas_device_init_add - insert sas_device to the list.
1066   * @ioc: per adapter object
1067   * @sas_device: the sas_device object
1068   * Context: This function will acquire ioc->sas_device_lock.
1069   *
1070   * Adding new object at driver load time to the ioc->sas_device_init_list.
1071   */
static void
_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_device *sas_device)
{
	unsigned long flags;

	/* Debug-only trace of the device being staged at driver load. */
	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
			    __func__, sas_device->handle,
			    (u64)sas_device->sas_address));

	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
	    NULL, NULL));

	/*
	 * The init list takes its own reference.  Boot-device evaluation is
	 * done while still holding the lock so it is atomic with insertion.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
	_scsih_determine_boot_device(ioc, sas_device, 0);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
1092  
1093  
1094  static struct _pcie_device *
__mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)1095  __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1096  {
1097  	struct _pcie_device *pcie_device;
1098  
1099  	assert_spin_locked(&ioc->pcie_device_lock);
1100  
1101  	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1102  		if (pcie_device->wwid == wwid)
1103  			goto found_device;
1104  
1105  	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1106  		if (pcie_device->wwid == wwid)
1107  			goto found_device;
1108  
1109  	return NULL;
1110  
1111  found_device:
1112  	pcie_device_get(pcie_device);
1113  	return pcie_device;
1114  }
1115  
1116  
1117  /**
1118   * mpt3sas_get_pdev_by_wwid - pcie device search
1119   * @ioc: per adapter object
1120   * @wwid: wwid
1121   *
1122   * Context: This function will acquire ioc->pcie_device_lock and will release
1123   * before returning the pcie_device object.
1124   *
1125   * This searches for pcie_device based on wwid, then return pcie_device object.
1126   */
1127  static struct _pcie_device *
mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)1128  mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1129  {
1130  	struct _pcie_device *pcie_device;
1131  	unsigned long flags;
1132  
1133  	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1134  	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
1135  	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1136  
1137  	return pcie_device;
1138  }
1139  
1140  
1141  static struct _pcie_device *
__mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1142  __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
1143  	int channel)
1144  {
1145  	struct _pcie_device *pcie_device;
1146  
1147  	assert_spin_locked(&ioc->pcie_device_lock);
1148  
1149  	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1150  		if (pcie_device->id == id && pcie_device->channel == channel)
1151  			goto found_device;
1152  
1153  	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1154  		if (pcie_device->id == id && pcie_device->channel == channel)
1155  			goto found_device;
1156  
1157  	return NULL;
1158  
1159  found_device:
1160  	pcie_device_get(pcie_device);
1161  	return pcie_device;
1162  }
1163  
1164  static struct _pcie_device *
__mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1165  __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1166  {
1167  	struct _pcie_device *pcie_device;
1168  
1169  	assert_spin_locked(&ioc->pcie_device_lock);
1170  
1171  	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1172  		if (pcie_device->handle == handle)
1173  			goto found_device;
1174  
1175  	list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1176  		if (pcie_device->handle == handle)
1177  			goto found_device;
1178  
1179  	return NULL;
1180  
1181  found_device:
1182  	pcie_device_get(pcie_device);
1183  	return pcie_device;
1184  }
1185  
1186  
1187  /**
1188   * mpt3sas_get_pdev_by_handle - pcie device search
1189   * @ioc: per adapter object
1190   * @handle: Firmware device handle
1191   *
1192   * Context: This function will acquire ioc->pcie_device_lock and will release
1193   * before returning the pcie_device object.
1194   *
1195   * This searches for pcie_device based on handle, then return pcie_device
1196   * object.
1197   */
1198  struct _pcie_device *
mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1199  mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1200  {
1201  	struct _pcie_device *pcie_device;
1202  	unsigned long flags;
1203  
1204  	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1205  	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1206  	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1207  
1208  	return pcie_device;
1209  }
1210  
1211  /**
1212   * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
1213   * @ioc: per adapter object
1214   * Context: This function will acquire ioc->pcie_device_lock
1215   *
1216   * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency
1217   * which has reported maximum among all available NVMe drives.
1218   * Minimum max_shutdown_latency will be six seconds.
1219   */
1220  static void
_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER * ioc)1221  _scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
1222  {
1223  	struct _pcie_device *pcie_device;
1224  	unsigned long flags;
1225  	u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
1226  
1227  	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1228  	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
1229  		if (pcie_device->shutdown_latency) {
1230  			if (shutdown_latency < pcie_device->shutdown_latency)
1231  				shutdown_latency =
1232  					pcie_device->shutdown_latency;
1233  		}
1234  	}
1235  	ioc->max_shutdown_latency = shutdown_latency;
1236  	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1237  }
1238  
1239  /**
1240   * _scsih_pcie_device_remove - remove pcie_device from list.
1241   * @ioc: per adapter object
1242   * @pcie_device: the pcie_device object
1243   * Context: This function will acquire ioc->pcie_device_lock.
1244   *
1245   * If pcie_device is on the list, remove it and decrement its reference count.
1246   */
static void
_scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	if (!pcie_device)
		return;
	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	/* Unlink under the lock; the reference drop happens afterwards. */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		was_on_pcie_device_list = 1;
	}
	if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
		update_latency = 1;
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	if (was_on_pcie_device_list) {
		kfree(pcie_device->serial_number);
		/* Drop the reference the list held. */
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}
1289  
1290  
1291  /**
1292   * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1293   * @ioc: per adapter object
1294   * @handle: device handle
1295   */
static void
_scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _pcie_device *pcie_device;
	unsigned long flags;
	int was_on_pcie_device_list = 0;
	u8 update_latency = 0;

	/* Removals are deferred while host recovery is in progress. */
	if (ioc->shost_recovery)
		return;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
	if (pcie_device) {
		if (!list_empty(&pcie_device->list)) {
			list_del_init(&pcie_device->list);
			was_on_pcie_device_list = 1;
			/* Drops the list's reference; the lookup reference
			 * taken above keeps the object alive. */
			pcie_device_put(pcie_device);
		}
		if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
			update_latency = 1;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
	/* Detach from the SCSI midlayer outside the lock, then drop the
	 * lookup reference. */
	if (was_on_pcie_device_list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		pcie_device_put(pcie_device);
	}

	/*
	 * This device's RTD3 Entry Latency matches IOC's
	 * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
	 * from the available drives as current drive is getting removed.
	 */
	if (update_latency)
		_scsih_set_nvme_max_shutdown_latency(ioc);
}
1332  
1333  /**
1334   * _scsih_pcie_device_add - add pcie_device object
1335   * @ioc: per adapter object
1336   * @pcie_device: pcie_device object
1337   *
1338   * This is added to the pcie_device_list link list.
1339   */
static void
_scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	unsigned long flags;

	/* Debug-only trace of the device being added. */
	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* The list takes its own reference on the device. */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	/* Blocked devices are tracked but not exposed to the midlayer. */
	if (pcie_device->access_status ==
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		return;
	}
	/* On scsi_add_device() failure undo the list insertion above. */
	if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
		_scsih_pcie_device_remove(ioc, pcie_device);
	} else if (!pcie_device->starget) {
		if (!ioc->is_driver_loading) {
/*TODO-- Need to find out whether this condition will occur or not*/
			clear_bit(pcie_device->handle, ioc->pend_os_device_add);
		}
	} else
		clear_bit(pcie_device->handle, ioc->pend_os_device_add);
}
1382  
/**
1384   * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1385   * @ioc: per adapter object
1386   * @pcie_device: the pcie_device object
1387   * Context: This function will acquire ioc->pcie_device_lock.
1388   *
1389   * Adding new object at driver load time to the ioc->pcie_device_init_list.
1390   */
static void
_scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
				struct _pcie_device *pcie_device)
{
	unsigned long flags;

	/* Debug-only trace of the device being staged at driver load. */
	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
				    __func__, pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/*
	 * The init list takes its own reference.  Blocked devices are never
	 * considered as boot-device candidates.  Boot-device evaluation is
	 * done while still holding the lock so it is atomic with insertion.
	 */
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
	if (pcie_device->access_status !=
	    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
		_scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
1421  /**
1422   * _scsih_raid_device_find_by_id - raid device search
1423   * @ioc: per adapter object
1424   * @id: sas device target id
1425   * @channel: sas device channel
1426   * Context: Calling function should acquire ioc->raid_device_lock
1427   *
1428   * This searches for raid_device based on target id, then return raid_device
1429   * object.
1430   */
1431  static struct _raid_device *
_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1432  _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1433  {
1434  	struct _raid_device *raid_device, *r;
1435  
1436  	r = NULL;
1437  	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1438  		if (raid_device->id == id && raid_device->channel == channel) {
1439  			r = raid_device;
1440  			goto out;
1441  		}
1442  	}
1443  
1444   out:
1445  	return r;
1446  }
1447  
1448  /**
1449   * mpt3sas_raid_device_find_by_handle - raid device search
1450   * @ioc: per adapter object
1451   * @handle: sas device handle (assigned by firmware)
1452   * Context: Calling function should acquire ioc->raid_device_lock
1453   *
1454   * This searches for raid_device based on handle, then return raid_device
1455   * object.
1456   */
1457  struct _raid_device *
mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1458  mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1459  {
1460  	struct _raid_device *raid_device, *r;
1461  
1462  	r = NULL;
1463  	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1464  		if (raid_device->handle != handle)
1465  			continue;
1466  		r = raid_device;
1467  		goto out;
1468  	}
1469  
1470   out:
1471  	return r;
1472  }
1473  
1474  /**
1475   * _scsih_raid_device_find_by_wwid - raid device search
1476   * @ioc: per adapter object
 * @wwid: world wide identifier of the RAID volume
1478   * Context: Calling function should acquire ioc->raid_device_lock
1479   *
1480   * This searches for raid_device based on wwid, then return raid_device
1481   * object.
1482   */
1483  static struct _raid_device *
_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER * ioc,u64 wwid)1484  _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1485  {
1486  	struct _raid_device *raid_device, *r;
1487  
1488  	r = NULL;
1489  	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1490  		if (raid_device->wwid != wwid)
1491  			continue;
1492  		r = raid_device;
1493  		goto out;
1494  	}
1495  
1496   out:
1497  	return r;
1498  }
1499  
1500  /**
1501   * _scsih_raid_device_add - add raid_device object
1502   * @ioc: per adapter object
1503   * @raid_device: raid_device object
1504   *
1505   * This is added to the raid_device_list link list.
1506   */
static void
_scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
	struct _raid_device *raid_device)
{
	unsigned long flags;

	/* Debug-only trace of the volume being added. */
	dewtprintk(ioc,
		   ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    raid_device->handle, (u64)raid_device->wwid));

	/* RAID volumes are plain list entries; no refcounting is used. */
	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_add_tail(&raid_device->list, &ioc->raid_device_list);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
1522  
1523  /**
1524   * _scsih_raid_device_remove - delete raid_device object
1525   * @ioc: per adapter object
1526   * @raid_device: raid_device object
1527   *
1528   */
1529  static void
_scsih_raid_device_remove(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)1530  _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1531  	struct _raid_device *raid_device)
1532  {
1533  	unsigned long flags;
1534  
1535  	spin_lock_irqsave(&ioc->raid_device_lock, flags);
1536  	list_del(&raid_device->list);
1537  	kfree(raid_device);
1538  	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1539  }
1540  
1541  /**
1542   * mpt3sas_scsih_expander_find_by_handle - expander device search
1543   * @ioc: per adapter object
1544   * @handle: expander handle (assigned by firmware)
1545   * Context: Calling function should acquire ioc->sas_device_lock
1546   *
1547   * This searches for expander device based on handle, then returns the
1548   * sas_node object.
1549   */
1550  struct _sas_node *
mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1551  mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1552  {
1553  	struct _sas_node *sas_expander, *r;
1554  
1555  	r = NULL;
1556  	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1557  		if (sas_expander->handle != handle)
1558  			continue;
1559  		r = sas_expander;
1560  		goto out;
1561  	}
1562   out:
1563  	return r;
1564  }
1565  
1566  /**
 * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
1568   * @ioc: per adapter object
1569   * @handle: enclosure handle (assigned by firmware)
1570   * Context: Calling function should acquire ioc->sas_device_lock
1571   *
1572   * This searches for enclosure device based on handle, then returns the
1573   * enclosure object.
1574   */
1575  static struct _enclosure_node *
mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER * ioc,u16 handle)1576  mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1577  {
1578  	struct _enclosure_node *enclosure_dev, *r;
1579  
1580  	r = NULL;
1581  	list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1582  		if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1583  			continue;
1584  		r = enclosure_dev;
1585  		goto out;
1586  	}
1587  out:
1588  	return r;
1589  }
1590  /**
1591   * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1592   * @ioc: per adapter object
1593   * @sas_address: sas address
1594   * @port: hba port entry
1595   * Context: Calling function should acquire ioc->sas_node_lock.
1596   *
1597   * This searches for expander device based on sas_address & port number,
1598   * then returns the sas_node object.
1599   */
1600  struct _sas_node *
mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)1601  mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1602  	u64 sas_address, struct hba_port *port)
1603  {
1604  	struct _sas_node *sas_expander, *r = NULL;
1605  
1606  	if (!port)
1607  		return r;
1608  
1609  	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1610  		if (sas_expander->sas_address != sas_address)
1611  			continue;
1612  		if (sas_expander->port != port)
1613  			continue;
1614  		r = sas_expander;
1615  		goto out;
1616  	}
1617   out:
1618  	return r;
1619  }
1620  
1621  /**
1622   * _scsih_expander_node_add - insert expander device to the list.
1623   * @ioc: per adapter object
1624   * @sas_expander: the sas_device object
1625   * Context: This function will acquire ioc->sas_node_lock.
1626   *
1627   * Adding new object to the ioc->sas_expander_list.
1628   */
static void
_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	unsigned long flags;

	/* Expander nodes are plain list entries; no refcounting is used. */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
}
1639  
1640  /**
1641   * _scsih_is_end_device - determines if device is an end device
1642   * @device_info: bitfield providing information about the device.
1643   * Context: none
1644   *
1645   * Return: 1 if end device.
1646   */
1647  static int
_scsih_is_end_device(u32 device_info)1648  _scsih_is_end_device(u32 device_info)
1649  {
1650  	if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1651  		((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1652  		(device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1653  		(device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1654  		return 1;
1655  	else
1656  		return 0;
1657  }
1658  
1659  /**
1660   * _scsih_is_nvme_pciescsi_device - determines if
1661   *			device is an pcie nvme/scsi device
1662   * @device_info: bitfield providing information about the device.
1663   * Context: none
1664   *
1665   * Returns 1 if device is pcie device type nvme/scsi.
1666   */
1667  static int
_scsih_is_nvme_pciescsi_device(u32 device_info)1668  _scsih_is_nvme_pciescsi_device(u32 device_info)
1669  {
1670  	if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1671  	    == MPI26_PCIE_DEVINFO_NVME) ||
1672  	    ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1673  	    == MPI26_PCIE_DEVINFO_SCSI))
1674  		return 1;
1675  	else
1676  		return 0;
1677  }
1678  
1679  /**
1680   * _scsih_scsi_lookup_find_by_target - search for matching channel:id
1681   * @ioc: per adapter object
1682   * @id: target id
1683   * @channel: channel
1684   * Context: This function will acquire ioc->scsi_lookup_lock.
1685   *
1686   * This will search for a matching channel:id in the scsi_lookup array,
1687   * returning 1 if found.
1688   */
1689  static u8
_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER * ioc,int id,int channel)1690  _scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
1691  	int channel)
1692  {
1693  	int smid;
1694  	struct scsi_cmnd *scmd;
1695  
1696  	for (smid = 1;
1697  	     smid <= ioc->shost->can_queue; smid++) {
1698  		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1699  		if (!scmd)
1700  			continue;
1701  		if (scmd->device->id == id &&
1702  		    scmd->device->channel == channel)
1703  			return 1;
1704  	}
1705  	return 0;
1706  }
1707  
1708  /**
1709   * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
1710   * @ioc: per adapter object
1711   * @id: target id
1712   * @lun: lun number
1713   * @channel: channel
1714   * Context: This function will acquire ioc->scsi_lookup_lock.
1715   *
1716   * This will search for a matching channel:id:lun in the scsi_lookup array,
1717   * returning 1 if found.
1718   */
1719  static u8
_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER * ioc,int id,unsigned int lun,int channel)1720  _scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
1721  	unsigned int lun, int channel)
1722  {
1723  	int smid;
1724  	struct scsi_cmnd *scmd;
1725  
1726  	for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
1727  
1728  		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1729  		if (!scmd)
1730  			continue;
1731  		if (scmd->device->id == id &&
1732  		    scmd->device->channel == channel &&
1733  		    scmd->device->lun == lun)
1734  			return 1;
1735  	}
1736  	return 0;
1737  }
1738  
1739  /**
1740   * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1741   * @ioc: per adapter object
1742   * @smid: system request message index
1743   *
1744   * Return: the smid stored scmd pointer.
1745   * Then will dereference the stored scmd pointer.
1746   */
struct scsi_cmnd *
mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct scsi_cmnd *scmd = NULL;
	struct scsiio_tracker *st;
	Mpi25SCSIIORequest_t *mpi_request;
	u16 tag = smid - 1;	/* smids are 1-based, blk-mq tags 0-based */

	/* Only smids belonging to SCSI IO (not internal commands) qualify. */
	if (smid > 0  &&
	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
		/* Rebuild the blk-mq unique tag from the saved hw queue
		 * number and the per-queue tag. */
		u32 unique_tag =
		    ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;

		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

		/*
		 * If SCSI IO request is outstanding at driver level then
		 * DevHandle field must be non-zero. If DevHandle is zero
		 * then it means that this smid is free at driver level,
		 * so return NULL.
		 */
		if (!mpi_request->DevHandle)
			return scmd;

		scmd = scsi_host_find_tag(ioc->shost, unique_tag);
		if (scmd) {
			st = scsi_cmd_priv(scmd);
			/* Reject commands not currently owned by the driver. */
			if (st->cb_idx == 0xFF || st->smid == 0)
				scmd = NULL;
		}
	}
	return scmd;
}
1780  
1781  /**
1782   * scsih_change_queue_depth - setting device queue depth
1783   * @sdev: scsi device struct
1784   * @qdepth: requested queue depth
1785   *
1786   * Return: queue depth.
1787   */
1788  static int
scsih_change_queue_depth(struct scsi_device * sdev,int qdepth)1789  scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1790  {
1791  	struct Scsi_Host *shost = sdev->host;
1792  	int max_depth;
1793  	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1794  	struct MPT3SAS_DEVICE *sas_device_priv_data;
1795  	struct MPT3SAS_TARGET *sas_target_priv_data;
1796  	struct _sas_device *sas_device;
1797  	unsigned long flags;
1798  
1799  	max_depth = shost->can_queue;
1800  
1801  	/*
1802  	 * limit max device queue for SATA to 32 if enable_sdev_max_qd
1803  	 * is disabled.
1804  	 */
1805  	if (ioc->enable_sdev_max_qd || ioc->is_gen35_ioc)
1806  		goto not_sata;
1807  
1808  	sas_device_priv_data = sdev->hostdata;
1809  	if (!sas_device_priv_data)
1810  		goto not_sata;
1811  	sas_target_priv_data = sas_device_priv_data->sas_target;
1812  	if (!sas_target_priv_data)
1813  		goto not_sata;
1814  	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1815  		goto not_sata;
1816  
1817  	spin_lock_irqsave(&ioc->sas_device_lock, flags);
1818  	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1819  	if (sas_device) {
1820  		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1821  			max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1822  
1823  		sas_device_put(sas_device);
1824  	}
1825  	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1826  
1827   not_sata:
1828  
1829  	if (!sdev->tagged_supported)
1830  		max_depth = 1;
1831  	if (qdepth > max_depth)
1832  		qdepth = max_depth;
1833  	scsi_change_queue_depth(sdev, qdepth);
1834  	sdev_printk(KERN_INFO, sdev,
1835  	    "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
1836  	    sdev->queue_depth, sdev->tagged_supported,
1837  	    sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
1838  	return sdev->queue_depth;
1839  }
1840  
1841  /**
1842   * mpt3sas_scsih_change_queue_depth - setting device queue depth
1843   * @sdev: scsi device struct
1844   * @qdepth: requested queue depth
1845   *
1846   * Returns nothing.
1847   */
1848  void
mpt3sas_scsih_change_queue_depth(struct scsi_device * sdev,int qdepth)1849  mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1850  {
1851  	struct Scsi_Host *shost = sdev->host;
1852  	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1853  
1854  	if (ioc->enable_sdev_max_qd)
1855  		qdepth = shost->can_queue;
1856  
1857  	scsih_change_queue_depth(sdev, qdepth);
1858  }
1859  
/**
 * scsih_target_alloc - target add routine
 * @starget: scsi target struct
 *
 * Allocates the per-target private data and, depending on the channel,
 * binds it to the matching RAID volume, PCIe device, or SAS/SATA device.
 * For PCIe and SAS/SATA the __mpt3sas_get_*() lookup takes a device
 * reference that is kept in ->pcie_dev/->sas_dev; scsih_target_destroy()
 * drops it (see its "Corresponding get()" comments).
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_target_alloc(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;
	struct sas_rphy *rphy;

	sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
				       GFP_KERNEL);
	if (!sas_target_priv_data)
		return -ENOMEM;

	starget->hostdata = sas_target_priv_data;
	sas_target_priv_data->starget = starget;
	/* Handle is filled in below once the backing device is found. */
	sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;

	/* RAID volumes */
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			sas_target_priv_data->handle = raid_device->handle;
			sas_target_priv_data->sas_address = raid_device->wwid;
			sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
			if (ioc->is_warpdrive)
				sas_target_priv_data->raid_device = raid_device;
			raid_device->starget = starget;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		return 0;
	}

	/* PCIe devices */
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
			starget->channel);
		if (pcie_device) {
			sas_target_priv_data->handle = pcie_device->handle;
			sas_target_priv_data->sas_address = pcie_device->wwid;
			sas_target_priv_data->port = NULL;
			/* Reference from the get() above is retained here;
			 * released in scsih_target_destroy().
			 */
			sas_target_priv_data->pcie_dev = pcie_device;
			pcie_device->starget = starget;
			pcie_device->id = starget->id;
			pcie_device->channel = starget->channel;
			sas_target_priv_data->flags |=
				MPT_TARGET_FLAGS_PCIE_DEVICE;
			if (pcie_device->fast_path)
				sas_target_priv_data->flags |=
					MPT_TARGET_FASTPATH_IO;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		return 0;
	}

	/* sas/sata devices */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	rphy = dev_to_rphy(starget->dev.parent);
	sas_device = __mpt3sas_get_sdev_by_rphy(ioc, rphy);

	if (sas_device) {
		sas_target_priv_data->handle = sas_device->handle;
		sas_target_priv_data->sas_address = sas_device->sas_address;
		sas_target_priv_data->port = sas_device->port;
		/* Reference from the get() above is retained here;
		 * released in scsih_target_destroy().
		 */
		sas_target_priv_data->sas_dev = sas_device;
		sas_device->starget = starget;
		sas_device->id = starget->id;
		sas_device->channel = starget->channel;
		/* RAID component drives are hidden from the upper layers. */
		if (test_bit(sas_device->handle, ioc->pd_handles))
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
		if (sas_device->fast_path)
			sas_target_priv_data->flags |=
					MPT_TARGET_FASTPATH_IO;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	return 0;
}
1952  
/**
 * scsih_target_destroy - target destroy routine
 * @starget: scsi target struct
 *
 * Unbinds the target private data from its backing RAID volume, PCIe
 * device, or SAS/SATA device, then frees the private data.  For PCIe
 * and SAS/SATA devices two reference puts are made on purpose: one for
 * the lookup performed in this function and one for the reference taken
 * in scsih_target_alloc().
 */
static void
scsih_target_destroy(struct scsi_target *starget)
{
	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_target_priv_data = starget->hostdata;
	if (!sas_target_priv_data)
		return;

	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
		    starget->channel);
		if (raid_device) {
			raid_device->starget = NULL;
			raid_device->sdev = NULL;
		}
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		goto out;
	}

	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
							sas_target_priv_data);
		if (pcie_device && (pcie_device->starget == starget) &&
			(pcie_device->id == starget->id) &&
			(pcie_device->channel == starget->channel))
			pcie_device->starget = NULL;

		if (pcie_device) {
			/*
			 * Corresponding get() is in _scsih_target_alloc()
			 */
			sas_target_priv_data->pcie_dev = NULL;
			/* First put: this function's lookup above.
			 * Second put: the reference held since target_alloc.
			 */
			pcie_device_put(pcie_device);
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		goto out;
	}

	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
	if (sas_device && (sas_device->starget == starget) &&
	    (sas_device->id == starget->id) &&
	    (sas_device->channel == starget->channel))
		sas_device->starget = NULL;

	if (sas_device) {
		/*
		 * Corresponding get() is in _scsih_target_alloc()
		 */
		sas_target_priv_data->sas_dev = NULL;
		/* First put: this function's lookup above. */
		sas_device_put(sas_device);

		/* Second put: the reference held since target_alloc. */
		sas_device_put(sas_device);
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

 out:
	kfree(sas_target_priv_data);
	starget->hostdata = NULL;
}
2027  
/**
 * scsih_slave_alloc - device add routine
 * @sdev: scsi device struct
 *
 * Allocates the per-LUN private data, links it to the target private
 * data (bumping num_luns), and records the starget pointer on the
 * backing PCIe or SAS device if it is not set yet.
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_slave_alloc(struct scsi_device *sdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
				       GFP_KERNEL);
	if (!sas_device_priv_data)
		return -ENOMEM;

	sas_device_priv_data->lun = sdev->lun;
	sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns++;
	sas_device_priv_data->sas_target = sas_target_priv_data;
	sdev->hostdata = sas_device_priv_data;
	/* RAID component drives must not be claimed by upper-level drivers. */
	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
		sdev->no_uld_attach = 1;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);
	if (starget->channel == RAID_CHANNEL) {
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = _scsih_raid_device_find_by_id(ioc,
		    starget->id, starget->channel);
		if (raid_device)
			raid_device->sdev = sdev; /* raid is single lun */
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
	if (starget->channel == PCIE_CHANNEL) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
				sas_target_priv_data->sas_address);
		if (pcie_device && (pcie_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			    "%s : pcie_device->starget set to starget @ %d\n",
			    __func__, __LINE__);
			pcie_device->starget = starget;
		}

		/* Drop the reference taken by the lookup above. */
		if (pcie_device)
			pcie_device_put(pcie_device);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else  if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_by_addr(ioc,
		    sas_target_priv_data->sas_address,
		    sas_target_priv_data->port);
		if (sas_device && (sas_device->starget == NULL)) {
			sdev_printk(KERN_INFO, sdev,
			"%s : sas_device->starget set to starget @ %d\n",
			     __func__, __LINE__);
			sas_device->starget = starget;
		}

		/* Drop the reference taken by the lookup above. */
		if (sas_device)
			sas_device_put(sas_device);

		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}

	return 0;
}
2109  
/**
 * scsih_slave_destroy - device destroy routine
 * @sdev: scsi device struct
 *
 * Decrements the target's LUN count, clears the backing device's
 * starget pointer when the last LUN goes away, and frees the per-LUN
 * private data allocated in scsih_slave_alloc().
 */
static void
scsih_slave_destroy(struct scsi_device *sdev)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget;
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	if (!sdev->hostdata)
		return;

	starget = scsi_target(sdev);
	sas_target_priv_data = starget->hostdata;
	sas_target_priv_data->num_luns--;

	shost = dev_to_shost(&starget->dev);
	ioc = shost_priv(shost);

	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc,
				sas_target_priv_data);
		/* Only unbind the starget once the last LUN is gone. */
		if (pcie_device && !sas_target_priv_data->num_luns)
			pcie_device->starget = NULL;

		/* Drop the reference taken by the lookup above. */
		if (pcie_device)
			pcie_device_put(pcie_device);

		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_from_target(ioc,
				sas_target_priv_data);
		/* Only unbind the starget once the last LUN is gone. */
		if (sas_device && !sas_target_priv_data->num_luns)
			sas_device->starget = NULL;

		/* Drop the reference taken by the lookup above. */
		if (sas_device)
			sas_device_put(sas_device);
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}

	kfree(sdev->hostdata);
	sdev->hostdata = NULL;
}
2162  
2163  /**
2164   * _scsih_display_sata_capabilities - sata capabilities
2165   * @ioc: per adapter object
2166   * @handle: device handle
2167   * @sdev: scsi device struct
2168   */
2169  static void
_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER * ioc,u16 handle,struct scsi_device * sdev)2170  _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
2171  	u16 handle, struct scsi_device *sdev)
2172  {
2173  	Mpi2ConfigReply_t mpi_reply;
2174  	Mpi2SasDevicePage0_t sas_device_pg0;
2175  	u32 ioc_status;
2176  	u16 flags;
2177  	u32 device_info;
2178  
2179  	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
2180  	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
2181  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2182  			__FILE__, __LINE__, __func__);
2183  		return;
2184  	}
2185  
2186  	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
2187  	    MPI2_IOCSTATUS_MASK;
2188  	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
2189  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2190  			__FILE__, __LINE__, __func__);
2191  		return;
2192  	}
2193  
2194  	flags = le16_to_cpu(sas_device_pg0.Flags);
2195  	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
2196  
2197  	sdev_printk(KERN_INFO, sdev,
2198  	    "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
2199  	    "sw_preserve(%s)\n",
2200  	    (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
2201  	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
2202  	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
2203  	    "n",
2204  	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
2205  	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
2206  	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
2207  }
2208  
/*
 * raid transport support -
 * Enabled for SLES11 and newer. In older kernels the driver would panic
 * when unloading the driver followed by a load - I believe the subroutine
 * raid_class_release() is not cleaning up properly.
 */
2215  
2216  /**
2217   * scsih_is_raid - return boolean indicating device is raid volume
2218   * @dev: the device struct object
2219   */
2220  static int
scsih_is_raid(struct device * dev)2221  scsih_is_raid(struct device *dev)
2222  {
2223  	struct scsi_device *sdev = to_scsi_device(dev);
2224  	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2225  
2226  	if (ioc->is_warpdrive)
2227  		return 0;
2228  	return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
2229  }
2230  
2231  static int
scsih_is_nvme(struct device * dev)2232  scsih_is_nvme(struct device *dev)
2233  {
2234  	struct scsi_device *sdev = to_scsi_device(dev);
2235  
2236  	return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
2237  }
2238  
2239  /**
2240   * scsih_get_resync - get raid volume resync percent complete
2241   * @dev: the device struct object
2242   */
2243  static void
scsih_get_resync(struct device * dev)2244  scsih_get_resync(struct device *dev)
2245  {
2246  	struct scsi_device *sdev = to_scsi_device(dev);
2247  	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2248  	static struct _raid_device *raid_device;
2249  	unsigned long flags;
2250  	Mpi2RaidVolPage0_t vol_pg0;
2251  	Mpi2ConfigReply_t mpi_reply;
2252  	u32 volume_status_flags;
2253  	u8 percent_complete;
2254  	u16 handle;
2255  
2256  	percent_complete = 0;
2257  	handle = 0;
2258  	if (ioc->is_warpdrive)
2259  		goto out;
2260  
2261  	spin_lock_irqsave(&ioc->raid_device_lock, flags);
2262  	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2263  	    sdev->channel);
2264  	if (raid_device) {
2265  		handle = raid_device->handle;
2266  		percent_complete = raid_device->percent_complete;
2267  	}
2268  	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2269  
2270  	if (!handle)
2271  		goto out;
2272  
2273  	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2274  	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2275  	     sizeof(Mpi2RaidVolPage0_t))) {
2276  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2277  			__FILE__, __LINE__, __func__);
2278  		percent_complete = 0;
2279  		goto out;
2280  	}
2281  
2282  	volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2283  	if (!(volume_status_flags &
2284  	    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
2285  		percent_complete = 0;
2286  
2287   out:
2288  
2289  	switch (ioc->hba_mpi_version_belonged) {
2290  	case MPI2_VERSION:
2291  		raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
2292  		break;
2293  	case MPI25_VERSION:
2294  	case MPI26_VERSION:
2295  		raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
2296  		break;
2297  	}
2298  }
2299  
2300  /**
2301   * scsih_get_state - get raid volume level
2302   * @dev: the device struct object
2303   */
2304  static void
scsih_get_state(struct device * dev)2305  scsih_get_state(struct device *dev)
2306  {
2307  	struct scsi_device *sdev = to_scsi_device(dev);
2308  	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
2309  	static struct _raid_device *raid_device;
2310  	unsigned long flags;
2311  	Mpi2RaidVolPage0_t vol_pg0;
2312  	Mpi2ConfigReply_t mpi_reply;
2313  	u32 volstate;
2314  	enum raid_state state = RAID_STATE_UNKNOWN;
2315  	u16 handle = 0;
2316  
2317  	spin_lock_irqsave(&ioc->raid_device_lock, flags);
2318  	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
2319  	    sdev->channel);
2320  	if (raid_device)
2321  		handle = raid_device->handle;
2322  	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2323  
2324  	if (!raid_device)
2325  		goto out;
2326  
2327  	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2328  	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2329  	     sizeof(Mpi2RaidVolPage0_t))) {
2330  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
2331  			__FILE__, __LINE__, __func__);
2332  		goto out;
2333  	}
2334  
2335  	volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2336  	if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2337  		state = RAID_STATE_RESYNCING;
2338  		goto out;
2339  	}
2340  
2341  	switch (vol_pg0.VolumeState) {
2342  	case MPI2_RAID_VOL_STATE_OPTIMAL:
2343  	case MPI2_RAID_VOL_STATE_ONLINE:
2344  		state = RAID_STATE_ACTIVE;
2345  		break;
2346  	case  MPI2_RAID_VOL_STATE_DEGRADED:
2347  		state = RAID_STATE_DEGRADED;
2348  		break;
2349  	case MPI2_RAID_VOL_STATE_FAILED:
2350  	case MPI2_RAID_VOL_STATE_MISSING:
2351  		state = RAID_STATE_OFFLINE;
2352  		break;
2353  	}
2354   out:
2355  	switch (ioc->hba_mpi_version_belonged) {
2356  	case MPI2_VERSION:
2357  		raid_set_state(mpt2sas_raid_template, dev, state);
2358  		break;
2359  	case MPI25_VERSION:
2360  	case MPI26_VERSION:
2361  		raid_set_state(mpt3sas_raid_template, dev, state);
2362  		break;
2363  	}
2364  }
2365  
2366  /**
2367   * _scsih_set_level - set raid level
2368   * @ioc: ?
2369   * @sdev: scsi device struct
2370   * @volume_type: volume type
2371   */
2372  static void
_scsih_set_level(struct MPT3SAS_ADAPTER * ioc,struct scsi_device * sdev,u8 volume_type)2373  _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2374  	struct scsi_device *sdev, u8 volume_type)
2375  {
2376  	enum raid_level level = RAID_LEVEL_UNKNOWN;
2377  
2378  	switch (volume_type) {
2379  	case MPI2_RAID_VOL_TYPE_RAID0:
2380  		level = RAID_LEVEL_0;
2381  		break;
2382  	case MPI2_RAID_VOL_TYPE_RAID10:
2383  		level = RAID_LEVEL_10;
2384  		break;
2385  	case MPI2_RAID_VOL_TYPE_RAID1E:
2386  		level = RAID_LEVEL_1E;
2387  		break;
2388  	case MPI2_RAID_VOL_TYPE_RAID1:
2389  		level = RAID_LEVEL_1;
2390  		break;
2391  	}
2392  
2393  	switch (ioc->hba_mpi_version_belonged) {
2394  	case MPI2_VERSION:
2395  		raid_set_level(mpt2sas_raid_template,
2396  			&sdev->sdev_gendev, level);
2397  		break;
2398  	case MPI25_VERSION:
2399  	case MPI26_VERSION:
2400  		raid_set_level(mpt3sas_raid_template,
2401  			&sdev->sdev_gendev, level);
2402  		break;
2403  	}
2404  }
2405  
2406  
2407  /**
2408   * _scsih_get_volume_capabilities - volume capabilities
2409   * @ioc: per adapter object
2410   * @raid_device: the raid_device object
2411   *
2412   * Return: 0 for success, else 1
2413   */
2414  static int
_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER * ioc,struct _raid_device * raid_device)2415  _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2416  	struct _raid_device *raid_device)
2417  {
2418  	Mpi2RaidVolPage0_t *vol_pg0;
2419  	Mpi2RaidPhysDiskPage0_t pd_pg0;
2420  	Mpi2SasDevicePage0_t sas_device_pg0;
2421  	Mpi2ConfigReply_t mpi_reply;
2422  	u16 sz;
2423  	u8 num_pds;
2424  
2425  	if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2426  	    &num_pds)) || !num_pds) {
2427  		dfailprintk(ioc,
2428  			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2429  				     __FILE__, __LINE__, __func__));
2430  		return 1;
2431  	}
2432  
2433  	raid_device->num_pds = num_pds;
2434  	sz = struct_size(vol_pg0, PhysDisk, num_pds);
2435  	vol_pg0 = kzalloc(sz, GFP_KERNEL);
2436  	if (!vol_pg0) {
2437  		dfailprintk(ioc,
2438  			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2439  				     __FILE__, __LINE__, __func__));
2440  		return 1;
2441  	}
2442  
2443  	if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2444  	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2445  		dfailprintk(ioc,
2446  			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2447  				     __FILE__, __LINE__, __func__));
2448  		kfree(vol_pg0);
2449  		return 1;
2450  	}
2451  
2452  	raid_device->volume_type = vol_pg0->VolumeType;
2453  
2454  	/* figure out what the underlying devices are by
2455  	 * obtaining the device_info bits for the 1st device
2456  	 */
2457  	if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2458  	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2459  	    vol_pg0->PhysDisk[0].PhysDiskNum))) {
2460  		if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2461  		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2462  		    le16_to_cpu(pd_pg0.DevHandle)))) {
2463  			raid_device->device_info =
2464  			    le32_to_cpu(sas_device_pg0.DeviceInfo);
2465  		}
2466  	}
2467  
2468  	kfree(vol_pg0);
2469  	return 0;
2470  }
2471  
2472  /**
2473   * _scsih_enable_tlr - setting TLR flags
2474   * @ioc: per adapter object
2475   * @sdev: scsi device struct
2476   *
2477   * Enabling Transaction Layer Retries for tape devices when
2478   * vpd page 0x90 is present
2479   *
2480   */
2481  static void
_scsih_enable_tlr(struct MPT3SAS_ADAPTER * ioc,struct scsi_device * sdev)2482  _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2483  {
2484  
2485  	/* only for TAPE */
2486  	if (sdev->type != TYPE_TAPE)
2487  		return;
2488  
2489  	if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2490  		return;
2491  
2492  	sas_enable_tlr(sdev);
2493  	sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2494  	    sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2495  	return;
2496  
2497  }
2498  
/**
 * scsih_device_configure - device configure routine.
 * @sdev: scsi device struct
 * @lim: queue limits
 *
 * Configures queue depth and queue limits for the device based on what
 * backs it: a RAID volume, a PCIe (NVMe) device, or a SAS/SATA device.
 * Also prints an identification banner for the device.
 *
 * Return: 0 if ok. Any other return is assumed to be an error and
 * the device is ignored.
 */
static int
scsih_device_configure(struct scsi_device *sdev, struct queue_limits *lim)
{
	struct Scsi_Host *shost = sdev->host;
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct _sas_device *sas_device;
	struct _pcie_device *pcie_device;
	struct _raid_device *raid_device;
	unsigned long flags;
	int qdepth;
	u8 ssp_target = 0;
	char *ds = "";
	char *r_level = "";
	u16 handle, volume_handle = 0;
	u64 volume_wwid = 0;

	qdepth = 1;
	sas_device_priv_data = sdev->hostdata;
	sas_device_priv_data->configured_lun = 1;
	sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
	sas_target_priv_data = sas_device_priv_data->sas_target;
	handle = sas_target_priv_data->handle;

	/* raid volume handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {

		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
		if (!raid_device) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		if (_scsih_get_volume_capabilities(ioc, raid_device)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		/*
		 * WARPDRIVE: Initialize the required data for Direct IO
		 */
		mpt3sas_init_warpdrive_properties(ioc, raid_device);

		/* RAID Queue Depth Support
		 * IS volume = underlying qdepth of drive type, either
		 *    MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
		 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
		 */
		if (raid_device->device_info &
		    MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
			qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
			ds = "SSP";
		} else {
			qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
			if (raid_device->device_info &
			    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
				ds = "SATA";
			else
				ds = "STP";
		}

		switch (raid_device->volume_type) {
		case MPI2_RAID_VOL_TYPE_RAID0:
			r_level = "RAID0";
			break;
		case MPI2_RAID_VOL_TYPE_RAID1E:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			/* Display RAID1E with an even PD count as RAID10
			 * when manufacturing page 10 requests it.
			 */
			if (ioc->manu_pg10.OEMIdentifier &&
			    (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
			    MFG10_GF0_R10_DISPLAY) &&
			    !(raid_device->num_pds % 2))
				r_level = "RAID10";
			else
				r_level = "RAID1E";
			break;
		case MPI2_RAID_VOL_TYPE_RAID1:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAID1";
			break;
		case MPI2_RAID_VOL_TYPE_RAID10:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAID10";
			break;
		case MPI2_RAID_VOL_TYPE_UNKNOWN:
		default:
			qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
			r_level = "RAIDX";
			break;
		}

		if (!ioc->hide_ir_msg)
			sdev_printk(KERN_INFO, sdev,
			   "%s: handle(0x%04x), wwid(0x%016llx),"
			    " pd_count(%d), type(%s)\n",
			    r_level, raid_device->handle,
			    (unsigned long long)raid_device->wwid,
			    raid_device->num_pds, ds);

		/* Cap the transfer size for RAID volumes. */
		if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
			lim->max_hw_sectors = MPT3SAS_RAID_MAX_SECTORS;
			sdev_printk(KERN_INFO, sdev,
					"Set queue's max_sector to: %u\n",
						MPT3SAS_RAID_MAX_SECTORS);
		}

		mpt3sas_scsih_change_queue_depth(sdev, qdepth);

		/* raid transport support */
		if (!ioc->is_warpdrive)
			_scsih_set_level(ioc, sdev, raid_device->volume_type);
		return 0;
	}

	/* non-raid handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
		/* RAID component drives record their parent volume's
		 * handle and wwid (stored on sas_device below).
		 */
		if (mpt3sas_config_get_volume_handle(ioc, handle,
		    &volume_handle)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}
		if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
		    volume_handle, &volume_wwid)) {
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}
	}

	/* PCIe handling */
	if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
				sas_device_priv_data->sas_target->sas_address);
		if (!pcie_device) {
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
			dfailprintk(ioc,
				    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
					     __FILE__, __LINE__, __func__));
			return 1;
		}

		qdepth = ioc->max_nvme_qd;
		ds = "NVMe";
		sdev_printk(KERN_INFO, sdev,
			"%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
			ds, handle, (unsigned long long)pcie_device->wwid,
			pcie_device->port_num);
		if (pcie_device->enclosure_handle != 0)
			sdev_printk(KERN_INFO, sdev,
			"%s: enclosure logical id(0x%016llx), slot(%d)\n",
			ds,
			(unsigned long long)pcie_device->enclosure_logical_id,
			pcie_device->slot);
		if (pcie_device->connector_name[0] != '\0')
			sdev_printk(KERN_INFO, sdev,
				"%s: enclosure level(0x%04x),"
				"connector name( %s)\n", ds,
				pcie_device->enclosure_level,
				pcie_device->connector_name);

		/* nvme_mdts is in bytes; convert to 512-byte sectors. */
		if (pcie_device->nvme_mdts)
			lim->max_hw_sectors = pcie_device->nvme_mdts / 512;

		pcie_device_put(pcie_device);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		mpt3sas_scsih_change_queue_depth(sdev, qdepth);
		lim->virt_boundary_mask = ioc->page_size - 1;
		return 0;
	}

	/* sas/sata handling */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
	   sas_device_priv_data->sas_target->sas_address,
	   sas_device_priv_data->sas_target->port);
	if (!sas_device) {
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		dfailprintk(ioc,
			    ioc_warn(ioc, "failure at %s:%d/%s()!\n",
				     __FILE__, __LINE__, __func__));
		return 1;
	}

	sas_device->volume_handle = volume_handle;
	sas_device->volume_wwid = volume_wwid;
	if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
		/* Wide ports (port_type > 1) get the deeper queue. */
		qdepth = (sas_device->port_type > 1) ?
			ioc->max_wideport_qd : ioc->max_narrowport_qd;
		ssp_target = 1;
		if (sas_device->device_info &
				MPI2_SAS_DEVICE_INFO_SEP) {
			sdev_printk(KERN_WARNING, sdev,
			"set ignore_delay_remove for handle(0x%04x)\n",
			sas_device_priv_data->sas_target->handle);
			sas_device_priv_data->ignore_delay_remove = 1;
			ds = "SES";
		} else
			ds = "SSP";
	} else {
		qdepth = ioc->max_sata_qd;
		if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
			ds = "STP";
		else if (sas_device->device_info &
		    MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
			ds = "SATA";
	}

	sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
	    "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
	    ds, handle, (unsigned long long)sas_device->sas_address,
	    sas_device->phy, (unsigned long long)sas_device->device_name);

	_scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);

	sas_device_put(sas_device);
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/* Non-SSP means SATA/STP: show the SATA capability bits. */
	if (!ssp_target)
		_scsih_display_sata_capabilities(ioc, handle, sdev);


	mpt3sas_scsih_change_queue_depth(sdev, qdepth);

	if (ssp_target) {
		sas_read_port_mode_page(sdev);
		_scsih_enable_tlr(ioc, sdev);
	}

	return 0;
}
2746  
2747  /**
2748   * scsih_bios_param - fetch head, sector, cylinder info for a disk
2749   * @sdev: scsi device struct
2750   * @bdev: pointer to block device context
2751   * @capacity: device size (in 512 byte sectors)
2752   * @params: three element array to place output:
2753   *              params[0] number of heads (max 255)
2754   *              params[1] number of sectors (max 63)
2755   *              params[2] number of cylinders
2756   */
2757  static int
scsih_bios_param(struct scsi_device * sdev,struct block_device * bdev,sector_t capacity,int params[])2758  scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2759  	sector_t capacity, int params[])
2760  {
2761  	int		heads;
2762  	int		sectors;
2763  	sector_t	cylinders;
2764  	ulong		dummy;
2765  
2766  	heads = 64;
2767  	sectors = 32;
2768  
2769  	dummy = heads * sectors;
2770  	cylinders = capacity;
2771  	sector_div(cylinders, dummy);
2772  
2773  	/*
2774  	 * Handle extended translation size for logical drives
2775  	 * > 1Gb
2776  	 */
2777  	if ((ulong)capacity >= 0x200000) {
2778  		heads = 255;
2779  		sectors = 63;
2780  		dummy = heads * sectors;
2781  		cylinders = capacity;
2782  		sector_div(cylinders, dummy);
2783  	}
2784  
2785  	/* return result */
2786  	params[0] = heads;
2787  	params[1] = sectors;
2788  	params[2] = cylinders;
2789  
2790  	return 0;
2791  }
2792  
2793  /**
2794   * _scsih_response_code - translation of device response code
2795   * @ioc: per adapter object
2796   * @response_code: response code returned by the device
2797   */
2798  static void
_scsih_response_code(struct MPT3SAS_ADAPTER * ioc,u8 response_code)2799  _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2800  {
2801  	char *desc;
2802  
2803  	switch (response_code) {
2804  	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2805  		desc = "task management request completed";
2806  		break;
2807  	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2808  		desc = "invalid frame";
2809  		break;
2810  	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2811  		desc = "task management request not supported";
2812  		break;
2813  	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2814  		desc = "task management request failed";
2815  		break;
2816  	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2817  		desc = "task management request succeeded";
2818  		break;
2819  	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2820  		desc = "invalid lun";
2821  		break;
2822  	case 0xA:
2823  		desc = "overlapped tag attempted";
2824  		break;
2825  	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2826  		desc = "task queued, however not sent to target";
2827  		break;
2828  	default:
2829  		desc = "unknown";
2830  		break;
2831  	}
2832  	ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2833  }
2834  
2835  /**
2836   * _scsih_tm_done - tm completion routine
2837   * @ioc: per adapter object
2838   * @smid: system request message index
2839   * @msix_index: MSIX table index supplied by the OS
2840   * @reply: reply message frame(lower 32bit addr)
2841   * Context: none.
2842   *
2843   * The callback handler when using scsih_issue_tm.
2844   *
2845   * Return: 1 meaning mf should be freed from _base_interrupt
2846   *         0 means the mf is freed from this function.
2847   */
static u8
_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	/* ignore replies that do not belong to the active TM command */
	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
		return 1;
	if (ioc->tm_cmds.smid != smid)
		return 1;
	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply) {
		/* MsgLength is in 32-bit dwords, hence the *4 byte count */
		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
	}
	/* clear PENDING before waking the waiter in mpt3sas_scsih_issue_tm() */
	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
	complete(&ioc->tm_cmds.done);
	return 1;
}
2867  
2868  /**
2869   * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2870   * @ioc: per adapter object
2871   * @handle: device handle
2872   *
 * During a task management request, we need to freeze the device queue.
2874   */
void
mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	u8 skip = 0;	/* set once the matching target has been flagged */

	shost_for_each_device(sdev, ioc->shost) {
		/*
		 * Once the target is found we keep iterating with continue
		 * rather than breaking out of shost_for_each_device(), which
		 * lets the iterator finish its own reference handling.
		 */
		if (skip)
			continue;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			/* freeze queueing to this target while TM is active */
			sas_device_priv_data->sas_target->tm_busy = 1;
			skip = 1;
			ioc->ignore_loginfos = 1;
		}
	}
}
2895  
2896  /**
2897   * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2898   * @ioc: per adapter object
2899   * @handle: device handle
2900   *
 * During a task management request, we need to freeze the device queue.
2902   */
void
mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsi_device *sdev;
	u8 skip = 0;	/* set once the matching target has been unflagged */

	shost_for_each_device(sdev, ioc->shost) {
		/*
		 * Mirror of mpt3sas_scsih_set_tm_flag(): continue instead of
		 * break so the shost_for_each_device() iterator runs to the
		 * end and handles its own device references.
		 */
		if (skip)
			continue;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data)
			continue;
		if (sas_device_priv_data->sas_target->handle == handle) {
			/* unfreeze queueing to this target */
			sas_device_priv_data->sas_target->tm_busy = 0;
			skip = 1;
			ioc->ignore_loginfos = 0;
		}
	}
}
2923  
2924  /**
2925   * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
2926   * @ioc: per adapter object
2927   * @channel: the channel assigned by the OS
2928   * @id: the id assigned by the OS
2929   * @lun: lun number
2930   * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2931   * @smid_task: smid assigned to the task
2932   *
2933   * Look whether TM has aborted the timed out SCSI command, if
2934   * TM has aborted the IO then return SUCCESS else return FAILED.
2935   */
static int
scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
	uint id, uint lun, u8 type, u16 smid_task)
{

	if (smid_task <= ioc->shost->can_queue) {
		/* smid of a normal SCSI IO submitted through the midlayer */
		switch (type) {
		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
			/* no IOs left outstanding on this target: TM worked */
			if (!(_scsih_scsi_lookup_find_by_target(ioc,
			    id, channel)))
				return SUCCESS;
			break;
		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
			/* no IOs left outstanding on this LUN: TM worked */
			if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
			    lun, channel)))
				return SUCCESS;
			break;
		default:
			return SUCCESS;
		}
	} else if (smid_task == ioc->scsih_cmds.smid) {
		/* driver-internal scsih command: done once complete or freed */
		if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
		    (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
			return SUCCESS;
	} else if (smid_task == ioc->ctl_cmds.smid) {
		/* ioctl/ctl command: done once complete or freed */
		if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
		    (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
			return SUCCESS;
	}

	return FAILED;
}
2969  
2970  /**
2971   * scsih_tm_post_processing - post processing of target & LUN reset
2972   * @ioc: per adapter object
2973   * @handle: device handle
2974   * @channel: the channel assigned by the OS
2975   * @id: the id assigned by the OS
2976   * @lun: lun number
2977   * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2978   * @smid_task: smid assigned to the task
2979   *
2980   * Post processing of target & LUN reset. Due to interrupt latency
 * issue it is possible that the interrupt for the aborted IO might not be
2982   * received yet. So before returning failure status, poll the
2983   * reply descriptor pools for the reply of timed out SCSI command.
2984   * Return FAILED status if reply for timed out is not received
2985   * otherwise return SUCCESS.
2986   */
static int
scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
	uint channel, uint id, uint lun, u8 type, u16 smid_task)
{
	int rc;

	/* first check: has the timed-out IO already been returned to SML? */
	rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
	if (rc == SUCCESS)
		return rc;

	ioc_info(ioc,
	    "Poll ReplyDescriptor queues for completion of"
	    " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
	    smid_task, type, handle);

	/*
	 * Due to interrupt latency issues, driver may receive interrupt for
	 * TM first and then for aborted SCSI IO command. So, poll all the
	 * ReplyDescriptor pools before returning the FAILED status to SML.
	 */
	mpt3sas_base_mask_interrupts(ioc);
	mpt3sas_base_sync_reply_irqs(ioc, 1);
	mpt3sas_base_unmask_interrupts(ioc);

	/* re-check after draining the reply queues */
	return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
}
3013  
3014  /**
3015   * mpt3sas_scsih_issue_tm - main routine for sending tm requests
3016   * @ioc: per adapter struct
3017   * @handle: device handle
3018   * @channel: the channel assigned by the OS
3019   * @id: the id assigned by the OS
3020   * @lun: lun number
3021   * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
3022   * @smid_task: smid assigned to the task
3023   * @msix_task: MSIX table index supplied by the OS
3024   * @timeout: timeout in seconds
3025   * @tr_method: Target Reset Method
3026   * Context: user
3027   *
3028   * A generic API for sending task management requests to firmware.
3029   *
3030   * The callback index is set inside `ioc->tm_cb_idx`.
3031   * The caller is responsible to check for outstanding commands.
3032   *
3033   * Return: SUCCESS or FAILED.
3034   */
int
mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
	uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
	u8 timeout, u8 tr_method)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi25SCSIIORequest_t *request;
	u16 smid = 0;
	u32 ioc_state;
	int rc;
	u8 issue_reset = 0;

	/* serialization of TM commands is the caller's responsibility */
	lockdep_assert_held(&ioc->tm_cmds.mutex);

	if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
		return FAILED;
	}

	if (ioc->shost_recovery || ioc->remove_host ||
	    ioc->pci_error_recovery) {
		ioc_info(ioc, "%s: host reset in progress!\n", __func__);
		return FAILED;
	}

	/*
	 * IOC health checks: an active doorbell, FAULT state or COREDUMP
	 * state all mean the TM cannot be serviced; escalate to a hard
	 * reset and report its outcome instead.
	 */
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_print_fault_code(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP) {
		mpt3sas_print_coredump_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		return (!rc) ? SUCCESS : FAILED;
	}

	/* TM requests go through the high-priority request queue */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		return FAILED;
	}

	dtmprintk(ioc,
		  ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
			   handle, type, smid_task, timeout, tr_method));
	ioc->tm_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->tm_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = type;
	/* tr_method (reset method) is only carried for abort/query task */
	if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
	    type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
		mpi_request->MsgFlags = tr_method;
	mpi_request->TaskMID = cpu_to_le16(smid_task);
	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
	/* freeze the target's queue while the TM is outstanding */
	mpt3sas_scsih_set_tm_flag(ioc, handle);
	init_completion(&ioc->tm_cmds.done);
	ioc->put_smid_hi_priority(ioc, smid, msix_task);
	wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
	if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
		/*
		 * NOTE(review): mpt3sas_check_cmd_timeout() is presumably a
		 * macro that can set issue_reset (it is passed by name, not
		 * address) - confirm against mpt3sas_base.h.
		 */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->tm_cmds.status, mpi_request,
		    sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
		if (issue_reset) {
			rc = mpt3sas_base_hard_reset_handler(ioc,
					FORCE_BIG_HAMMER);
			rc = (!rc) ? SUCCESS : FAILED;
			goto out;
		}
	}

	/* sync IRQs in case those were busy during flush. */
	mpt3sas_base_sync_reply_irqs(ioc, 0);

	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
		mpi_reply = ioc->tm_cmds.reply;
		dtmprintk(ioc,
			  ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
				   le16_to_cpu(mpi_reply->IOCStatus),
				   le32_to_cpu(mpi_reply->IOCLogInfo),
				   le32_to_cpu(mpi_reply->TerminationCount)));
		if (ioc->logging_level & MPT_DEBUG_TM) {
			_scsih_response_code(ioc, mpi_reply->ResponseCode);
			if (mpi_reply->IOCStatus)
				_debug_dump_mf(mpi_request,
				    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
		}
	}

	/* derive SUCCESS/FAILED per task type */
	switch (type) {
	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
		rc = SUCCESS;
		/*
		 * If DevHandle filed in smid_task's entry of request pool
		 * doesn't match with device handle on which this task abort
		 * TM is received then it means that TM has successfully
		 * aborted the timed out command. Since smid_task's entry in
		 * request pool will be memset to zero once the timed out
		 * command is returned to the SML. If the command is not
		 * aborted then smid_task's entry won't be cleared and it
		 * will have same DevHandle value on which this task abort TM
		 * is received and driver will return the TM status as FAILED.
		 */
		request = mpt3sas_base_get_msg_frame(ioc, smid_task);
		if (le16_to_cpu(request->DevHandle) != handle)
			break;

		ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
		    "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
		    handle, timeout, tr_method, smid_task, msix_task);
		rc = FAILED;
		break;

	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
		rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
		    type, smid_task);
		break;
	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
		rc = SUCCESS;
		break;
	default:
		rc = FAILED;
		break;
	}

out:
	/* unfreeze the target queue and release the TM slot */
	mpt3sas_scsih_clear_tm_flag(ioc, handle);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	return rc;
}
3181  
/**
 * mpt3sas_scsih_issue_locked_tm - issue a tm request under tm_cmds.mutex
 * @ioc: per adapter struct
 * @handle: device handle
 * @channel: the channel assigned by the OS
 * @id: the id assigned by the OS
 * @lun: lun number
 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
 * @smid_task: smid assigned to the task
 * @msix_task: MSIX table index supplied by the OS
 * @timeout: timeout in seconds
 * @tr_method: Target Reset Method
 *
 * Convenience wrapper that takes ioc->tm_cmds.mutex around
 * mpt3sas_scsih_issue_tm(), which asserts the mutex is held.
 *
 * Return: SUCCESS or FAILED.
 */
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
		uint channel, uint id, u64 lun, u8 type, u16 smid_task,
		u16 msix_task, u8 timeout, u8 tr_method)
{
	int ret;

	mutex_lock(&ioc->tm_cmds.mutex);
	ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
			smid_task, msix_task, timeout, tr_method);
	mutex_unlock(&ioc->tm_cmds.mutex);

	return ret;
}
3195  
3196  /**
3197   * _scsih_tm_display_info - displays info about the device
3198   * @ioc: per adapter struct
3199   * @scmd: pointer to scsi command object
3200   *
3201   * Called by task management callback handlers.
3202   */
static void
_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
{
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	unsigned long flags;
	char *device_str = NULL;

	if (!priv_target)
		return;
	/* label volumes "WarpDrive" when IR messages are hidden */
	if (ioc->hide_ir_msg)
		device_str = "WarpDrive";
	else
		device_str = "volume";

	scsi_print_command(scmd);
	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		/* RAID volume target */
		starget_printk(KERN_INFO, starget,
			"%s handle(0x%04x), %s wwid(0x%016llx)\n",
			device_str, priv_target->handle,
		    device_str, (unsigned long long)priv_target->sas_address);

	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
		/* NVMe/PCIe attached device; lookup under pcie_device_lock */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
		if (pcie_device) {
			starget_printk(KERN_INFO, starget,
				"handle(0x%04x), wwid(0x%016llx), port(%d)\n",
				pcie_device->handle,
				(unsigned long long)pcie_device->wwid,
				pcie_device->port_num);
			if (pcie_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
					"enclosure logical id(0x%016llx), slot(%d)\n",
					(unsigned long long)
					pcie_device->enclosure_logical_id,
					pcie_device->slot);
			if (pcie_device->connector_name[0] != '\0')
				starget_printk(KERN_INFO, starget,
					"enclosure level(0x%04x), connector name( %s)\n",
					pcie_device->enclosure_level,
					pcie_device->connector_name);
			pcie_device_put(pcie_device);
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	} else {
		/* plain SAS/SATA device; lookup under sas_device_lock */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
		if (sas_device) {
			if (priv_target->flags &
			    MPT_TARGET_FLAGS_RAID_COMPONENT) {
				/* also show the owning volume for hidden
				 * RAID components */
				starget_printk(KERN_INFO, starget,
				    "volume handle(0x%04x), "
				    "volume wwid(0x%016llx)\n",
				    sas_device->volume_handle,
				   (unsigned long long)sas_device->volume_wwid);
			}
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
			    sas_device->handle,
			    (unsigned long long)sas_device->sas_address,
			    sas_device->phy);

			_scsih_display_enclosure_chassis_info(NULL, sas_device,
			    NULL, starget);

			sas_device_put(sas_device);
		}
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	}
}
3277  
3278  /**
3279   * scsih_abort - eh threads main abort routine
3280   * @scmd: pointer to scsi command object
3281   *
3282   * Return: SUCCESS if command aborted else FAILED
3283   */
static int
scsih_abort(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
	u16 handle;
	int r;

	u8 timeout = 30;	/* seconds; overridden for NVMe below */
	struct _pcie_device *pcie_device = NULL;
	sdev_printk(KERN_INFO, scmd->device, "attempting task abort!"
	    "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
	    scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
	    (scsi_cmd_to_rq(scmd)->timeout / HZ) * 1000);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		/* device is gone; finish the command as NO_CONNECT */
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* check for completed command */
	if (st == NULL || st->cb_idx == 0xFF) {
		sdev_printk(KERN_INFO, scmd->device, "No reference found at "
		    "driver, assuming scmd(0x%p) might have completed\n", scmd);
		scmd->result = DID_RESET << 16;
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components and volumes this is not supported */
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT ||
	    sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	/* NOTE(review): presumably halts FW for diag capture when enabled -
	 * confirm against mpt3sas_halt_firmware() */
	mpt3sas_halt_firmware(ioc);

	handle = sas_device_priv_data->sas_target->handle;
	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
	/* NVMe (non-SCSI) PCIe devices use their own abort timeout */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
		timeout = ioc->nvme_abort_timeout;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
		scmd->device->id, scmd->device->lun,
		MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
		st->smid, st->msix_io, timeout, 0);
	/* Command must be cleared after abort */
	if (r == SUCCESS && st->cb_idx != 0xFF)
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}
3351  
3352  /**
3353   * scsih_dev_reset - eh threads main device reset routine
3354   * @scmd: pointer to scsi command object
3355   *
3356   * Return: SUCCESS if command aborted else FAILED
3357   */
static int
scsih_dev_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16	handle;
	u8	tr_method = 0;
	u8	tr_timeout = 30;	/* seconds; overridden for NVMe below */
	int r;

	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	sdev_printk(KERN_INFO, scmd->device,
	    "attempting device reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		/* device is gone; finish the command as NO_CONNECT */
		sdev_printk(KERN_INFO, scmd->device,
		    "device been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
				target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* NVMe devices get a protocol-level reset and their own timeout */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
		scmd->device->id, scmd->device->lun,
		MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
		tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && scsi_device_busy(scmd->device))
		r = FAILED;
 out:
	sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);

	return r;
}
3432  
3433  /**
3434   * scsih_target_reset - eh threads main target reset routine
3435   * @scmd: pointer to scsi command object
3436   *
3437   * Return: SUCCESS if command aborted else FAILED
3438   */
static int
scsih_target_reset(struct scsi_cmnd *scmd)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	u16	handle;
	u8	tr_method = 0;
	u8	tr_timeout = 30;	/* seconds; overridden for NVMe below */
	int r;
	struct scsi_target *starget = scmd->device->sdev_target;
	struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;

	starget_printk(KERN_INFO, starget,
	    "attempting target reset! scmd(0x%p)\n", scmd);
	_scsih_tm_display_info(ioc, scmd);

	sas_device_priv_data = scmd->device->hostdata;
	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
	    ioc->remove_host) {
		/* target is gone; finish the command as NO_CONNECT */
		starget_printk(KERN_INFO, starget,
		    "target been deleted! scmd(0x%p)\n", scmd);
		scmd->result = DID_NO_CONNECT << 16;
		scsi_done(scmd);
		r = SUCCESS;
		goto out;
	}

	/* for hidden raid components obtain the volume_handle */
	handle = 0;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT) {
		sas_device = mpt3sas_get_sdev_from_target(ioc,
				target_priv_data);
		if (sas_device)
			handle = sas_device->volume_handle;
	} else
		handle = sas_device_priv_data->sas_target->handle;

	if (!handle) {
		scmd->result = DID_RESET << 16;
		r = FAILED;
		goto out;
	}

	pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);

	/* NVMe devices get a protocol-level reset and their own timeout */
	if (pcie_device && (!ioc->tm_custom_handling) &&
	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
		tr_timeout = pcie_device->reset_timeout;
		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
	} else
		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
		scmd->device->id, 0,
		MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
	    tr_timeout, tr_method);
	/* Check for busy commands after reset */
	if (r == SUCCESS && atomic_read(&starget->target_busy))
		r = FAILED;
 out:
	starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
	    ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);

	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
	return r;
}
3510  
3511  
3512  /**
3513   * scsih_host_reset - eh threads main host reset routine
3514   * @scmd: pointer to scsi command object
3515   *
3516   * Return: SUCCESS if command aborted else FAILED
3517   */
3518  static int
scsih_host_reset(struct scsi_cmnd * scmd)3519  scsih_host_reset(struct scsi_cmnd *scmd)
3520  {
3521  	struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3522  	int r, retval;
3523  
3524  	ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
3525  	scsi_print_command(scmd);
3526  
3527  	if (ioc->is_driver_loading || ioc->remove_host) {
3528  		ioc_info(ioc, "Blocking the host reset\n");
3529  		r = FAILED;
3530  		goto out;
3531  	}
3532  
3533  	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3534  	r = (retval < 0) ? FAILED : SUCCESS;
3535  out:
3536  	ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
3537  		 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3538  
3539  	return r;
3540  }
3541  
3542  /**
3543   * _scsih_fw_event_add - insert and queue up fw_event
3544   * @ioc: per adapter object
3545   * @fw_event: object describing the event
3546   * Context: This function will acquire ioc->fw_event_lock.
3547   *
3548   * This adds the firmware event object into link list, then queues it up to
3549   * be processed from user context.
3550   */
static void
_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	unsigned long flags;

	/* no worker thread (e.g. during teardown): drop the event */
	if (ioc->firmware_event_thread == NULL)
		return;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	/* one reference held on behalf of fw_event_list ... */
	fw_event_work_get(fw_event);
	INIT_LIST_HEAD(&fw_event->list);
	list_add_tail(&fw_event->list, &ioc->fw_event_list);
	INIT_WORK(&fw_event->work, _firmware_event_work);
	/* ... and a second one held on behalf of the queued work item */
	fw_event_work_get(fw_event);
	queue_work(ioc->firmware_event_thread, &fw_event->work);
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
3568  
3569  /**
3570   * _scsih_fw_event_del_from_list - delete fw_event from the list
3571   * @ioc: per adapter object
3572   * @fw_event: object describing the event
3573   * Context: This function will acquire ioc->fw_event_lock.
3574   *
3575   * If the fw_event is on the fw_event_list, remove it and do a put.
3576   */
3577  static void
_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)3578  _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3579  	*fw_event)
3580  {
3581  	unsigned long flags;
3582  
3583  	spin_lock_irqsave(&ioc->fw_event_lock, flags);
3584  	if (!list_empty(&fw_event->list)) {
3585  		list_del_init(&fw_event->list);
3586  		fw_event_work_put(fw_event);
3587  	}
3588  	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3589  }
3590  
3591  
3592   /**
3593   * mpt3sas_send_trigger_data_event - send event for processing trigger data
3594   * @ioc: per adapter object
3595   * @event_data: trigger event data
3596   */
3597  void
mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER * ioc,struct SL_WH_TRIGGERS_EVENT_DATA_T * event_data)3598  mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3599  	struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3600  {
3601  	struct fw_event_work *fw_event;
3602  	u16 sz;
3603  
3604  	if (ioc->is_driver_loading)
3605  		return;
3606  	sz = sizeof(*event_data);
3607  	fw_event = alloc_fw_event_work(sz);
3608  	if (!fw_event)
3609  		return;
3610  	fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3611  	fw_event->ioc = ioc;
3612  	memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3613  	_scsih_fw_event_add(ioc, fw_event);
3614  	fw_event_work_put(fw_event);
3615  }
3616  
3617  /**
3618   * _scsih_error_recovery_delete_devices - remove devices not responding
3619   * @ioc: per adapter object
3620   */
3621  static void
_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER * ioc)3622  _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3623  {
3624  	struct fw_event_work *fw_event;
3625  
3626  	fw_event = alloc_fw_event_work(0);
3627  	if (!fw_event)
3628  		return;
3629  	fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3630  	fw_event->ioc = ioc;
3631  	_scsih_fw_event_add(ioc, fw_event);
3632  	fw_event_work_put(fw_event);
3633  }
3634  
3635  /**
3636   * mpt3sas_port_enable_complete - port enable completed (fake event)
3637   * @ioc: per adapter object
3638   */
3639  void
mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER * ioc)3640  mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3641  {
3642  	struct fw_event_work *fw_event;
3643  
3644  	fw_event = alloc_fw_event_work(0);
3645  	if (!fw_event)
3646  		return;
3647  	fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3648  	fw_event->ioc = ioc;
3649  	_scsih_fw_event_add(ioc, fw_event);
3650  	fw_event_work_put(fw_event);
3651  }
3652  
/**
 * dequeue_next_fw_event - detach the first event from the fw event list
 * @ioc: per adapter object
 * Context: This function will acquire ioc->fw_event_lock.
 *
 * Return: the dequeued fw_event, or NULL when the list is empty.  The
 * put below drops only the reference the list held; the caller still
 * operates on the reference held by the queued work item.
 */
static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;
	struct fw_event_work *fw_event = NULL;

	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	if (!list_empty(&ioc->fw_event_list)) {
		fw_event = list_first_entry(&ioc->fw_event_list,
				struct fw_event_work, list);
		list_del_init(&fw_event->list);
		/* drop the list's reference; the work's reference remains */
		fw_event_work_put(fw_event);
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);

	return fw_event;
}
3669  
/**
 * _scsih_fw_event_cleanup_queue - cleanup event queue
 * @ioc: per adapter object
 *
 * Walk the firmware event queue, either killing timers, or waiting
 * for outstanding events to complete
 *
 * Context: task, can sleep
 */
static void
_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
{
	struct fw_event_work *fw_event;

	/* Nothing to do without a worker thread or any queued/running event */
	if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
	    !ioc->firmware_event_thread)
		return;
	/*
	 * Set current running event as ignore, so that
	 * current running event will exit quickly.
	 * As diag reset has occurred it is of no use
	 * to process remaining stale event data entries.
	 */
	if (ioc->shost_recovery && ioc->current_event)
		ioc->current_event->ignore = 1;

	ioc->fw_events_cleanup = 1;
	while ((fw_event = dequeue_next_fw_event(ioc)) ||
	     (fw_event = ioc->current_event)) {

		/*
		 * Don't call cancel_work_sync() for current_event
		 * other than MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
		 * otherwise we may observe deadlock if current
		 * hard reset issued as part of processing the current_event.
		 *
		 * Original logic of cleaning the current_event is added
		 * for handling the back to back host reset issued by the user.
		 * i.e. during back to back host reset, driver used to process
		 * the two instances of MPT3SAS_REMOVE_UNRESPONDING_DEVICES
		 * event back to back and this made the driver unregister
		 * the devices from SML.
		 */

		if (fw_event == ioc->current_event &&
		    ioc->current_event->event !=
		    MPT3SAS_REMOVE_UNRESPONDING_DEVICES) {
			ioc->current_event = NULL;
			continue;
		}

		/*
		 * Driver has to clear ioc->start_scan flag when
		 * it is cleaning up MPT3SAS_PORT_ENABLE_COMPLETE,
		 * otherwise scsi_scan_host() API waits for the
		 * 5 minute timer to expire. If we exit from
		 * scsi_scan_host() early then we can issue the
		 * new port enable request as part of current diag reset.
		 */
		if (fw_event->event == MPT3SAS_PORT_ENABLE_COMPLETE) {
			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
			ioc->start_scan = 0;
		}

		/*
		 * Wait on the fw_event to complete. If this returns 1, then
		 * the event was never executed, and we need a put for the
		 * reference the work had on the fw_event.
		 *
		 * If it did execute, we wait for it to finish, and the put will
		 * happen from _firmware_event_work()
		 */
		if (cancel_work_sync(&fw_event->work))
			fw_event_work_put(fw_event);

	}
	ioc->fw_events_cleanup = 0;
}
3748  
3749  /**
3750   * _scsih_internal_device_block - block the sdev device
3751   * @sdev: per device object
3752   * @sas_device_priv_data : per device driver private data
3753   *
3754   * make sure device is blocked without error, if not
3755   * print an error
3756   */
3757  static void
_scsih_internal_device_block(struct scsi_device * sdev,struct MPT3SAS_DEVICE * sas_device_priv_data)3758  _scsih_internal_device_block(struct scsi_device *sdev,
3759  			struct MPT3SAS_DEVICE *sas_device_priv_data)
3760  {
3761  	int r = 0;
3762  
3763  	sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3764  	    sas_device_priv_data->sas_target->handle);
3765  	sas_device_priv_data->block = 1;
3766  
3767  	r = scsi_internal_device_block_nowait(sdev);
3768  	if (r == -EINVAL)
3769  		sdev_printk(KERN_WARNING, sdev,
3770  		    "device_block failed with return(%d) for handle(0x%04x)\n",
3771  		    r, sas_device_priv_data->sas_target->handle);
3772  }
3773  
/**
 * _scsih_internal_device_unblock - unblock the sdev device
 * @sdev: per device object
 * @sas_device_priv_data : per device driver private data
 * make sure device is unblocked without error, if not retry
 * by blocking and then unblocking
 */

static void
_scsih_internal_device_unblock(struct scsi_device *sdev,
			struct MPT3SAS_DEVICE *sas_device_priv_data)
{
	int r = 0;

	sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
	    "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
	/* Clear our bookkeeping flag before asking the midlayer to unblock */
	sas_device_priv_data->block = 0;
	r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
	if (r == -EINVAL) {
		/* The device has been set to SDEV_RUNNING by SD layer during
		 * device addition but the request queue is still stopped by
		 * our earlier block call. We need to perform a block again
		 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */

		sdev_printk(KERN_WARNING, sdev,
		    "device_unblock failed with return(%d) for handle(0x%04x) "
		    "performing a block followed by an unblock\n",
		    r, sas_device_priv_data->sas_target->handle);
		/* Retry step 1: force the device into SDEV_BLOCK ... */
		sas_device_priv_data->block = 1;
		r = scsi_internal_device_block_nowait(sdev);
		if (r)
			sdev_printk(KERN_WARNING, sdev, "retried device_block "
			    "failed with return(%d) for handle(0x%04x)\n",
			    r, sas_device_priv_data->sas_target->handle);

		/* ... retry step 2: then transition it back to SDEV_RUNNING */
		sas_device_priv_data->block = 0;
		r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
		if (r)
			sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
			    " failed with return(%d) for handle(0x%04x)\n",
			    r, sas_device_priv_data->sas_target->handle);
	}
}
3817  
3818  /**
3819   * _scsih_ublock_io_all_device - unblock every device
3820   * @ioc: per adapter object
3821   *
3822   * change the device state from block to running
3823   */
3824  static void
_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER * ioc)3825  _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3826  {
3827  	struct MPT3SAS_DEVICE *sas_device_priv_data;
3828  	struct scsi_device *sdev;
3829  
3830  	shost_for_each_device(sdev, ioc->shost) {
3831  		sas_device_priv_data = sdev->hostdata;
3832  		if (!sas_device_priv_data)
3833  			continue;
3834  		if (!sas_device_priv_data->block)
3835  			continue;
3836  
3837  		dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3838  			"device_running, handle(0x%04x)\n",
3839  		    sas_device_priv_data->sas_target->handle));
3840  		_scsih_internal_device_unblock(sdev, sas_device_priv_data);
3841  	}
3842  }
3843  
3844  
3845  /**
3846   * _scsih_ublock_io_device - prepare device to be deleted
3847   * @ioc: per adapter object
3848   * @sas_address: sas address
3849   * @port: hba port entry
3850   *
3851   * unblock then put device in offline state
3852   */
3853  static void
_scsih_ublock_io_device(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)3854  _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc,
3855  	u64 sas_address, struct hba_port *port)
3856  {
3857  	struct MPT3SAS_DEVICE *sas_device_priv_data;
3858  	struct scsi_device *sdev;
3859  
3860  	shost_for_each_device(sdev, ioc->shost) {
3861  		sas_device_priv_data = sdev->hostdata;
3862  		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
3863  			continue;
3864  		if (sas_device_priv_data->sas_target->sas_address
3865  		    != sas_address)
3866  			continue;
3867  		if (sas_device_priv_data->sas_target->port != port)
3868  			continue;
3869  		if (sas_device_priv_data->block)
3870  			_scsih_internal_device_unblock(sdev,
3871  				sas_device_priv_data);
3872  	}
3873  }
3874  
3875  /**
3876   * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3877   * @ioc: per adapter object
3878   *
3879   * During device pull we need to appropriately set the sdev state.
3880   */
3881  static void
_scsih_block_io_all_device(struct MPT3SAS_ADAPTER * ioc)3882  _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3883  {
3884  	struct MPT3SAS_DEVICE *sas_device_priv_data;
3885  	struct scsi_device *sdev;
3886  
3887  	shost_for_each_device(sdev, ioc->shost) {
3888  		sas_device_priv_data = sdev->hostdata;
3889  		if (!sas_device_priv_data)
3890  			continue;
3891  		if (sas_device_priv_data->block)
3892  			continue;
3893  		if (sas_device_priv_data->ignore_delay_remove) {
3894  			sdev_printk(KERN_INFO, sdev,
3895  			"%s skip device_block for SES handle(0x%04x)\n",
3896  			__func__, sas_device_priv_data->sas_target->handle);
3897  			continue;
3898  		}
3899  		_scsih_internal_device_block(sdev, sas_device_priv_data);
3900  	}
3901  }
3902  
3903  /**
3904   * _scsih_block_io_device - set the device state to SDEV_BLOCK
3905   * @ioc: per adapter object
3906   * @handle: device handle
3907   *
3908   * During device pull we need to appropriately set the sdev state.
3909   */
3910  static void
_scsih_block_io_device(struct MPT3SAS_ADAPTER * ioc,u16 handle)3911  _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3912  {
3913  	struct MPT3SAS_DEVICE *sas_device_priv_data;
3914  	struct scsi_device *sdev;
3915  	struct _sas_device *sas_device;
3916  
3917  	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3918  
3919  	shost_for_each_device(sdev, ioc->shost) {
3920  		sas_device_priv_data = sdev->hostdata;
3921  		if (!sas_device_priv_data)
3922  			continue;
3923  		if (sas_device_priv_data->sas_target->handle != handle)
3924  			continue;
3925  		if (sas_device_priv_data->block)
3926  			continue;
3927  		if (sas_device && sas_device->pend_sas_rphy_add)
3928  			continue;
3929  		if (sas_device_priv_data->ignore_delay_remove) {
3930  			sdev_printk(KERN_INFO, sdev,
3931  			"%s skip device_block for SES handle(0x%04x)\n",
3932  			__func__, sas_device_priv_data->sas_target->handle);
3933  			continue;
3934  		}
3935  		_scsih_internal_device_block(sdev, sas_device_priv_data);
3936  	}
3937  
3938  	if (sas_device)
3939  		sas_device_put(sas_device);
3940  }
3941  
/**
 * _scsih_block_io_to_children_attached_to_ex
 * @ioc: per adapter object
 * @sas_expander: the sas_device object
 *
 * This routine set sdev state to SDEV_BLOCK for all devices
 * attached to this expander. This function called when expander is
 * pulled.
 */
static void
_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port;
	struct _sas_device *sas_device;
	struct _sas_node *expander_sibling;
	unsigned long flags;

	/* Also terminates the recursion when a sibling lookup below fails */
	if (!sas_expander)
		return;

	/* Pass 1: flag every directly attached end device for blocking */
	list_for_each_entry(mpt3sas_port,
	   &sas_expander->sas_port_list, port_list) {
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE) {
			spin_lock_irqsave(&ioc->sas_device_lock, flags);
			sas_device = __mpt3sas_get_sdev_by_addr(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
			if (sas_device) {
				/* record the handle in the blocking bitmap */
				set_bit(sas_device->handle,
						ioc->blocking_handles);
				sas_device_put(sas_device);
			}
			spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
		}
	}

	/* Pass 2: recurse into any child expanders */
	list_for_each_entry(mpt3sas_port,
	   &sas_expander->sas_port_list, port_list) {

		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE) {
			expander_sibling =
			    mpt3sas_scsih_expander_find_by_sas_address(
			    ioc, mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
			/* a NULL sibling is handled by the guard at entry */
			_scsih_block_io_to_children_attached_to_ex(ioc,
			    expander_sibling);
		}
	}
}
3996  
3997  /**
3998   * _scsih_block_io_to_children_attached_directly
3999   * @ioc: per adapter object
4000   * @event_data: topology change event data
4001   *
4002   * This routine set sdev state to SDEV_BLOCK for all devices
4003   * direct attached during device pull.
4004   */
4005  static void
_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasTopologyChangeList_t * event_data)4006  _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4007  	Mpi2EventDataSasTopologyChangeList_t *event_data)
4008  {
4009  	int i;
4010  	u16 handle;
4011  	u16 reason_code;
4012  
4013  	for (i = 0; i < event_data->NumEntries; i++) {
4014  		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4015  		if (!handle)
4016  			continue;
4017  		reason_code = event_data->PHY[i].PhyStatus &
4018  		    MPI2_EVENT_SAS_TOPO_RC_MASK;
4019  		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
4020  			_scsih_block_io_device(ioc, handle);
4021  	}
4022  }
4023  
4024  /**
4025   * _scsih_block_io_to_pcie_children_attached_directly
4026   * @ioc: per adapter object
4027   * @event_data: topology change event data
4028   *
4029   * This routine set sdev state to SDEV_BLOCK for all devices
4030   * direct attached during device pull/reconnect.
4031   */
4032  static void
_scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeTopologyChangeList_t * event_data)4033  _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
4034  		Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4035  {
4036  	int i;
4037  	u16 handle;
4038  	u16 reason_code;
4039  
4040  	for (i = 0; i < event_data->NumEntries; i++) {
4041  		handle =
4042  			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4043  		if (!handle)
4044  			continue;
4045  		reason_code = event_data->PortEntry[i].PortStatus;
4046  		if (reason_code ==
4047  				MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
4048  			_scsih_block_io_device(ioc, handle);
4049  	}
4050  }
/**
 * _scsih_tm_tr_send - send task management request
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt time.
 *
 * This code is to initiate the device removal handshake protocol
 * with controller firmware.  This function will issue target reset
 * using high priority request queue.  It will send a sas iounit
 * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
 *
 * This is designed to send multiple task management request at the same
 * time to the fifo. If the fifo is full, we will append the request,
 * and process it in a future completion.
 */
static void
_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	u16 smid;
	struct _sas_device *sas_device = NULL;
	struct _pcie_device *pcie_device = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	u64 sas_address = 0;
	unsigned long flags;
	struct _tr_list *delayed_tr;
	u32 ioc_state;
	u8 tr_method = 0;
	struct hba_port *port = NULL;

	/* No TM can be issued while in PCI error recovery */
	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
				    __func__, handle));
		return;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
				    __func__, handle));
		return;
	}

	/* if PD, then return */
	if (test_bit(handle, ioc->pd_handles))
		return;

	clear_bit(handle, ioc->pend_os_device_add);

	/* Look up the SAS device first; mark its target as deleted */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device && sas_device->starget &&
	    sas_device->starget->hostdata) {
		sas_target_priv_data = sas_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		sas_address = sas_device->sas_address;
		port = sas_device->port;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	/* Fall back to an NVMe/PCIe device lookup for the same handle */
	if (!sas_device) {
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
		if (pcie_device && pcie_device->starget &&
			pcie_device->starget->hostdata) {
			sas_target_priv_data = pcie_device->starget->hostdata;
			sas_target_priv_data->deleted = 1;
			sas_address = pcie_device->wwid;
		}
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		/* NVMe devices need a protocol-level reset unless firmware
		 * handles TM customization or the device is PCIe-SCSI */
		if (pcie_device && (!ioc->tm_custom_handling) &&
		    (!(mpt3sas_scsih_is_pcie_scsi_device(
		    pcie_device->device_info))))
			tr_method =
			    MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
		else
			tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
	}
	if (sas_target_priv_data) {
		dewtprintk(ioc,
			   ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
				    handle, (u64)sas_address));
		if (sas_device) {
			if (sas_device->enclosure_handle != 0)
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
						    (u64)sas_device->enclosure_logical_id,
						    sas_device->slot));
			if (sas_device->connector_name[0] != '\0')
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
						    sas_device->enclosure_level,
						    sas_device->connector_name));
		} else if (pcie_device) {
			if (pcie_device->enclosure_handle != 0)
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
						    (u64)pcie_device->enclosure_logical_id,
						    pcie_device->slot));
			if (pcie_device->connector_name[0] != '\0')
				dewtprintk(ioc,
					   ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
						    pcie_device->enclosure_level,
						    pcie_device->connector_name));
		}
		/* Unblock any queued I/O so it can be failed back quickly */
		_scsih_ublock_io_device(ioc, sas_address, port);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	/* No free high-priority smid: park the request on the delayed
	 * TR list; it will be re-issued from a future TM completion */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
	if (!smid) {
		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
		if (!delayed_tr)
			goto out;
		INIT_LIST_HEAD(&delayed_tr->list);
		delayed_tr->handle = handle;
		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
				    handle));
		goto out;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid, ioc->tm_tr_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	mpi_request->MsgFlags = tr_method;
	set_bit(handle, ioc->device_remove_in_progress);
	ioc->put_smid_hi_priority(ioc, smid, 0);
	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);

out:
	if (sas_device)
		sas_device_put(sas_device);
	if (pcie_device)
		pcie_device_put(pcie_device);
}
4193  
/**
 * _scsih_tm_tr_complete - target reset completion for device removal
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * This is the target reset completion routine.
 * This code is part of the code to initiate the device removal
 * handshake protocol with controller firmware.
 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
static u8
_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	u16 handle;
	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
	Mpi2SCSITaskManagementReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
	Mpi2SasIoUnitControlRequest_t *mpi_request;
	u16 smid_sas_ctrl;
	u32 ioc_state;
	struct _sc_list *delayed_sc;

	/* Skip the handshake when the controller is not usable */
	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery\n",
				    __func__));
		return 1;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational\n",
				    __func__));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}
	/* Sanity check: the reply must match the original TM request */
	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc,
			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
				   handle,
				   le16_to_cpu(mpi_reply->DevHandle), smid));
		return 0;
	}

	mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
	dewtprintk(ioc,
		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
			    le32_to_cpu(mpi_reply->IOCLogInfo),
			    le32_to_cpu(mpi_reply->TerminationCount)));

	/* No free smid for the follow-up SAS iounit control request:
	 * park it on the delayed list to be issued from a later
	 * completion, then process any pending TM for this smid */
	smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
	if (!smid_sas_ctrl) {
		delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
		if (!delayed_sc)
			return _scsih_check_for_pending_tm(ioc, smid);
		INIT_LIST_HEAD(&delayed_sc->list);
		delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
		list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
				    handle));
		return _scsih_check_for_pending_tm(ioc, smid);
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
	/* Second step of the handshake: ask firmware to remove the device */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	mpi_request->DevHandle = mpi_request_tm->DevHandle;
	ioc->put_smid_default(ioc, smid_sas_ctrl);

	return _scsih_check_for_pending_tm(ioc, smid);
}
4284  
4285  /** _scsih_allow_scmd_to_device - check whether scmd needs to
4286   *				 issue to IOC or not.
4287   * @ioc: per adapter object
4288   * @scmd: pointer to scsi command object
4289   *
4290   * Returns true if scmd can be issued to IOC otherwise returns false.
4291   */
_scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER * ioc,struct scsi_cmnd * scmd)4292  inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
4293  	struct scsi_cmnd *scmd)
4294  {
4295  
4296  	if (ioc->pci_error_recovery)
4297  		return false;
4298  
4299  	if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
4300  		if (ioc->remove_host)
4301  			return false;
4302  
4303  		return true;
4304  	}
4305  
4306  	if (ioc->remove_host) {
4307  
4308  		switch (scmd->cmnd[0]) {
4309  		case SYNCHRONIZE_CACHE:
4310  		case START_STOP:
4311  			return true;
4312  		default:
4313  			return false;
4314  		}
4315  	}
4316  
4317  	return true;
4318  }
4319  
4320  /**
4321   * _scsih_sas_control_complete - completion routine
4322   * @ioc: per adapter object
4323   * @smid: system request message index
4324   * @msix_index: MSIX table index supplied by the OS
4325   * @reply: reply message frame(lower 32bit addr)
4326   * Context: interrupt time.
4327   *
4328   * This is the sas iounit control completion routine.
4329   * This code is part of the code to initiate the device removal
4330   * handshake protocol with controller firmware.
4331   *
4332   * Return: 1 meaning mf should be freed from _base_interrupt
4333   *         0 means the mf is freed from this function.
4334   */
4335  static u8
_scsih_sas_control_complete(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)4336  _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4337  	u8 msix_index, u32 reply)
4338  {
4339  	Mpi2SasIoUnitControlReply_t *mpi_reply =
4340  	    mpt3sas_base_get_reply_virt_addr(ioc, reply);
4341  
4342  	if (likely(mpi_reply)) {
4343  		dewtprintk(ioc,
4344  			   ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
4345  				    le16_to_cpu(mpi_reply->DevHandle), smid,
4346  				    le16_to_cpu(mpi_reply->IOCStatus),
4347  				    le32_to_cpu(mpi_reply->IOCLogInfo)));
4348  		if (le16_to_cpu(mpi_reply->IOCStatus) ==
4349  		     MPI2_IOCSTATUS_SUCCESS) {
4350  			clear_bit(le16_to_cpu(mpi_reply->DevHandle),
4351  			    ioc->device_remove_in_progress);
4352  		}
4353  	} else {
4354  		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
4355  			__FILE__, __LINE__, __func__);
4356  	}
4357  	return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
4358  }
4359  
/**
 * _scsih_tm_tr_volume_send - send target reset request for volumes
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt time.
 *
 * This is designed to send multiple task management request at the same
 * time to the fifo. If the fifo is full, we will append the request,
 * and process it in a future completion.
 */
static void
_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	Mpi2SCSITaskManagementRequest_t *mpi_request;
	u16 smid;
	struct _tr_list *delayed_tr;

	/* No TM can be issued while in PCI error recovery */
	if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host reset in progress!\n",
				    __func__));
		return;
	}

	/* No free high-priority smid: park the request on the delayed
	 * volume TR list; it will be re-issued from a future completion */
	smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
	if (!smid) {
		delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
		if (!delayed_tr)
			return;
		INIT_LIST_HEAD(&delayed_tr->list);
		delayed_tr->handle = handle;
		list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
		dewtprintk(ioc,
			   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
				    handle));
		return;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid, ioc->tm_tr_volume_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	mpi_request->DevHandle = cpu_to_le16(handle);
	mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
	ioc->put_smid_hi_priority(ioc, smid, 0);
}
4408  
/**
 * _scsih_tm_volume_tr_complete - target reset completion
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame(lower 32bit addr)
 * Context: interrupt time.
 *
 * Completion handler for the volume target resets issued by
 * _scsih_tm_tr_volume_send().  Validates the reply against the
 * original request frame before feeding the next delayed target reset.
 *
 * Return: 1 meaning mf should be freed from _base_interrupt
 *         0 means the mf is freed from this function.
 */
static u8
_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u8 msix_index, u32 reply)
{
	u16 handle;
	Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
	Mpi2SCSITaskManagementReply_t *mpi_reply =
	    mpt3sas_base_get_reply_virt_addr(ioc, reply);

	/* While a host/pci reset is in progress, just let _base_interrupt
	 * free the message frame.
	 */
	if (ioc->shost_recovery || ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host reset in progress!\n",
				    __func__));
		return 1;
	}
	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	/* Cross-check the reply against the original request frame; a
	 * handle mismatch means this reply does not belong to the smid.
	 */
	mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
	handle = le16_to_cpu(mpi_request_tm->DevHandle);
	if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
		dewtprintk(ioc,
			   ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
				   handle, le16_to_cpu(mpi_reply->DevHandle),
				   smid));
		return 0;
	}

	dewtprintk(ioc,
		   ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
			    handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
			    le32_to_cpu(mpi_reply->IOCLogInfo),
			    le32_to_cpu(mpi_reply->TerminationCount)));

	/* Replay the next queued target reset, if any */
	return _scsih_check_for_pending_tm(ioc, smid);
}
4459  
/**
 * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
 * @ioc: per adapter object
 * @smid: system request message index
 * @event: Event ID
 * @event_context: used to track events uniquely
 *
 * Sends an EVENT_ACK that could not be issued earlier because no smid
 * was available, reusing @smid for the delayed request.
 *
 * Context - processed in interrupt context.
 */
static void
_scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
				U32 event_context)
{
	Mpi2EventAckRequest_t *ack_request;
	int i = smid - ioc->internal_smid;	/* index into internal_lookup[] */
	unsigned long flags;

	/* Without releasing the smid just update the
	 * call back index and reuse the same smid for
	 * processing this delayed request
	 */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	dewtprintk(ioc,
		   ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
			    le16_to_cpu(event), smid, ioc->base_cb_idx));
	/* @event/@event_context arrive in wire (little-endian) byte order
	 * (note the le16_to_cpu above for printing), so they are copied
	 * into the request frame without conversion.
	 */
	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	ack_request->Event = event;
	ack_request->EventContext = event_context;
	ack_request->VF_ID = 0;  /* TODO */
	ack_request->VP_ID = 0;
	ioc->put_smid_default(ioc, smid);
}
4497  
/**
 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
 *				sas_io_unit_ctrl messages
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 *
 * Sends the REMOVE_DEVICE sas_io_unit_ctrl request that had to be
 * queued earlier, reusing @smid for the delayed request.
 *
 * Context - processed in interrupt context.
 */
static void
_scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
					u16 smid, u16 handle)
{
	Mpi2SasIoUnitControlRequest_t *mpi_request;
	u32 ioc_state;
	int i = smid - ioc->internal_smid;	/* index into internal_lookup[] */
	unsigned long flags;

	/* Bail out when the controller cannot accept new requests */
	if (ioc->remove_host) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host has been removed\n",
				    __func__));
		return;
	} else if (ioc->pci_error_recovery) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host in pci error recovery\n",
				    __func__));
		return;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: host is not operational\n",
				    __func__));
		return;
	}

	/* Without releasing the smid just update the
	 * call back index and reuse the same smid for
	 * processing this delayed request
	 */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	dewtprintk(ioc,
		   ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
			    handle, smid, ioc->tm_sas_control_cb_idx));
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
	mpi_request->DevHandle = cpu_to_le16(handle);
	ioc->put_smid_default(ioc, smid);
}
4553  
4554  /**
4555   * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
4556   * @ioc: per adapter object
4557   * @smid: system request message index
4558   *
4559   * Context: Executed in interrupt context
4560   *
4561   * This will check delayed internal messages list, and process the
4562   * next request.
4563   *
4564   * Return: 1 meaning mf should be freed from _base_interrupt
4565   *         0 means the mf is freed from this function.
4566   */
4567  u8
mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER * ioc,u16 smid)4568  mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4569  {
4570  	struct _sc_list *delayed_sc;
4571  	struct _event_ack_list *delayed_event_ack;
4572  
4573  	if (!list_empty(&ioc->delayed_event_ack_list)) {
4574  		delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4575  						struct _event_ack_list, list);
4576  		_scsih_issue_delayed_event_ack(ioc, smid,
4577  		  delayed_event_ack->Event, delayed_event_ack->EventContext);
4578  		list_del(&delayed_event_ack->list);
4579  		kfree(delayed_event_ack);
4580  		return 0;
4581  	}
4582  
4583  	if (!list_empty(&ioc->delayed_sc_list)) {
4584  		delayed_sc = list_entry(ioc->delayed_sc_list.next,
4585  						struct _sc_list, list);
4586  		_scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4587  						 delayed_sc->handle);
4588  		list_del(&delayed_sc->list);
4589  		kfree(delayed_sc);
4590  		return 0;
4591  	}
4592  	return 1;
4593  }
4594  
4595  /**
4596   * _scsih_check_for_pending_tm - check for pending task management
4597   * @ioc: per adapter object
4598   * @smid: system request message index
4599   *
4600   * This will check delayed target reset list, and feed the
4601   * next reqeust.
4602   *
4603   * Return: 1 meaning mf should be freed from _base_interrupt
4604   *         0 means the mf is freed from this function.
4605   */
4606  static u8
_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER * ioc,u16 smid)4607  _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4608  {
4609  	struct _tr_list *delayed_tr;
4610  
4611  	if (!list_empty(&ioc->delayed_tr_volume_list)) {
4612  		delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4613  		    struct _tr_list, list);
4614  		mpt3sas_base_free_smid(ioc, smid);
4615  		_scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4616  		list_del(&delayed_tr->list);
4617  		kfree(delayed_tr);
4618  		return 0;
4619  	}
4620  
4621  	if (!list_empty(&ioc->delayed_tr_list)) {
4622  		delayed_tr = list_entry(ioc->delayed_tr_list.next,
4623  		    struct _tr_list, list);
4624  		mpt3sas_base_free_smid(ioc, smid);
4625  		_scsih_tm_tr_send(ioc, delayed_tr->handle);
4626  		list_del(&delayed_tr->list);
4627  		kfree(delayed_tr);
4628  		return 0;
4629  	}
4630  
4631  	return 1;
4632  }
4633  
/**
 * _scsih_check_topo_delete_events - sanity check on topo events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This routine added to better handle cable breaker.
 *
 * This handles the case where driver receives multiple expander
 * add and delete events in a single shot.  When there is a delete event
 * the routine will void any pending add events waiting in the event queue.
 */
static void
_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataSasTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi2EventDataSasTopologyChangeList_t *local_event_data;
	u16 expander_handle;
	struct _sas_node *sas_expander;
	unsigned long flags;
	int i, reason_code;
	u16 handle;

	/* Fire a target reset for every phy whose attached device is gone */
	for (i = 0 ; i < event_data->NumEntries; i++) {
		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PHY[i].PhyStatus &
		    MPI2_EVENT_SAS_TOPO_RC_MASK;
		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	/* NOTE(review): a handle below the HBA phy count is treated as
	 * "no expander involved" - only direct attached children need
	 * blocking in that case; confirm handle-numbering assumption.
	 */
	expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
	if (expander_handle < ioc->sas_hba.num_phys) {
		_scsih_block_io_to_children_attached_directly(ioc, event_data);
		return;
	}
	if (event_data->ExpStatus ==
	    MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
		/* put expander attached devices into blocking state */
		spin_lock_irqsave(&ioc->sas_node_lock, flags);
		sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
		    expander_handle);
		_scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
		/* drain the blocking_handles bitmap populated above,
		 * blocking IO to each handle found set
		 */
		do {
			handle = find_first_bit(ioc->blocking_handles,
			    ioc->facts.MaxDevHandle);
			if (handle < ioc->facts.MaxDevHandle)
				_scsih_block_io_device(ioc, handle);
		} while (test_and_clear_bit(handle, ioc->blocking_handles));
	} else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
		_scsih_block_io_to_children_attached_directly(ioc, event_data);

	/* only an expander delete voids pending add events */
	if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
		    fw_event->ignore)
			continue;
		local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
				   fw_event->event_data;
		if (local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->ExpStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
			    expander_handle) {
				dewtprintk(ioc,
					   ioc_info(ioc, "setting ignoring flag\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
4714  
/**
 * _scsih_check_pcie_topo_remove_events - sanity check on topo
 * events
 * @ioc: per adapter object
 * @event_data: the event data payload
 *
 * This handles the case where driver receives multiple switch
 * or device add and delete events in a single shot.  When there
 * is a delete event the routine will void any pending add
 * events waiting in the event queue.
 */
static void
_scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
{
	struct fw_event_work *fw_event;
	Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
	unsigned long flags;
	int i, reason_code;
	u16 handle, switch_handle;

	/* Fire a target reset for every port whose attached device is gone */
	for (i = 0; i < event_data->NumEntries; i++) {
		handle =
			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;
		reason_code = event_data->PortEntry[i].PortStatus;
		if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
			_scsih_tm_tr_send(ioc, handle);
	}

	/* no switch involved: only direct attached children to block */
	switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
	if (!switch_handle) {
		_scsih_block_io_to_pcie_children_attached_directly(
							ioc, event_data);
		return;
	}
    /* TODO We are not supporting cascaded PCIe Switch removal yet*/
	if ((event_data->SwitchStatus
		== MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
		(event_data->SwitchStatus ==
					MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
		_scsih_block_io_to_pcie_children_attached_directly(
							ioc, event_data);

	/* NOTE(review): this and the loop below compare the PCIe
	 * SwitchStatus against SAS MPI2_EVENT_SAS_TOPO_ES_* constants;
	 * presumably those values match the corresponding
	 * MPI26_EVENT_PCIE_TOPO_SS_* codes - confirm against mpi2_ioc.h.
	 */
	if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
		return;

	/* mark ignore flag for pending events */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
		if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
			fw_event->ignore)
			continue;
		local_event_data =
			(Mpi26EventDataPCIeTopologyChangeList_t *)
			fw_event->event_data;
		if (local_event_data->SwitchStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_ADDED ||
		    local_event_data->SwitchStatus ==
		    MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
			if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
				switch_handle) {
				dewtprintk(ioc,
					   ioc_info(ioc, "setting ignoring flag for switch event\n"));
				fw_event->ignore = 1;
			}
		}
	}
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
}
4786  
4787  /**
4788   * _scsih_set_volume_delete_flag - setting volume delete flag
4789   * @ioc: per adapter object
4790   * @handle: device handle
4791   *
4792   * This returns nothing.
4793   */
4794  static void
_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER * ioc,u16 handle)4795  _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4796  {
4797  	struct _raid_device *raid_device;
4798  	struct MPT3SAS_TARGET *sas_target_priv_data;
4799  	unsigned long flags;
4800  
4801  	spin_lock_irqsave(&ioc->raid_device_lock, flags);
4802  	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4803  	if (raid_device && raid_device->starget &&
4804  	    raid_device->starget->hostdata) {
4805  		sas_target_priv_data =
4806  		    raid_device->starget->hostdata;
4807  		sas_target_priv_data->deleted = 1;
4808  		dewtprintk(ioc,
4809  			   ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4810  				    handle, (u64)raid_device->wwid));
4811  	}
4812  	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4813  }
4814  
4815  /**
4816   * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4817   * @handle: input handle
4818   * @a: handle for volume a
4819   * @b: handle for volume b
4820   *
4821   * IR firmware only supports two raid volumes.  The purpose of this
4822   * routine is to set the volume handle in either a or b. When the given
4823   * input handle is non-zero, or when a and b have not been set before.
4824   */
4825  static void
_scsih_set_volume_handle_for_tr(u16 handle,u16 * a,u16 * b)4826  _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4827  {
4828  	if (!handle || handle == *a || handle == *b)
4829  		return;
4830  	if (!*a)
4831  		*a = handle;
4832  	else if (!*b)
4833  		*b = handle;
4834  }
4835  
/**
 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
 * @ioc: per adapter object
 * @event_data: the event data payload
 * Context: interrupt time.
 *
 * This routine will send target reset to volume, followed by target
 * resets to the PDs. This is called when a PD has been removed, or
 * volume has been deleted or removed. When the target reset is sent
 * to volume, the PD target resets need to be queued to start upon
 * completion of the volume target reset.
 */
static void
_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventDataIrConfigChangeList_t *event_data)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	u16 handle, volume_handle, a, b;
	struct _tr_list *delayed_tr;

	/* a/b collect the (at most two) volume handles needing a reset */
	a = 0;
	b = 0;

	if (ioc->is_warpdrive)
		return;

	/* Volume Resets for Deleted or Removed */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
		    element->ReasonCode ==
		    MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_delete_flag(ioc, volume_handle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	/* Volume Resets for UNHIDE events */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (le32_to_cpu(event_data->Flags) &
		    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
			continue;
		if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
			volume_handle = le16_to_cpu(element->VolDevHandle);
			_scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
		}
	}

	/* volume resets go out first; PD resets for these volumes are
	 * queued below until the volume reset completes
	 */
	if (a)
		_scsih_tm_tr_volume_send(ioc, a);
	if (b)
		_scsih_tm_tr_volume_send(ioc, b);

	/* PD target resets */
	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	for (i = 0; i < event_data->NumElements; i++, element++) {
		if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
			continue;
		handle = le16_to_cpu(element->PhysDiskDevHandle);
		volume_handle = le16_to_cpu(element->VolDevHandle);
		clear_bit(handle, ioc->pd_handles);
		if (!volume_handle)
			_scsih_tm_tr_send(ioc, handle);
		else if (volume_handle == a || volume_handle == b) {
			/* NOTE(review): BUG_ON on a GFP_ATOMIC allocation
			 * failure will panic under memory pressure -
			 * consider a softer fallback.
			 */
			delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
			BUG_ON(!delayed_tr);
			INIT_LIST_HEAD(&delayed_tr->list);
			delayed_tr->handle = handle;
			list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
			dewtprintk(ioc,
				   ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
					    handle));
		} else
			_scsih_tm_tr_send(ioc, handle);
	}
}
4919  
4920  
4921  /**
4922   * _scsih_check_volume_delete_events - set delete flag for volumes
4923   * @ioc: per adapter object
4924   * @event_data: the event data payload
4925   * Context: interrupt time.
4926   *
4927   * This will handle the case when the cable connected to entire volume is
4928   * pulled. We will take care of setting the deleted flag so normal IO will
4929   * not be sent.
4930   */
4931  static void
_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrVolume_t * event_data)4932  _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4933  	Mpi2EventDataIrVolume_t *event_data)
4934  {
4935  	u32 state;
4936  
4937  	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4938  		return;
4939  	state = le32_to_cpu(event_data->NewValue);
4940  	if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4941  	    MPI2_RAID_VOL_STATE_FAILED)
4942  		_scsih_set_volume_delete_flag(ioc,
4943  		    le16_to_cpu(event_data->VolDevHandle));
4944  }
4945  
4946  /**
4947   * _scsih_temp_threshold_events - display temperature threshold exceeded events
4948   * @ioc: per adapter object
4949   * @event_data: the temp threshold event data
4950   * Context: interrupt time.
4951   */
4952  static void
_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataTemperature_t * event_data)4953  _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4954  	Mpi2EventDataTemperature_t *event_data)
4955  {
4956  	u32 doorbell;
4957  	if (ioc->temp_sensors_count >= event_data->SensorNum) {
4958  		ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4959  			le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4960  			le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4961  			le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4962  			le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4963  			event_data->SensorNum);
4964  		ioc_err(ioc, "Current Temp In Celsius: %d\n",
4965  			event_data->CurrentTemperature);
4966  		if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4967  			doorbell = mpt3sas_base_get_iocstate(ioc, 0);
4968  			if ((doorbell & MPI2_IOC_STATE_MASK) ==
4969  			    MPI2_IOC_STATE_FAULT) {
4970  				mpt3sas_print_fault_code(ioc,
4971  				    doorbell & MPI2_DOORBELL_DATA_MASK);
4972  			} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
4973  			    MPI2_IOC_STATE_COREDUMP) {
4974  				mpt3sas_print_coredump_info(ioc,
4975  				    doorbell & MPI2_DOORBELL_DATA_MASK);
4976  			}
4977  		}
4978  	}
4979  }
4980  
/**
 * _scsih_set_satl_pending - set/clear the per-device pending ATA flag
 * @scmd: pointer to scsi command object
 * @pending: true to mark an ATA passthrough outstanding, false to clear
 *
 * Only ATA_12/ATA_16 passthrough commands are tracked; the flag
 * serializes them per device.
 *
 * Return: when setting, the previous bit value (non-zero means another
 * ATA passthrough is already outstanding); otherwise 0.
 */
static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
{
	struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
	u8 opcode = scmd->cmnd[0];

	if (opcode != ATA_12 && opcode != ATA_16)
		return 0;

	if (!pending) {
		clear_bit(0, &priv->ata_command_pending);
		return 0;
	}

	return test_and_set_bit(0, &priv->ata_command_pending);
}
4994  
4995  /**
4996   * _scsih_flush_running_cmds - completing outstanding commands.
4997   * @ioc: per adapter object
4998   *
4999   * The flushing out of all pending scmd commands following host reset,
5000   * where all IO is dropped to the floor.
5001   */
5002  static void
_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER * ioc)5003  _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
5004  {
5005  	struct scsi_cmnd *scmd;
5006  	struct scsiio_tracker *st;
5007  	u16 smid;
5008  	int count = 0;
5009  
5010  	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
5011  		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5012  		if (!scmd)
5013  			continue;
5014  		count++;
5015  		_scsih_set_satl_pending(scmd, false);
5016  		st = scsi_cmd_priv(scmd);
5017  		mpt3sas_base_clear_st(ioc, st);
5018  		scsi_dma_unmap(scmd);
5019  		if (ioc->pci_error_recovery || ioc->remove_host)
5020  			scmd->result = DID_NO_CONNECT << 16;
5021  		else
5022  			scmd->result = DID_RESET << 16;
5023  		scsi_done(scmd);
5024  	}
5025  	dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
5026  }
5027  
/**
 * _scsih_setup_eedp - setup MPI request for EEDP transfer
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @mpi_request: pointer to the SCSI_IO request message frame
 *
 * Translates the midlayer protection operation and flags into the
 * EEDP fields of the SCSI_IO request.
 *
 * Supporting protection 1 and 3.
 */
static void
_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
	Mpi25SCSIIORequest_t *mpi_request)
{
	u16 eedp_flags;
	/* NOTE(review): source and destination types are identical, so
	 * this cast is a no-op here - presumably a leftover from the
	 * MPI2.0/2.5 frame split; confirm before cleaning up.
	 */
	Mpi25SCSIIORequest_t *mpi_request_3v =
	   (Mpi25SCSIIORequest_t *)mpi_request;

	switch (scsi_get_prot_op(scmd)) {
	case SCSI_PROT_READ_STRIP:
		/* IOC checks and removes protection info on reads */
		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
		break;
	case SCSI_PROT_WRITE_INSERT:
		/* IOC inserts protection info on writes */
		eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
		break;
	default:
		/* no EEDP offload for other protection ops */
		return;
	}

	if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;

	if (scmd->prot_flags & SCSI_PROT_REF_CHECK)
		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;

	if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) {
		eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;

		/* reference tag is big-endian in the CDB overlay, unlike
		 * the little-endian MPI frame fields
		 */
		mpi_request->CDB.EEDP32.PrimaryReferenceTag =
			cpu_to_be32(scsi_prot_ref_tag(scmd));
	}

	mpi_request_3v->EEDPBlockSize = cpu_to_le16(scsi_prot_interval(scmd));

	if (ioc->is_gen35_ioc)
		eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
	mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
}
5074  
5075  /**
5076   * _scsih_eedp_error_handling - return sense code for EEDP errors
5077   * @scmd: pointer to scsi command object
5078   * @ioc_status: ioc status
5079   */
5080  static void
_scsih_eedp_error_handling(struct scsi_cmnd * scmd,u16 ioc_status)5081  _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
5082  {
5083  	u8 ascq;
5084  
5085  	switch (ioc_status) {
5086  	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5087  		ascq = 0x01;
5088  		break;
5089  	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5090  		ascq = 0x02;
5091  		break;
5092  	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5093  		ascq = 0x03;
5094  		break;
5095  	default:
5096  		ascq = 0x00;
5097  		break;
5098  	}
5099  	scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x10, ascq);
5100  	set_host_byte(scmd, DID_ABORT);
5101  }
5102  
5103  /**
5104   * scsih_qcmd - main scsi request entry point
5105   * @shost: SCSI host pointer
5106   * @scmd: pointer to scsi command object
5107   *
5108   * The callback index is set inside `ioc->scsi_io_cb_idx`.
5109   *
5110   * Return: 0 on success.  If there's a failure, return either:
5111   * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
5112   * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
5113   */
5114  static int
scsih_qcmd(struct Scsi_Host * shost,struct scsi_cmnd * scmd)5115  scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
5116  {
5117  	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
5118  	struct MPT3SAS_DEVICE *sas_device_priv_data;
5119  	struct MPT3SAS_TARGET *sas_target_priv_data;
5120  	struct _raid_device *raid_device;
5121  	struct request *rq = scsi_cmd_to_rq(scmd);
5122  	int class;
5123  	Mpi25SCSIIORequest_t *mpi_request;
5124  	struct _pcie_device *pcie_device = NULL;
5125  	u32 mpi_control;
5126  	u16 smid;
5127  	u16 handle;
5128  
5129  	if (ioc->logging_level & MPT_DEBUG_SCSI)
5130  		scsi_print_command(scmd);
5131  
5132  	sas_device_priv_data = scmd->device->hostdata;
5133  	if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
5134  		scmd->result = DID_NO_CONNECT << 16;
5135  		scsi_done(scmd);
5136  		return 0;
5137  	}
5138  
5139  	if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
5140  		scmd->result = DID_NO_CONNECT << 16;
5141  		scsi_done(scmd);
5142  		return 0;
5143  	}
5144  
5145  	sas_target_priv_data = sas_device_priv_data->sas_target;
5146  
5147  	/* invalid device handle */
5148  	handle = sas_target_priv_data->handle;
5149  
5150  	/*
5151  	 * Avoid error handling escallation when device is disconnected
5152  	 */
5153  	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE || sas_device_priv_data->block) {
5154  		if (scmd->device->host->shost_state == SHOST_RECOVERY &&
5155  		    scmd->cmnd[0] == TEST_UNIT_READY) {
5156  			scsi_build_sense(scmd, 0, UNIT_ATTENTION, 0x29, 0x07);
5157  			scsi_done(scmd);
5158  			return 0;
5159  		}
5160  	}
5161  
5162  	if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
5163  		scmd->result = DID_NO_CONNECT << 16;
5164  		scsi_done(scmd);
5165  		return 0;
5166  	}
5167  
5168  
5169  	if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
5170  		/* host recovery or link resets sent via IOCTLs */
5171  		return SCSI_MLQUEUE_HOST_BUSY;
5172  	} else if (sas_target_priv_data->deleted) {
5173  		/* device has been deleted */
5174  		scmd->result = DID_NO_CONNECT << 16;
5175  		scsi_done(scmd);
5176  		return 0;
5177  	} else if (sas_target_priv_data->tm_busy ||
5178  		   sas_device_priv_data->block) {
5179  		/* device busy with task management */
5180  		return SCSI_MLQUEUE_DEVICE_BUSY;
5181  	}
5182  
5183  	/*
5184  	 * Bug work around for firmware SATL handling.  The loop
5185  	 * is based on atomic operations and ensures consistency
5186  	 * since we're lockless at this point
5187  	 */
5188  	do {
5189  		if (test_bit(0, &sas_device_priv_data->ata_command_pending))
5190  			return SCSI_MLQUEUE_DEVICE_BUSY;
5191  	} while (_scsih_set_satl_pending(scmd, true));
5192  
5193  	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
5194  		mpi_control = MPI2_SCSIIO_CONTROL_READ;
5195  	else if (scmd->sc_data_direction == DMA_TO_DEVICE)
5196  		mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
5197  	else
5198  		mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
5199  
5200  	/* set tags */
5201  	mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
5202  	/* NCQ Prio supported, make sure control indicated high priority */
5203  	if (sas_device_priv_data->ncq_prio_enable) {
5204  		class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
5205  		if (class == IOPRIO_CLASS_RT)
5206  			mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
5207  	}
5208  	/* Make sure Device is not raid volume.
5209  	 * We do not expose raid functionality to upper layer for warpdrive.
5210  	 */
5211  	if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
5212  		&& !scsih_is_nvme(&scmd->device->sdev_gendev))
5213  		&& sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
5214  		mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
5215  
5216  	smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
5217  	if (!smid) {
5218  		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5219  		_scsih_set_satl_pending(scmd, false);
5220  		goto out;
5221  	}
5222  	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5223  	memset(mpi_request, 0, ioc->request_sz);
5224  	_scsih_setup_eedp(ioc, scmd, mpi_request);
5225  
5226  	if (scmd->cmd_len == 32)
5227  		mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
5228  	mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5229  	if (sas_device_priv_data->sas_target->flags &
5230  	    MPT_TARGET_FLAGS_RAID_COMPONENT)
5231  		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
5232  	else
5233  		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
5234  	mpi_request->DevHandle = cpu_to_le16(handle);
5235  	mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
5236  	mpi_request->Control = cpu_to_le32(mpi_control);
5237  	mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
5238  	mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
5239  	mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
5240  	mpi_request->SenseBufferLowAddress =
5241  	    mpt3sas_base_get_sense_buffer_dma(ioc, smid);
5242  	mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
5243  	int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
5244  	    mpi_request->LUN);
5245  	memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5246  
5247  	if (mpi_request->DataLength) {
5248  		pcie_device = sas_target_priv_data->pcie_dev;
5249  		if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
5250  			mpt3sas_base_free_smid(ioc, smid);
5251  			_scsih_set_satl_pending(scmd, false);
5252  			goto out;
5253  		}
5254  	} else
5255  		ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
5256  
5257  	raid_device = sas_target_priv_data->raid_device;
5258  	if (raid_device && raid_device->direct_io_enabled)
5259  		mpt3sas_setup_direct_io(ioc, scmd,
5260  			raid_device, mpi_request);
5261  
5262  	if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
5263  		if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
5264  			mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
5265  			    MPI25_SCSIIO_IOFLAGS_FAST_PATH);
5266  			ioc->put_smid_fast_path(ioc, smid, handle);
5267  		} else
5268  			ioc->put_smid_scsi_io(ioc, smid,
5269  			    le16_to_cpu(mpi_request->DevHandle));
5270  	} else
5271  		ioc->put_smid_default(ioc, smid);
5272  	return 0;
5273  
5274   out:
5275  	return SCSI_MLQUEUE_HOST_BUSY;
5276  }
5277  
5278  /**
5279   * _scsih_normalize_sense - normalize descriptor and fixed format sense data
5280   * @sense_buffer: sense data returned by target
5281   * @data: normalized skey/asc/ascq
5282   */
5283  static void
_scsih_normalize_sense(char * sense_buffer,struct sense_info * data)5284  _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
5285  {
5286  	if ((sense_buffer[0] & 0x7F) >= 0x72) {
5287  		/* descriptor format */
5288  		data->skey = sense_buffer[1] & 0x0F;
5289  		data->asc = sense_buffer[2];
5290  		data->ascq = sense_buffer[3];
5291  	} else {
5292  		/* fixed format */
5293  		data->skey = sense_buffer[2] & 0x0F;
5294  		data->asc = sense_buffer[12];
5295  		data->ascq = sense_buffer[13];
5296  	}
5297  }
5298  
5299  /**
5300   * _scsih_scsi_ioc_info - translated non-successful SCSI_IO request
5301   * @ioc: per adapter object
5302   * @scmd: pointer to scsi command object
5303   * @mpi_reply: reply mf payload returned from firmware
5304   * @smid: ?
5305   *
5306   * scsi_status - SCSI Status code returned from target device
5307   * scsi_state - state info associated with SCSI_IO determined by ioc
5308   * ioc_status - ioc supplied status info
5309   */
5310  static void
_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER * ioc,struct scsi_cmnd * scmd,Mpi2SCSIIOReply_t * mpi_reply,u16 smid)5311  _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
5312  	Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
5313  {
5314  	u32 response_info;
5315  	u8 *response_bytes;
5316  	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
5317  	    MPI2_IOCSTATUS_MASK;
5318  	u8 scsi_state = mpi_reply->SCSIState;
5319  	u8 scsi_status = mpi_reply->SCSIStatus;
5320  	char *desc_ioc_state = NULL;
5321  	char *desc_scsi_status = NULL;
5322  	char *desc_scsi_state = ioc->tmp_string;
5323  	u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5324  	struct _sas_device *sas_device = NULL;
5325  	struct _pcie_device *pcie_device = NULL;
5326  	struct scsi_target *starget = scmd->device->sdev_target;
5327  	struct MPT3SAS_TARGET *priv_target = starget->hostdata;
5328  	char *device_str = NULL;
5329  
5330  	if (!priv_target)
5331  		return;
5332  	if (ioc->hide_ir_msg)
5333  		device_str = "WarpDrive";
5334  	else
5335  		device_str = "volume";
5336  
5337  	if (log_info == 0x31170000)
5338  		return;
5339  
5340  	switch (ioc_status) {
5341  	case MPI2_IOCSTATUS_SUCCESS:
5342  		desc_ioc_state = "success";
5343  		break;
5344  	case MPI2_IOCSTATUS_INVALID_FUNCTION:
5345  		desc_ioc_state = "invalid function";
5346  		break;
5347  	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5348  		desc_ioc_state = "scsi recovered error";
5349  		break;
5350  	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
5351  		desc_ioc_state = "scsi invalid dev handle";
5352  		break;
5353  	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5354  		desc_ioc_state = "scsi device not there";
5355  		break;
5356  	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5357  		desc_ioc_state = "scsi data overrun";
5358  		break;
5359  	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5360  		desc_ioc_state = "scsi data underrun";
5361  		break;
5362  	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5363  		desc_ioc_state = "scsi io data error";
5364  		break;
5365  	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5366  		desc_ioc_state = "scsi protocol error";
5367  		break;
5368  	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5369  		desc_ioc_state = "scsi task terminated";
5370  		break;
5371  	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5372  		desc_ioc_state = "scsi residual mismatch";
5373  		break;
5374  	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5375  		desc_ioc_state = "scsi task mgmt failed";
5376  		break;
5377  	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5378  		desc_ioc_state = "scsi ioc terminated";
5379  		break;
5380  	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5381  		desc_ioc_state = "scsi ext terminated";
5382  		break;
5383  	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5384  		desc_ioc_state = "eedp guard error";
5385  		break;
5386  	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5387  		desc_ioc_state = "eedp ref tag error";
5388  		break;
5389  	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5390  		desc_ioc_state = "eedp app tag error";
5391  		break;
5392  	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5393  		desc_ioc_state = "insufficient power";
5394  		break;
5395  	default:
5396  		desc_ioc_state = "unknown";
5397  		break;
5398  	}
5399  
5400  	switch (scsi_status) {
5401  	case MPI2_SCSI_STATUS_GOOD:
5402  		desc_scsi_status = "good";
5403  		break;
5404  	case MPI2_SCSI_STATUS_CHECK_CONDITION:
5405  		desc_scsi_status = "check condition";
5406  		break;
5407  	case MPI2_SCSI_STATUS_CONDITION_MET:
5408  		desc_scsi_status = "condition met";
5409  		break;
5410  	case MPI2_SCSI_STATUS_BUSY:
5411  		desc_scsi_status = "busy";
5412  		break;
5413  	case MPI2_SCSI_STATUS_INTERMEDIATE:
5414  		desc_scsi_status = "intermediate";
5415  		break;
5416  	case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
5417  		desc_scsi_status = "intermediate condmet";
5418  		break;
5419  	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5420  		desc_scsi_status = "reservation conflict";
5421  		break;
5422  	case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
5423  		desc_scsi_status = "command terminated";
5424  		break;
5425  	case MPI2_SCSI_STATUS_TASK_SET_FULL:
5426  		desc_scsi_status = "task set full";
5427  		break;
5428  	case MPI2_SCSI_STATUS_ACA_ACTIVE:
5429  		desc_scsi_status = "aca active";
5430  		break;
5431  	case MPI2_SCSI_STATUS_TASK_ABORTED:
5432  		desc_scsi_status = "task aborted";
5433  		break;
5434  	default:
5435  		desc_scsi_status = "unknown";
5436  		break;
5437  	}
5438  
5439  	desc_scsi_state[0] = '\0';
5440  	if (!scsi_state)
5441  		desc_scsi_state = " ";
5442  	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5443  		strcat(desc_scsi_state, "response info ");
5444  	if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5445  		strcat(desc_scsi_state, "state terminated ");
5446  	if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
5447  		strcat(desc_scsi_state, "no status ");
5448  	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
5449  		strcat(desc_scsi_state, "autosense failed ");
5450  	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
5451  		strcat(desc_scsi_state, "autosense valid ");
5452  
5453  	scsi_print_command(scmd);
5454  
5455  	if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
5456  		ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
5457  			 device_str, (u64)priv_target->sas_address);
5458  	} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
5459  		pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
5460  		if (pcie_device) {
5461  			ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
5462  				 (u64)pcie_device->wwid, pcie_device->port_num);
5463  			if (pcie_device->enclosure_handle != 0)
5464  				ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
5465  					 (u64)pcie_device->enclosure_logical_id,
5466  					 pcie_device->slot);
5467  			if (pcie_device->connector_name[0])
5468  				ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
5469  					 pcie_device->enclosure_level,
5470  					 pcie_device->connector_name);
5471  			pcie_device_put(pcie_device);
5472  		}
5473  	} else {
5474  		sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
5475  		if (sas_device) {
5476  			ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
5477  				 (u64)sas_device->sas_address, sas_device->phy);
5478  
5479  			_scsih_display_enclosure_chassis_info(ioc, sas_device,
5480  			    NULL, NULL);
5481  
5482  			sas_device_put(sas_device);
5483  		}
5484  	}
5485  
5486  	ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
5487  		 le16_to_cpu(mpi_reply->DevHandle),
5488  		 desc_ioc_state, ioc_status, smid);
5489  	ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
5490  		 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
5491  	ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
5492  		 le16_to_cpu(mpi_reply->TaskTag),
5493  		 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
5494  	ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
5495  		 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
5496  
5497  	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5498  		struct sense_info data;
5499  		_scsih_normalize_sense(scmd->sense_buffer, &data);
5500  		ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
5501  			 data.skey, data.asc, data.ascq,
5502  			 le32_to_cpu(mpi_reply->SenseCount));
5503  	}
5504  	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5505  		response_info = le32_to_cpu(mpi_reply->ResponseInfo);
5506  		response_bytes = (u8 *)&response_info;
5507  		_scsih_response_code(ioc, response_bytes[0]);
5508  	}
5509  }
5510  
5511  /**
5512   * _scsih_turn_on_pfa_led - illuminate PFA LED
5513   * @ioc: per adapter object
5514   * @handle: device handle
5515   * Context: process
5516   */
5517  static void
_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER * ioc,u16 handle)5518  _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5519  {
5520  	Mpi2SepReply_t mpi_reply;
5521  	Mpi2SepRequest_t mpi_request;
5522  	struct _sas_device *sas_device;
5523  
5524  	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
5525  	if (!sas_device)
5526  		return;
5527  
5528  	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5529  	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5530  	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5531  	mpi_request.SlotStatus =
5532  	    cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5533  	mpi_request.DevHandle = cpu_to_le16(handle);
5534  	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5535  	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5536  	    &mpi_request)) != 0) {
5537  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5538  			__FILE__, __LINE__, __func__);
5539  		goto out;
5540  	}
5541  	sas_device->pfa_led_on = 1;
5542  
5543  	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5544  		dewtprintk(ioc,
5545  			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5546  				    le16_to_cpu(mpi_reply.IOCStatus),
5547  				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5548  		goto out;
5549  	}
5550  out:
5551  	sas_device_put(sas_device);
5552  }
5553  
5554  /**
5555   * _scsih_turn_off_pfa_led - turn off Fault LED
5556   * @ioc: per adapter object
5557   * @sas_device: sas device whose PFA LED has to turned off
5558   * Context: process
5559   */
5560  static void
_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER * ioc,struct _sas_device * sas_device)5561  _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5562  	struct _sas_device *sas_device)
5563  {
5564  	Mpi2SepReply_t mpi_reply;
5565  	Mpi2SepRequest_t mpi_request;
5566  
5567  	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5568  	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5569  	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5570  	mpi_request.SlotStatus = 0;
5571  	mpi_request.Slot = cpu_to_le16(sas_device->slot);
5572  	mpi_request.DevHandle = 0;
5573  	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5574  	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5575  	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5576  		&mpi_request)) != 0) {
5577  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5578  			__FILE__, __LINE__, __func__);
5579  		return;
5580  	}
5581  
5582  	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5583  		dewtprintk(ioc,
5584  			   ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5585  				    le16_to_cpu(mpi_reply.IOCStatus),
5586  				    le32_to_cpu(mpi_reply.IOCLogInfo)));
5587  		return;
5588  	}
5589  }
5590  
5591  /**
5592   * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5593   * @ioc: per adapter object
5594   * @handle: device handle
5595   * Context: interrupt.
5596   */
5597  static void
_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER * ioc,u16 handle)5598  _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5599  {
5600  	struct fw_event_work *fw_event;
5601  
5602  	fw_event = alloc_fw_event_work(0);
5603  	if (!fw_event)
5604  		return;
5605  	fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5606  	fw_event->device_handle = handle;
5607  	fw_event->ioc = ioc;
5608  	_scsih_fw_event_add(ioc, fw_event);
5609  	fw_event_work_put(fw_event);
5610  }
5611  
5612  /**
5613   * _scsih_smart_predicted_fault - process smart errors
5614   * @ioc: per adapter object
5615   * @handle: device handle
5616   * Context: interrupt.
5617   */
5618  static void
_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER * ioc,u16 handle)5619  _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5620  {
5621  	struct scsi_target *starget;
5622  	struct MPT3SAS_TARGET *sas_target_priv_data;
5623  	Mpi2EventNotificationReply_t *event_reply;
5624  	Mpi2EventDataSasDeviceStatusChange_t *event_data;
5625  	struct _sas_device *sas_device;
5626  	ssize_t sz;
5627  	unsigned long flags;
5628  
5629  	/* only handle non-raid devices */
5630  	spin_lock_irqsave(&ioc->sas_device_lock, flags);
5631  	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5632  	if (!sas_device)
5633  		goto out_unlock;
5634  
5635  	starget = sas_device->starget;
5636  	sas_target_priv_data = starget->hostdata;
5637  
5638  	if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5639  	   ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5640  		goto out_unlock;
5641  
5642  	_scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5643  
5644  	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5645  
5646  	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5647  		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5648  
5649  	/* insert into event log */
5650  	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5651  	     sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5652  	event_reply = kzalloc(sz, GFP_ATOMIC);
5653  	if (!event_reply) {
5654  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5655  			__FILE__, __LINE__, __func__);
5656  		goto out;
5657  	}
5658  
5659  	event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5660  	event_reply->Event =
5661  	    cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5662  	event_reply->MsgLength = sz/4;
5663  	event_reply->EventDataLength =
5664  	    cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5665  	event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5666  	    event_reply->EventData;
5667  	event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
5668  	event_data->ASC = 0x5D;
5669  	event_data->DevHandle = cpu_to_le16(handle);
5670  	event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5671  	mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5672  	kfree(event_reply);
5673  out:
5674  	if (sas_device)
5675  		sas_device_put(sas_device);
5676  	return;
5677  
5678  out_unlock:
5679  	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5680  	goto out;
5681  }
5682  
5683  /**
5684   * _scsih_io_done - scsi request callback
5685   * @ioc: per adapter object
5686   * @smid: system request message index
5687   * @msix_index: MSIX table index supplied by the OS
5688   * @reply: reply message frame(lower 32bit addr)
5689   *
5690   * Callback handler when using _scsih_qcmd.
5691   *
5692   * Return: 1 meaning mf should be freed from _base_interrupt
5693   *         0 means the mf is freed from this function.
5694   */
5695  static u8
_scsih_io_done(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)5696  _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5697  {
5698  	Mpi25SCSIIORequest_t *mpi_request;
5699  	Mpi2SCSIIOReply_t *mpi_reply;
5700  	struct scsi_cmnd *scmd;
5701  	struct scsiio_tracker *st;
5702  	u16 ioc_status;
5703  	u32 xfer_cnt;
5704  	u8 scsi_state;
5705  	u8 scsi_status;
5706  	u32 log_info;
5707  	struct MPT3SAS_DEVICE *sas_device_priv_data;
5708  	u32 response_code = 0;
5709  
5710  	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5711  
5712  	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5713  	if (scmd == NULL)
5714  		return 1;
5715  
5716  	_scsih_set_satl_pending(scmd, false);
5717  
5718  	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5719  
5720  	if (mpi_reply == NULL) {
5721  		scmd->result = DID_OK << 16;
5722  		goto out;
5723  	}
5724  
5725  	sas_device_priv_data = scmd->device->hostdata;
5726  	if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5727  	     sas_device_priv_data->sas_target->deleted) {
5728  		scmd->result = DID_NO_CONNECT << 16;
5729  		goto out;
5730  	}
5731  	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5732  
5733  	/*
5734  	 * WARPDRIVE: If direct_io is set then it is directIO,
5735  	 * the failed direct I/O should be redirected to volume
5736  	 */
5737  	st = scsi_cmd_priv(scmd);
5738  	if (st->direct_io &&
5739  	     ((ioc_status & MPI2_IOCSTATUS_MASK)
5740  	      != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
5741  		st->direct_io = 0;
5742  		st->scmd = scmd;
5743  		memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5744  		mpi_request->DevHandle =
5745  		    cpu_to_le16(sas_device_priv_data->sas_target->handle);
5746  		ioc->put_smid_scsi_io(ioc, smid,
5747  		    sas_device_priv_data->sas_target->handle);
5748  		return 0;
5749  	}
5750  	/* turning off TLR */
5751  	scsi_state = mpi_reply->SCSIState;
5752  	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5753  		response_code =
5754  		    le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
5755  	if (!sas_device_priv_data->tlr_snoop_check) {
5756  		sas_device_priv_data->tlr_snoop_check++;
5757  		if ((!ioc->is_warpdrive &&
5758  		    !scsih_is_raid(&scmd->device->sdev_gendev) &&
5759  		    !scsih_is_nvme(&scmd->device->sdev_gendev))
5760  		    && sas_is_tlr_enabled(scmd->device) &&
5761  		    response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5762  			sas_disable_tlr(scmd->device);
5763  			sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5764  		}
5765  	}
5766  
5767  	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5768  	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5769  	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5770  		log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
5771  	else
5772  		log_info = 0;
5773  	ioc_status &= MPI2_IOCSTATUS_MASK;
5774  	scsi_status = mpi_reply->SCSIStatus;
5775  
5776  	if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5777  	    (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5778  	     scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5779  	     scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5780  		ioc_status = MPI2_IOCSTATUS_SUCCESS;
5781  	}
5782  
5783  	if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5784  		struct sense_info data;
5785  		const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5786  		    smid);
5787  		u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5788  		    le32_to_cpu(mpi_reply->SenseCount));
5789  		memcpy(scmd->sense_buffer, sense_data, sz);
5790  		_scsih_normalize_sense(scmd->sense_buffer, &data);
5791  		/* failure prediction threshold exceeded */
5792  		if (data.asc == 0x5D)
5793  			_scsih_smart_predicted_fault(ioc,
5794  			    le16_to_cpu(mpi_reply->DevHandle));
5795  		mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5796  
5797  		if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5798  		     ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5799  		     (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5800  		     (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5801  			_scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
5802  	}
5803  	switch (ioc_status) {
5804  	case MPI2_IOCSTATUS_BUSY:
5805  	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5806  		scmd->result = SAM_STAT_BUSY;
5807  		break;
5808  
5809  	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5810  		scmd->result = DID_NO_CONNECT << 16;
5811  		break;
5812  
5813  	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5814  		if (sas_device_priv_data->block) {
5815  			scmd->result = DID_TRANSPORT_DISRUPTED << 16;
5816  			goto out;
5817  		}
5818  		if (log_info == 0x31110630) {
5819  			if (scmd->retries > 2) {
5820  				scmd->result = DID_NO_CONNECT << 16;
5821  				scsi_device_set_state(scmd->device,
5822  				    SDEV_OFFLINE);
5823  			} else {
5824  				scmd->result = DID_SOFT_ERROR << 16;
5825  				scmd->device->expecting_cc_ua = 1;
5826  			}
5827  			break;
5828  		} else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5829  			scmd->result = DID_RESET << 16;
5830  			break;
5831  		} else if ((scmd->device->channel == RAID_CHANNEL) &&
5832  		   (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5833  		   MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5834  			scmd->result = DID_RESET << 16;
5835  			break;
5836  		}
5837  		scmd->result = DID_SOFT_ERROR << 16;
5838  		break;
5839  	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5840  	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5841  		scmd->result = DID_RESET << 16;
5842  		break;
5843  
5844  	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5845  		if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5846  			scmd->result = DID_SOFT_ERROR << 16;
5847  		else
5848  			scmd->result = (DID_OK << 16) | scsi_status;
5849  		break;
5850  
5851  	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5852  		scmd->result = (DID_OK << 16) | scsi_status;
5853  
5854  		if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5855  			break;
5856  
5857  		if (xfer_cnt < scmd->underflow) {
5858  			if (scsi_status == SAM_STAT_BUSY)
5859  				scmd->result = SAM_STAT_BUSY;
5860  			else
5861  				scmd->result = DID_SOFT_ERROR << 16;
5862  		} else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5863  		     MPI2_SCSI_STATE_NO_SCSI_STATUS))
5864  			scmd->result = DID_SOFT_ERROR << 16;
5865  		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5866  			scmd->result = DID_RESET << 16;
5867  		else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
5868  			mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5869  			mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5870  			scsi_build_sense(scmd, 0, ILLEGAL_REQUEST,
5871  					 0x20, 0);
5872  		}
5873  		break;
5874  
5875  	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5876  		scsi_set_resid(scmd, 0);
5877  		fallthrough;
5878  	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5879  	case MPI2_IOCSTATUS_SUCCESS:
5880  		scmd->result = (DID_OK << 16) | scsi_status;
5881  		if (response_code ==
5882  		    MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5883  		    (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5884  		     MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5885  			scmd->result = DID_SOFT_ERROR << 16;
5886  		else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5887  			scmd->result = DID_RESET << 16;
5888  		break;
5889  
5890  	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5891  	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5892  	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5893  		_scsih_eedp_error_handling(scmd, ioc_status);
5894  		break;
5895  
5896  	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5897  	case MPI2_IOCSTATUS_INVALID_FUNCTION:
5898  	case MPI2_IOCSTATUS_INVALID_SGL:
5899  	case MPI2_IOCSTATUS_INTERNAL_ERROR:
5900  	case MPI2_IOCSTATUS_INVALID_FIELD:
5901  	case MPI2_IOCSTATUS_INVALID_STATE:
5902  	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5903  	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5904  	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5905  	default:
5906  		scmd->result = DID_SOFT_ERROR << 16;
5907  		break;
5908  
5909  	}
5910  
5911  	if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5912  		_scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
5913  
5914   out:
5915  
5916  	scsi_dma_unmap(scmd);
5917  	mpt3sas_base_free_smid(ioc, smid);
5918  	scsi_done(scmd);
5919  	return 0;
5920  }
5921  
5922  /**
5923   * _scsih_update_vphys_after_reset - update the Port's
5924   *			vphys_list after reset
5925   * @ioc: per adapter object
5926   *
5927   * Returns nothing.
5928   */
5929  static void
_scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER * ioc)5930  _scsih_update_vphys_after_reset(struct MPT3SAS_ADAPTER *ioc)
5931  {
5932  	u16 sz, ioc_status;
5933  	int i;
5934  	Mpi2ConfigReply_t mpi_reply;
5935  	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5936  	u16 attached_handle;
5937  	u64 attached_sas_addr;
5938  	u8 found = 0, port_id;
5939  	Mpi2SasPhyPage0_t phy_pg0;
5940  	struct hba_port *port, *port_next, *mport;
5941  	struct virtual_phy *vphy, *vphy_next;
5942  	struct _sas_device *sas_device;
5943  
5944  	/*
5945  	 * Mark all the vphys objects as dirty.
5946  	 */
5947  	list_for_each_entry_safe(port, port_next,
5948  	    &ioc->port_table_list, list) {
5949  		if (!port->vphys_mask)
5950  			continue;
5951  		list_for_each_entry_safe(vphy, vphy_next,
5952  		    &port->vphys_list, list) {
5953  			vphy->flags |= MPT_VPHY_FLAG_DIRTY_PHY;
5954  		}
5955  	}
5956  
5957  	/*
5958  	 * Read SASIOUnitPage0 to get each HBA Phy's data.
5959  	 */
5960  	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
5961  	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5962  	if (!sas_iounit_pg0) {
5963  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
5964  		    __FILE__, __LINE__, __func__);
5965  		return;
5966  	}
5967  	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5968  	    sas_iounit_pg0, sz)) != 0)
5969  		goto out;
5970  	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5971  	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5972  		goto out;
5973  	/*
5974  	 * Loop over each HBA Phy.
5975  	 */
5976  	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
5977  		/*
5978  		 * Check whether Phy's Negotiation Link Rate is > 1.5G or not.
5979  		 */
5980  		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
5981  		    MPI2_SAS_NEG_LINK_RATE_1_5)
5982  			continue;
5983  		/*
5984  		 * Check whether Phy is connected to SEP device or not,
5985  		 * if it is SEP device then read the Phy's SASPHYPage0 data to
5986  		 * determine whether Phy is a virtual Phy or not. if it is
5987  		 * virtual phy then it is conformed that the attached remote
5988  		 * device is a HBA's vSES device.
5989  		 */
5990  		if (!(le32_to_cpu(
5991  		    sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
5992  		    MPI2_SAS_DEVICE_INFO_SEP))
5993  			continue;
5994  
5995  		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
5996  		    i))) {
5997  			ioc_err(ioc, "failure at %s:%d/%s()!\n",
5998  			    __FILE__, __LINE__, __func__);
5999  			continue;
6000  		}
6001  
6002  		if (!(le32_to_cpu(phy_pg0.PhyInfo) &
6003  		    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
6004  			continue;
6005  		/*
6006  		 * Get the vSES device's SAS Address.
6007  		 */
6008  		attached_handle = le16_to_cpu(
6009  		    sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6010  		if (_scsih_get_sas_address(ioc, attached_handle,
6011  		    &attached_sas_addr) != 0) {
6012  			ioc_err(ioc, "failure at %s:%d/%s()!\n",
6013  			    __FILE__, __LINE__, __func__);
6014  			continue;
6015  		}
6016  
6017  		found = 0;
6018  		port = port_next = NULL;
6019  		/*
6020  		 * Loop over each virtual_phy object from
6021  		 * each port's vphys_list.
6022  		 */
6023  		list_for_each_entry_safe(port,
6024  		    port_next, &ioc->port_table_list, list) {
6025  			if (!port->vphys_mask)
6026  				continue;
6027  			list_for_each_entry_safe(vphy, vphy_next,
6028  			    &port->vphys_list, list) {
6029  				/*
6030  				 * Continue with next virtual_phy object
6031  				 * if the object is not marked as dirty.
6032  				 */
6033  				if (!(vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY))
6034  					continue;
6035  
6036  				/*
6037  				 * Continue with next virtual_phy object
6038  				 * if the object's SAS Address is not equals
6039  				 * to current Phy's vSES device SAS Address.
6040  				 */
6041  				if (vphy->sas_address != attached_sas_addr)
6042  					continue;
6043  				/*
6044  				 * Enable current Phy number bit in object's
6045  				 * phy_mask field.
6046  				 */
6047  				if (!(vphy->phy_mask & (1 << i)))
6048  					vphy->phy_mask = (1 << i);
6049  				/*
6050  				 * Get hba_port object from hba_port table
6051  				 * corresponding to current phy's Port ID.
6052  				 * if there is no hba_port object corresponding
6053  				 * to Phy's Port ID then create a new hba_port
6054  				 * object & add to hba_port table.
6055  				 */
6056  				port_id = sas_iounit_pg0->PhyData[i].Port;
6057  				mport = mpt3sas_get_port_by_id(ioc, port_id, 1);
6058  				if (!mport) {
6059  					mport = kzalloc(
6060  					    sizeof(struct hba_port), GFP_KERNEL);
6061  					if (!mport)
6062  						break;
6063  					mport->port_id = port_id;
6064  					ioc_info(ioc,
6065  					    "%s: hba_port entry: %p, port: %d is added to hba_port list\n",
6066  					    __func__, mport, mport->port_id);
6067  					list_add_tail(&mport->list,
6068  						&ioc->port_table_list);
6069  				}
6070  				/*
6071  				 * If mport & port pointers are not pointing to
6072  				 * same hba_port object then it means that vSES
6073  				 * device's Port ID got changed after reset and
6074  				 * hence move current virtual_phy object from
6075  				 * port's vphys_list to mport's vphys_list.
6076  				 */
6077  				if (port != mport) {
6078  					if (!mport->vphys_mask)
6079  						INIT_LIST_HEAD(
6080  						    &mport->vphys_list);
6081  					mport->vphys_mask |= (1 << i);
6082  					port->vphys_mask &= ~(1 << i);
6083  					list_move(&vphy->list,
6084  					    &mport->vphys_list);
6085  					sas_device = mpt3sas_get_sdev_by_addr(
6086  					    ioc, attached_sas_addr, port);
6087  					if (sas_device)
6088  						sas_device->port = mport;
6089  				}
6090  				/*
6091  				 * Earlier while updating the hba_port table,
6092  				 * it is determined that there is no other
6093  				 * direct attached device with mport's Port ID,
6094  				 * Hence mport was marked as dirty. Only vSES
6095  				 * device has this Port ID, so unmark the mport
				 * as dirty.
6097  				 */
6098  				if (mport->flags & HBA_PORT_FLAG_DIRTY_PORT) {
6099  					mport->sas_address = 0;
6100  					mport->phy_mask = 0;
6101  					mport->flags &=
6102  					    ~HBA_PORT_FLAG_DIRTY_PORT;
6103  				}
6104  				/*
6105  				 * Unmark current virtual_phy object as dirty.
6106  				 */
6107  				vphy->flags &= ~MPT_VPHY_FLAG_DIRTY_PHY;
6108  				found = 1;
6109  				break;
6110  			}
6111  			if (found)
6112  				break;
6113  		}
6114  	}
6115  out:
6116  	kfree(sas_iounit_pg0);
6117  }
6118  
6119  /**
6120   * _scsih_get_port_table_after_reset - Construct temporary port table
6121   * @ioc: per adapter object
6122   * @port_table: address where port table needs to be constructed
6123   *
6124   * return number of HBA port entries available after reset.
6125   */
6126  static int
_scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER * ioc,struct hba_port * port_table)6127  _scsih_get_port_table_after_reset(struct MPT3SAS_ADAPTER *ioc,
6128  	struct hba_port *port_table)
6129  {
6130  	u16 sz, ioc_status;
6131  	int i, j;
6132  	Mpi2ConfigReply_t mpi_reply;
6133  	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
6134  	u16 attached_handle;
6135  	u64 attached_sas_addr;
6136  	u8 found = 0, port_count = 0, port_id;
6137  
6138  	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
6139  	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
6140  	if (!sas_iounit_pg0) {
6141  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6142  		    __FILE__, __LINE__, __func__);
6143  		return port_count;
6144  	}
6145  
6146  	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
6147  	    sas_iounit_pg0, sz)) != 0)
6148  		goto out;
6149  	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6150  	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6151  		goto out;
6152  	for (i = 0; i < ioc->sas_hba.num_phys; i++) {
6153  		found = 0;
6154  		if ((sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4) <
6155  		    MPI2_SAS_NEG_LINK_RATE_1_5)
6156  			continue;
6157  		attached_handle =
6158  		    le16_to_cpu(sas_iounit_pg0->PhyData[i].AttachedDevHandle);
6159  		if (_scsih_get_sas_address(
6160  		    ioc, attached_handle, &attached_sas_addr) != 0) {
6161  			ioc_err(ioc, "failure at %s:%d/%s()!\n",
6162  			    __FILE__, __LINE__, __func__);
6163  			continue;
6164  		}
6165  
6166  		for (j = 0; j < port_count; j++) {
6167  			port_id = sas_iounit_pg0->PhyData[i].Port;
6168  			if (port_table[j].port_id == port_id &&
6169  			    port_table[j].sas_address == attached_sas_addr) {
6170  				port_table[j].phy_mask |= (1 << i);
6171  				found = 1;
6172  				break;
6173  			}
6174  		}
6175  
6176  		if (found)
6177  			continue;
6178  
6179  		port_id = sas_iounit_pg0->PhyData[i].Port;
6180  		port_table[port_count].port_id = port_id;
6181  		port_table[port_count].phy_mask = (1 << i);
6182  		port_table[port_count].sas_address = attached_sas_addr;
6183  		port_count++;
6184  	}
6185  out:
6186  	kfree(sas_iounit_pg0);
6187  	return port_count;
6188  }
6189  
/*
 * Match-quality codes returned by _scsih_look_and_get_matched_port_entry(),
 * listed from strongest match to weakest.
 */
enum hba_port_matched_codes {
	NOT_MATCHED = 0,			/* no dirty entry matched */
	MATCHED_WITH_ADDR_AND_PHYMASK,		/* SAS address + exact phy mask */
	MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT,	/* address + partial mask + port id */
	MATCHED_WITH_ADDR_AND_SUBPHYMASK,	/* address + partial phy mask */
	MATCHED_WITH_ADDR,			/* SAS address only */
};
6197  
6198  /**
6199   * _scsih_look_and_get_matched_port_entry - Get matched hba port entry
6200   *					from HBA port table
6201   * @ioc: per adapter object
6202   * @port_entry: hba port entry from temporary port table which needs to be
6203   *		searched for matched entry in the HBA port table
6204   * @matched_port_entry: save matched hba port entry here
6205   * @count: count of matched entries
6206   *
6207   * return type of matched entry found.
6208   */
6209  static enum hba_port_matched_codes
_scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER * ioc,struct hba_port * port_entry,struct hba_port ** matched_port_entry,int * count)6210  _scsih_look_and_get_matched_port_entry(struct MPT3SAS_ADAPTER *ioc,
6211  	struct hba_port *port_entry,
6212  	struct hba_port **matched_port_entry, int *count)
6213  {
6214  	struct hba_port *port_table_entry, *matched_port = NULL;
6215  	enum hba_port_matched_codes matched_code = NOT_MATCHED;
6216  	int lcount = 0;
6217  	*matched_port_entry = NULL;
6218  
6219  	list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6220  		if (!(port_table_entry->flags & HBA_PORT_FLAG_DIRTY_PORT))
6221  			continue;
6222  
6223  		if ((port_table_entry->sas_address == port_entry->sas_address)
6224  		    && (port_table_entry->phy_mask == port_entry->phy_mask)) {
6225  			matched_code = MATCHED_WITH_ADDR_AND_PHYMASK;
6226  			matched_port = port_table_entry;
6227  			break;
6228  		}
6229  
6230  		if ((port_table_entry->sas_address == port_entry->sas_address)
6231  		    && (port_table_entry->phy_mask & port_entry->phy_mask)
6232  		    && (port_table_entry->port_id == port_entry->port_id)) {
6233  			matched_code = MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT;
6234  			matched_port = port_table_entry;
6235  			continue;
6236  		}
6237  
6238  		if ((port_table_entry->sas_address == port_entry->sas_address)
6239  		    && (port_table_entry->phy_mask & port_entry->phy_mask)) {
6240  			if (matched_code ==
6241  			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6242  				continue;
6243  			matched_code = MATCHED_WITH_ADDR_AND_SUBPHYMASK;
6244  			matched_port = port_table_entry;
6245  			continue;
6246  		}
6247  
6248  		if (port_table_entry->sas_address == port_entry->sas_address) {
6249  			if (matched_code ==
6250  			    MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT)
6251  				continue;
6252  			if (matched_code == MATCHED_WITH_ADDR_AND_SUBPHYMASK)
6253  				continue;
6254  			matched_code = MATCHED_WITH_ADDR;
6255  			matched_port = port_table_entry;
6256  			lcount++;
6257  		}
6258  	}
6259  
6260  	*matched_port_entry = matched_port;
6261  	if (matched_code ==  MATCHED_WITH_ADDR)
6262  		*count = lcount;
6263  	return matched_code;
6264  }
6265  
6266  /**
6267   * _scsih_del_phy_part_of_anther_port - remove phy if it
 *				is a part of another port
6269   *@ioc: per adapter object
6270   *@port_table: port table after reset
6271   *@index: hba port entry index
6272   *@port_count: number of ports available after host reset
6273   *@offset: HBA phy bit offset
6274   *
6275   */
6276  static void
_scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER * ioc,struct hba_port * port_table,int index,u8 port_count,int offset)6277  _scsih_del_phy_part_of_anther_port(struct MPT3SAS_ADAPTER *ioc,
6278  	struct hba_port *port_table,
6279  	int index, u8 port_count, int offset)
6280  {
6281  	struct _sas_node *sas_node = &ioc->sas_hba;
6282  	u32 i, found = 0;
6283  
6284  	for (i = 0; i < port_count; i++) {
6285  		if (i == index)
6286  			continue;
6287  
6288  		if (port_table[i].phy_mask & (1 << offset)) {
6289  			mpt3sas_transport_del_phy_from_an_existing_port(
6290  			    ioc, sas_node, &sas_node->phy[offset]);
6291  			found = 1;
6292  			break;
6293  		}
6294  	}
6295  	if (!found)
6296  		port_table[index].phy_mask |= (1 << offset);
6297  }
6298  
6299  /**
6300   * _scsih_add_or_del_phys_from_existing_port - add/remove phy to/from
6301   *						right port
6302   *@ioc: per adapter object
6303   *@hba_port_entry: hba port table entry
6304   *@port_table: temporary port table
6305   *@index: hba port entry index
6306   *@port_count: number of ports available after host reset
6307   *
6308   */
6309  static void
_scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER * ioc,struct hba_port * hba_port_entry,struct hba_port * port_table,int index,int port_count)6310  _scsih_add_or_del_phys_from_existing_port(struct MPT3SAS_ADAPTER *ioc,
6311  	struct hba_port *hba_port_entry, struct hba_port *port_table,
6312  	int index, int port_count)
6313  {
6314  	u32 phy_mask, offset = 0;
6315  	struct _sas_node *sas_node = &ioc->sas_hba;
6316  
6317  	phy_mask = hba_port_entry->phy_mask ^ port_table[index].phy_mask;
6318  
6319  	for (offset = 0; offset < ioc->sas_hba.num_phys; offset++) {
6320  		if (phy_mask & (1 << offset)) {
6321  			if (!(port_table[index].phy_mask & (1 << offset))) {
6322  				_scsih_del_phy_part_of_anther_port(
6323  				    ioc, port_table, index, port_count,
6324  				    offset);
6325  				continue;
6326  			}
6327  			if (sas_node->phy[offset].phy_belongs_to_port)
6328  				mpt3sas_transport_del_phy_from_an_existing_port(
6329  				    ioc, sas_node, &sas_node->phy[offset]);
6330  			mpt3sas_transport_add_phy_to_an_existing_port(
6331  			    ioc, sas_node, &sas_node->phy[offset],
6332  			    hba_port_entry->sas_address,
6333  			    hba_port_entry);
6334  		}
6335  	}
6336  }
6337  
6338  /**
6339   * _scsih_del_dirty_vphy - delete virtual_phy objects marked as dirty.
6340   * @ioc: per adapter object
6341   *
6342   * Returns nothing.
6343   */
6344  static void
_scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER * ioc)6345  _scsih_del_dirty_vphy(struct MPT3SAS_ADAPTER *ioc)
6346  {
6347  	struct hba_port *port, *port_next;
6348  	struct virtual_phy *vphy, *vphy_next;
6349  
6350  	list_for_each_entry_safe(port, port_next,
6351  	    &ioc->port_table_list, list) {
6352  		if (!port->vphys_mask)
6353  			continue;
6354  		list_for_each_entry_safe(vphy, vphy_next,
6355  		    &port->vphys_list, list) {
6356  			if (vphy->flags & MPT_VPHY_FLAG_DIRTY_PHY) {
6357  				drsprintk(ioc, ioc_info(ioc,
6358  				    "Deleting vphy %p entry from port id: %d\t, Phy_mask 0x%08x\n",
6359  				    vphy, port->port_id,
6360  				    vphy->phy_mask));
6361  				port->vphys_mask &= ~vphy->phy_mask;
6362  				list_del(&vphy->list);
6363  				kfree(vphy);
6364  			}
6365  		}
6366  		if (!port->vphys_mask && !port->sas_address)
6367  			port->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6368  	}
6369  }
6370  
6371  /**
6372   * _scsih_del_dirty_port_entries - delete dirty port entries from port list
6373   *					after host reset
6374   *@ioc: per adapter object
6375   *
6376   */
6377  static void
_scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER * ioc)6378  _scsih_del_dirty_port_entries(struct MPT3SAS_ADAPTER *ioc)
6379  {
6380  	struct hba_port *port, *port_next;
6381  
6382  	list_for_each_entry_safe(port, port_next,
6383  	    &ioc->port_table_list, list) {
6384  		if (!(port->flags & HBA_PORT_FLAG_DIRTY_PORT) ||
6385  		    port->flags & HBA_PORT_FLAG_NEW_PORT)
6386  			continue;
6387  
6388  		drsprintk(ioc, ioc_info(ioc,
6389  		    "Deleting port table entry %p having Port: %d\t Phy_mask 0x%08x\n",
6390  		    port, port->port_id, port->phy_mask));
6391  		list_del(&port->list);
6392  		kfree(port);
6393  	}
6394  }
6395  
6396  /**
6397   * _scsih_sas_port_refresh - Update HBA port table after host reset
6398   * @ioc: per adapter object
6399   */
6400  static void
_scsih_sas_port_refresh(struct MPT3SAS_ADAPTER * ioc)6401  _scsih_sas_port_refresh(struct MPT3SAS_ADAPTER *ioc)
6402  {
6403  	u32 port_count = 0;
6404  	struct hba_port *port_table;
6405  	struct hba_port *port_table_entry;
6406  	struct hba_port *port_entry = NULL;
6407  	int i, j, count = 0, lcount = 0;
6408  	int ret;
6409  	u64 sas_addr;
6410  	u8 num_phys;
6411  
6412  	drsprintk(ioc, ioc_info(ioc,
6413  	    "updating ports for sas_host(0x%016llx)\n",
6414  	    (unsigned long long)ioc->sas_hba.sas_address));
6415  
6416  	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
6417  	if (!num_phys) {
6418  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6419  		    __FILE__, __LINE__, __func__);
6420  		return;
6421  	}
6422  
6423  	if (num_phys > ioc->sas_hba.nr_phys_allocated) {
6424  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6425  		   __FILE__, __LINE__, __func__);
6426  		return;
6427  	}
6428  	ioc->sas_hba.num_phys = num_phys;
6429  
6430  	port_table = kcalloc(ioc->sas_hba.num_phys,
6431  	    sizeof(struct hba_port), GFP_KERNEL);
6432  	if (!port_table)
6433  		return;
6434  
6435  	port_count = _scsih_get_port_table_after_reset(ioc, port_table);
6436  	if (!port_count)
6437  		return;
6438  
6439  	drsprintk(ioc, ioc_info(ioc, "New Port table\n"));
6440  	for (j = 0; j < port_count; j++)
6441  		drsprintk(ioc, ioc_info(ioc,
6442  		    "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6443  		    port_table[j].port_id,
6444  		    port_table[j].phy_mask, port_table[j].sas_address));
6445  
6446  	list_for_each_entry(port_table_entry, &ioc->port_table_list, list)
6447  		port_table_entry->flags |= HBA_PORT_FLAG_DIRTY_PORT;
6448  
6449  	drsprintk(ioc, ioc_info(ioc, "Old Port table\n"));
6450  	port_table_entry = NULL;
6451  	list_for_each_entry(port_table_entry, &ioc->port_table_list, list) {
6452  		drsprintk(ioc, ioc_info(ioc,
6453  		    "Port: %d\t Phy_mask 0x%08x\t sas_addr(0x%016llx)\n",
6454  		    port_table_entry->port_id,
6455  		    port_table_entry->phy_mask,
6456  		    port_table_entry->sas_address));
6457  	}
6458  
6459  	for (j = 0; j < port_count; j++) {
6460  		ret = _scsih_look_and_get_matched_port_entry(ioc,
6461  		    &port_table[j], &port_entry, &count);
6462  		if (!port_entry) {
6463  			drsprintk(ioc, ioc_info(ioc,
6464  			    "No Matched entry for sas_addr(0x%16llx), Port:%d\n",
6465  			    port_table[j].sas_address,
6466  			    port_table[j].port_id));
6467  			continue;
6468  		}
6469  
6470  		switch (ret) {
6471  		case MATCHED_WITH_ADDR_SUBPHYMASK_AND_PORT:
6472  		case MATCHED_WITH_ADDR_AND_SUBPHYMASK:
6473  			_scsih_add_or_del_phys_from_existing_port(ioc,
6474  			    port_entry, port_table, j, port_count);
6475  			break;
6476  		case MATCHED_WITH_ADDR:
6477  			sas_addr = port_table[j].sas_address;
6478  			for (i = 0; i < port_count; i++) {
6479  				if (port_table[i].sas_address == sas_addr)
6480  					lcount++;
6481  			}
6482  
6483  			if (count > 1 || lcount > 1)
6484  				port_entry = NULL;
6485  			else
6486  				_scsih_add_or_del_phys_from_existing_port(ioc,
6487  				    port_entry, port_table, j, port_count);
6488  		}
6489  
6490  		if (!port_entry)
6491  			continue;
6492  
6493  		if (port_entry->port_id != port_table[j].port_id)
6494  			port_entry->port_id = port_table[j].port_id;
6495  		port_entry->flags &= ~HBA_PORT_FLAG_DIRTY_PORT;
6496  		port_entry->phy_mask = port_table[j].phy_mask;
6497  	}
6498  
6499  	port_table_entry = NULL;
6500  }
6501  
6502  /**
6503   * _scsih_alloc_vphy - allocate virtual_phy object
6504   * @ioc: per adapter object
6505   * @port_id: Port ID number
6506   * @phy_num: HBA Phy number
6507   *
6508   * Returns allocated virtual_phy object.
6509   */
6510  static struct virtual_phy *
_scsih_alloc_vphy(struct MPT3SAS_ADAPTER * ioc,u8 port_id,u8 phy_num)6511  _scsih_alloc_vphy(struct MPT3SAS_ADAPTER *ioc, u8 port_id, u8 phy_num)
6512  {
6513  	struct virtual_phy *vphy;
6514  	struct hba_port *port;
6515  
6516  	port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6517  	if (!port)
6518  		return NULL;
6519  
6520  	vphy = mpt3sas_get_vphy_by_phy(ioc, port, phy_num);
6521  	if (!vphy) {
6522  		vphy = kzalloc(sizeof(struct virtual_phy), GFP_KERNEL);
6523  		if (!vphy)
6524  			return NULL;
6525  
6526  		if (!port->vphys_mask)
6527  			INIT_LIST_HEAD(&port->vphys_list);
6528  
6529  		/*
6530  		 * Enable bit corresponding to HBA phy number on its
6531  		 * parent hba_port object's vphys_mask field.
6532  		 */
6533  		port->vphys_mask |= (1 << phy_num);
6534  		vphy->phy_mask |= (1 << phy_num);
6535  
6536  		list_add_tail(&vphy->list, &port->vphys_list);
6537  
6538  		ioc_info(ioc,
6539  		    "vphy entry: %p, port id: %d, phy:%d is added to port's vphys_list\n",
6540  		    vphy, port->port_id, phy_num);
6541  	}
6542  	return vphy;
6543  }
6544  
6545  /**
6546   * _scsih_sas_host_refresh - refreshing sas host object contents
6547   * @ioc: per adapter object
6548   * Context: user
6549   *
6550   * During port enable, fw will send topology events for every device. Its
6551   * possible that the handles may change from the previous setting, so this
6552   * code keeping handles updating if changed.
6553   */
static void
_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
{
	u16 sz;
	u16 ioc_status;
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	u16 attached_handle;
	u8 link_rate, port_id;
	struct hba_port *port;
	Mpi2SasPhyPage0_t phy_pg0;

	dtmprintk(ioc,
		  ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
			   (u64)ioc->sas_hba.sas_address));

	/* SAS IO Unit page 0 provides the current per-phy state. */
	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz)) != 0)
		goto out;
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		goto out;
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
		/* The host device handle is the same for all phys. */
		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(
			    sas_iounit_pg0->PhyData[0].ControllerDevHandle);
		/*
		 * Create an hba_port entry for this phy's Port ID if one
		 * does not already exist; entries created while the host is
		 * in recovery are flagged NEW_PORT so that the post-reset
		 * cleanup does not delete them.
		 */
		port_id = sas_iounit_pg0->PhyData[i].Port;
		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			    "hba_port entry: %p, port: %d is added to hba_port list\n",
			    port, port->port_id);
			if (ioc->shost_recovery)
				port->flags = HBA_PORT_FLAG_NEW_PORT;
			list_add_tail(&port->list, &ioc->port_table_list);
		}
		/*
		 * Check whether current Phy belongs to HBA vSES device or not.
		 */
		if (le32_to_cpu(sas_iounit_pg0->PhyData[i].ControllerPhyDeviceInfo) &
		    MPI2_SAS_DEVICE_INFO_SEP &&
		    (link_rate >=  MPI2_SAS_NEG_LINK_RATE_1_5)) {
			if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
			    &phy_pg0, i))) {
				ioc_err(ioc,
				    "failure at %s:%d/%s()!\n",
				     __FILE__, __LINE__, __func__);
				goto out;
			}
			if (!(le32_to_cpu(phy_pg0.PhyInfo) &
			    MPI2_SAS_PHYINFO_VIRTUAL_PHY))
				continue;
			/*
			 * Allocate a virtual_phy object for vSES device, if
			 * this vSES device is hot added.
			 */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		/*
		 * Add new HBA phys to STL if these new phys got added as part
		 * of HBA Firmware upgrade/downgrade operation.
		 */
		if (!ioc->sas_hba.phy[i].phy) {
			if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply,
							&phy_pg0, i))) {
				ioc_err(ioc, "failure at %s:%d/%s()!\n",
					__FILE__, __LINE__, __func__);
				continue;
			}
			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
				MPI2_IOCSTATUS_MASK;
			if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
				ioc_err(ioc, "failure at %s:%d/%s()!\n",
					__FILE__, __LINE__, __func__);
				continue;
			}
			ioc->sas_hba.phy[i].phy_id = i;
			mpt3sas_transport_add_host_phy(ioc,
				&ioc->sas_hba.phy[i], phy_pg0,
				ioc->sas_hba.parent_dev);
			continue;
		}
		/* Refresh the handle/port/link state of an existing phy. */
		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
		    AttachedDevHandle);
		/* A device is attached: report at least the minimum rate. */
		if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
			link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
		    attached_handle, i, link_rate,
		    ioc->sas_hba.phy[i].port);
	}
	/*
	 * Clear the phy details if this phy got disabled as part of
	 * HBA Firmware upgrade/downgrade operation.
	 */
	for (i = ioc->sas_hba.num_phys;
	     i < ioc->sas_hba.nr_phys_allocated; i++) {
		if (ioc->sas_hba.phy[i].phy &&
		    ioc->sas_hba.phy[i].phy->negotiated_linkrate >=
		    SAS_LINK_RATE_1_5_GBPS)
			mpt3sas_transport_update_links(ioc,
				ioc->sas_hba.sas_address, 0, i,
				MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED, NULL);
	}
 out:
	kfree(sas_iounit_pg0);
}
6680  
6681  /**
6682   * _scsih_sas_host_add - create sas host object
6683   * @ioc: per adapter object
6684   *
6685   * Creating host side data object, stored in ioc->sas_hba
6686   */
static void
_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
{
	int i;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
	Mpi2SasPhyPage0_t phy_pg0;
	Mpi2SasDevicePage0_t sas_device_pg0;
	Mpi2SasEnclosurePage0_t enclosure_pg0;
	u16 ioc_status;
	u16 sz;
	u8 device_missing_delay;
	u8 num_phys, port_id;
	struct hba_port *port;

	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
	if (!num_phys) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	/*
	 * Allocate at least MPT_MAX_HBA_NUM_PHYS entries so phys that appear
	 * later (e.g. after a firmware upgrade) fit without reallocating.
	 */
	ioc->sas_hba.nr_phys_allocated = max_t(u8,
	    MPT_MAX_HBA_NUM_PHYS, num_phys);
	ioc->sas_hba.phy = kcalloc(ioc->sas_hba.nr_phys_allocated,
	    sizeof(struct _sas_phy), GFP_KERNEL);
	if (!ioc->sas_hba.phy) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->sas_hba.num_phys = num_phys;

	/* sas_iounit page 0 */
	sz = struct_size(sas_iounit_pg0, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg0) {
		/*
		 * NOTE(review): returns instead of 'goto out'; equivalent
		 * here since both page buffers are still NULL, and
		 * ioc->sas_hba.phy remains owned by ioc.
		 */
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}
	if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
	    sas_iounit_pg0, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	/* sas_iounit page 1 */
	sz = struct_size(sas_iounit_pg1, PhyData, ioc->sas_hba.num_phys);
	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg1) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
	    sas_iounit_pg1, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	/*
	 * Cache the I/O and report-device missing delays from page 1;
	 * the UNIT_16 flag means the timeout field is in units of 16 s.
	 */
	ioc->io_missing_delay =
	    sas_iounit_pg1->IODeviceMissingDelay;
	device_missing_delay =
	    sas_iounit_pg1->ReportDeviceMissingDelay;
	if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
		ioc->device_missing_delay = (device_missing_delay &
		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
	else
		ioc->device_missing_delay = device_missing_delay &
		    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;

	ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
	for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
		if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
		    i))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			goto out;
		}
		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			goto out;
		}

		/* The host device handle is the same for all phys. */
		if (i == 0)
			ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
			    PhyData[0].ControllerDevHandle);

		/* Create an hba_port entry for this Port ID if missing. */
		port_id = sas_iounit_pg0->PhyData[i].Port;
		if (!(mpt3sas_get_port_by_id(ioc, port_id, 0))) {
			port = kzalloc(sizeof(struct hba_port), GFP_KERNEL);
			if (!port)
				goto out;

			port->port_id = port_id;
			ioc_info(ioc,
			   "hba_port entry: %p, port: %d is added to hba_port list\n",
			   port, port->port_id);
			list_add_tail(&port->list,
			    &ioc->port_table_list);
		}

		/*
		 * Check whether current Phy belongs to HBA vSES device or not.
		 */
		if ((le32_to_cpu(phy_pg0.PhyInfo) &
		    MPI2_SAS_PHYINFO_VIRTUAL_PHY) &&
		    (phy_pg0.NegotiatedLinkRate >> 4) >=
		    MPI2_SAS_NEG_LINK_RATE_1_5) {
			/*
			 * Allocate a virtual_phy object for vSES device.
			 */
			if (!_scsih_alloc_vphy(ioc, port_id, i))
				goto out;
			ioc->sas_hba.phy[i].hba_vphy = 1;
		}

		/* Register the phy with the SAS transport layer. */
		ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
		ioc->sas_hba.phy[i].phy_id = i;
		ioc->sas_hba.phy[i].port =
		    mpt3sas_get_port_by_id(ioc, port_id, 0);
		mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
		    phy_pg0, ioc->sas_hba.parent_dev);
	}
	/* Read the host's own device page for enclosure/address info. */
	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->sas_hba.enclosure_handle =
	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
	ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
	ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
		 ioc->sas_hba.handle,
		 (u64)ioc->sas_hba.sas_address,
		 ioc->sas_hba.num_phys);

	/* Resolve the enclosure logical id when an enclosure exists. */
	if (ioc->sas_hba.enclosure_handle) {
		if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
		    &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
		   ioc->sas_hba.enclosure_handle)))
			ioc->sas_hba.enclosure_logical_id =
			    le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
	}

 out:
	kfree(sas_iounit_pg1);
	kfree(sas_iounit_pg0);
}
6858  
6859  /**
6860   * _scsih_expander_add -  creating expander object
6861   * @ioc: per adapter object
6862   * @handle: expander handle
6863   *
6864   * Creating expander object, stored in ioc->sas_expander_list.
6865   *
6866   * Return: 0 for success, else error.
6867   */
6868  static int
_scsih_expander_add(struct MPT3SAS_ADAPTER * ioc,u16 handle)6869  _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6870  {
6871  	struct _sas_node *sas_expander;
6872  	struct _enclosure_node *enclosure_dev;
6873  	Mpi2ConfigReply_t mpi_reply;
6874  	Mpi2ExpanderPage0_t expander_pg0;
6875  	Mpi2ExpanderPage1_t expander_pg1;
6876  	u32 ioc_status;
6877  	u16 parent_handle;
6878  	u64 sas_address, sas_address_parent = 0;
6879  	int i;
6880  	unsigned long flags;
6881  	struct _sas_port *mpt3sas_port = NULL;
6882  	u8 port_id;
6883  
6884  	int rc = 0;
6885  
6886  	if (!handle)
6887  		return -1;
6888  
6889  	if (ioc->shost_recovery || ioc->pci_error_recovery)
6890  		return -1;
6891  
6892  	if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
6893  	    MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
6894  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6895  			__FILE__, __LINE__, __func__);
6896  		return -1;
6897  	}
6898  
6899  	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6900  	    MPI2_IOCSTATUS_MASK;
6901  	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6902  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6903  			__FILE__, __LINE__, __func__);
6904  		return -1;
6905  	}
6906  
6907  	/* handle out of order topology events */
6908  	parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
6909  	if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
6910  	    != 0) {
6911  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6912  			__FILE__, __LINE__, __func__);
6913  		return -1;
6914  	}
6915  
6916  	port_id = expander_pg0.PhysicalPort;
6917  	if (sas_address_parent != ioc->sas_hba.sas_address) {
6918  		spin_lock_irqsave(&ioc->sas_node_lock, flags);
6919  		sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6920  		    sas_address_parent,
6921  		    mpt3sas_get_port_by_id(ioc, port_id, 0));
6922  		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6923  		if (!sas_expander) {
6924  			rc = _scsih_expander_add(ioc, parent_handle);
6925  			if (rc != 0)
6926  				return rc;
6927  		}
6928  	}
6929  
6930  	spin_lock_irqsave(&ioc->sas_node_lock, flags);
6931  	sas_address = le64_to_cpu(expander_pg0.SASAddress);
6932  	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
6933  	    sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
6934  	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6935  
6936  	if (sas_expander)
6937  		return 0;
6938  
6939  	sas_expander = kzalloc(sizeof(struct _sas_node),
6940  	    GFP_KERNEL);
6941  	if (!sas_expander) {
6942  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6943  			__FILE__, __LINE__, __func__);
6944  		return -1;
6945  	}
6946  
6947  	sas_expander->handle = handle;
6948  	sas_expander->num_phys = expander_pg0.NumPhys;
6949  	sas_expander->sas_address_parent = sas_address_parent;
6950  	sas_expander->sas_address = sas_address;
6951  	sas_expander->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
6952  	if (!sas_expander->port) {
6953  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6954  		    __FILE__, __LINE__, __func__);
6955  		rc = -1;
6956  		goto out_fail;
6957  	}
6958  
6959  	ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
6960  		 handle, parent_handle,
6961  		 (u64)sas_expander->sas_address, sas_expander->num_phys);
6962  
6963  	if (!sas_expander->num_phys) {
6964  		rc = -1;
6965  		goto out_fail;
6966  	}
6967  	sas_expander->phy = kcalloc(sas_expander->num_phys,
6968  	    sizeof(struct _sas_phy), GFP_KERNEL);
6969  	if (!sas_expander->phy) {
6970  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6971  			__FILE__, __LINE__, __func__);
6972  		rc = -1;
6973  		goto out_fail;
6974  	}
6975  
6976  	INIT_LIST_HEAD(&sas_expander->sas_port_list);
6977  	mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
6978  	    sas_address_parent, sas_expander->port);
6979  	if (!mpt3sas_port) {
6980  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
6981  			__FILE__, __LINE__, __func__);
6982  		rc = -1;
6983  		goto out_fail;
6984  	}
6985  	sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
6986  	sas_expander->rphy = mpt3sas_port->rphy;
6987  
6988  	for (i = 0 ; i < sas_expander->num_phys ; i++) {
6989  		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
6990  		    &expander_pg1, i, handle))) {
6991  			ioc_err(ioc, "failure at %s:%d/%s()!\n",
6992  				__FILE__, __LINE__, __func__);
6993  			rc = -1;
6994  			goto out_fail;
6995  		}
6996  		sas_expander->phy[i].handle = handle;
6997  		sas_expander->phy[i].phy_id = i;
6998  		sas_expander->phy[i].port =
6999  		    mpt3sas_get_port_by_id(ioc, port_id, 0);
7000  
7001  		if ((mpt3sas_transport_add_expander_phy(ioc,
7002  		    &sas_expander->phy[i], expander_pg1,
7003  		    sas_expander->parent_dev))) {
7004  			ioc_err(ioc, "failure at %s:%d/%s()!\n",
7005  				__FILE__, __LINE__, __func__);
7006  			rc = -1;
7007  			goto out_fail;
7008  		}
7009  	}
7010  
7011  	if (sas_expander->enclosure_handle) {
7012  		enclosure_dev =
7013  			mpt3sas_scsih_enclosure_find_by_handle(ioc,
7014  						sas_expander->enclosure_handle);
7015  		if (enclosure_dev)
7016  			sas_expander->enclosure_logical_id =
7017  			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7018  	}
7019  
7020  	_scsih_expander_node_add(ioc, sas_expander);
7021  	return 0;
7022  
7023   out_fail:
7024  
7025  	if (mpt3sas_port)
7026  		mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
7027  		    sas_address_parent, sas_expander->port);
7028  	kfree(sas_expander);
7029  	return rc;
7030  }
7031  
7032  /**
7033   * mpt3sas_expander_remove - removing expander object
7034   * @ioc: per adapter object
7035   * @sas_address: expander sas_address
7036   * @port: hba port entry
7037   */
7038  void
mpt3sas_expander_remove(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,struct hba_port * port)7039  mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7040  	struct hba_port *port)
7041  {
7042  	struct _sas_node *sas_expander;
7043  	unsigned long flags;
7044  
7045  	if (ioc->shost_recovery)
7046  		return;
7047  
7048  	if (!port)
7049  		return;
7050  
7051  	spin_lock_irqsave(&ioc->sas_node_lock, flags);
7052  	sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
7053  	    sas_address, port);
7054  	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7055  	if (sas_expander)
7056  		_scsih_expander_node_remove(ioc, sas_expander);
7057  }
7058  
7059  /**
7060   * _scsih_done -  internal SCSI_IO callback handler.
7061   * @ioc: per adapter object
7062   * @smid: system request message index
7063   * @msix_index: MSIX table index supplied by the OS
7064   * @reply: reply message frame(lower 32bit addr)
7065   *
7066   * Callback handler when sending internal generated SCSI_IO.
7067   * The callback index passed is `ioc->scsih_cb_idx`
7068   *
7069   * Return: 1 meaning mf should be freed from _base_interrupt
7070   *         0 means the mf is freed from this function.
7071   */
7072  static u8
_scsih_done(struct MPT3SAS_ADAPTER * ioc,u16 smid,u8 msix_index,u32 reply)7073  _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
7074  {
7075  	MPI2DefaultReply_t *mpi_reply;
7076  
7077  	mpi_reply =  mpt3sas_base_get_reply_virt_addr(ioc, reply);
7078  	if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
7079  		return 1;
7080  	if (ioc->scsih_cmds.smid != smid)
7081  		return 1;
7082  	ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
7083  	if (mpi_reply) {
7084  		memcpy(ioc->scsih_cmds.reply, mpi_reply,
7085  		    mpi_reply->MsgLength*4);
7086  		ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
7087  	}
7088  	ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
7089  	complete(&ioc->scsih_cmds.done);
7090  	return 1;
7091  }
7092  
7093  
7094  
7095  
7096  #define MPT3_MAX_LUNS (255)
7097  
7098  
7099  /**
7100   * _scsih_check_access_status - check access flags
7101   * @ioc: per adapter object
7102   * @sas_address: sas address
7103   * @handle: sas device handle
7104   * @access_status: errors returned during discovery of the device
7105   *
7106   * Return: 0 for success, else failure
7107   */
7108  static u8
_scsih_check_access_status(struct MPT3SAS_ADAPTER * ioc,u64 sas_address,u16 handle,u8 access_status)7109  _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
7110  	u16 handle, u8 access_status)
7111  {
7112  	u8 rc = 1;
7113  	char *desc = NULL;
7114  
7115  	switch (access_status) {
7116  	case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
7117  	case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
7118  		rc = 0;
7119  		break;
7120  	case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
7121  		desc = "sata capability failed";
7122  		break;
7123  	case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
7124  		desc = "sata affiliation conflict";
7125  		break;
7126  	case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
7127  		desc = "route not addressable";
7128  		break;
7129  	case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
7130  		desc = "smp error not addressable";
7131  		break;
7132  	case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
7133  		desc = "device blocked";
7134  		break;
7135  	case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
7136  	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
7137  	case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
7138  	case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
7139  	case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
7140  	case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
7141  	case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
7142  	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
7143  	case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
7144  	case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
7145  	case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
7146  	case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
7147  		desc = "sata initialization failed";
7148  		break;
7149  	default:
7150  		desc = "unknown";
7151  		break;
7152  	}
7153  
7154  	if (!rc)
7155  		return 0;
7156  
7157  	ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
7158  		desc, (u64)sas_address, handle);
7159  	return rc;
7160  }
7161  
7162  /**
7163   * _scsih_check_device - checking device responsiveness
7164   * @ioc: per adapter object
7165   * @parent_sas_address: sas address of parent expander or sas host
7166   * @handle: attached device handle
7167   * @phy_number: phy number
7168   * @link_rate: new link rate
7169   */
7170  static void
_scsih_check_device(struct MPT3SAS_ADAPTER * ioc,u64 parent_sas_address,u16 handle,u8 phy_number,u8 link_rate)7171  _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
7172  	u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
7173  {
7174  	Mpi2ConfigReply_t mpi_reply;
7175  	Mpi2SasDevicePage0_t sas_device_pg0;
7176  	struct _sas_device *sas_device = NULL;
7177  	struct _enclosure_node *enclosure_dev = NULL;
7178  	u32 ioc_status;
7179  	unsigned long flags;
7180  	u64 sas_address;
7181  	struct scsi_target *starget;
7182  	struct MPT3SAS_TARGET *sas_target_priv_data;
7183  	u32 device_info;
7184  	struct hba_port *port;
7185  
7186  	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7187  	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
7188  		return;
7189  
7190  	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7191  	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7192  		return;
7193  
7194  	/* wide port handling ~ we need only handle device once for the phy that
7195  	 * is matched in sas device page zero
7196  	 */
7197  	if (phy_number != sas_device_pg0.PhyNum)
7198  		return;
7199  
7200  	/* check if this is end device */
7201  	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7202  	if (!(_scsih_is_end_device(device_info)))
7203  		return;
7204  
7205  	spin_lock_irqsave(&ioc->sas_device_lock, flags);
7206  	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7207  	port = mpt3sas_get_port_by_id(ioc, sas_device_pg0.PhysicalPort, 0);
7208  	if (!port)
7209  		goto out_unlock;
7210  	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7211  	    sas_address, port);
7212  
7213  	if (!sas_device)
7214  		goto out_unlock;
7215  
7216  	if (unlikely(sas_device->handle != handle)) {
7217  		starget = sas_device->starget;
7218  		sas_target_priv_data = starget->hostdata;
7219  		starget_printk(KERN_INFO, starget,
7220  			"handle changed from(0x%04x) to (0x%04x)!!!\n",
7221  			sas_device->handle, handle);
7222  		sas_target_priv_data->handle = handle;
7223  		sas_device->handle = handle;
7224  		if (le16_to_cpu(sas_device_pg0.Flags) &
7225  		     MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7226  			sas_device->enclosure_level =
7227  				sas_device_pg0.EnclosureLevel;
7228  			memcpy(sas_device->connector_name,
7229  				sas_device_pg0.ConnectorName, 4);
7230  			sas_device->connector_name[4] = '\0';
7231  		} else {
7232  			sas_device->enclosure_level = 0;
7233  			sas_device->connector_name[0] = '\0';
7234  		}
7235  
7236  		sas_device->enclosure_handle =
7237  				le16_to_cpu(sas_device_pg0.EnclosureHandle);
7238  		sas_device->is_chassis_slot_valid = 0;
7239  		enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
7240  						sas_device->enclosure_handle);
7241  		if (enclosure_dev) {
7242  			sas_device->enclosure_logical_id =
7243  			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7244  			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7245  			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7246  				sas_device->is_chassis_slot_valid = 1;
7247  				sas_device->chassis_slot =
7248  					enclosure_dev->pg0.ChassisSlot;
7249  			}
7250  		}
7251  	}
7252  
7253  	/* check if device is present */
7254  	if (!(le16_to_cpu(sas_device_pg0.Flags) &
7255  	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7256  		ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
7257  			handle);
7258  		goto out_unlock;
7259  	}
7260  
7261  	/* check if there were any issues with discovery */
7262  	if (_scsih_check_access_status(ioc, sas_address, handle,
7263  	    sas_device_pg0.AccessStatus))
7264  		goto out_unlock;
7265  
7266  	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7267  	_scsih_ublock_io_device(ioc, sas_address, port);
7268  
7269  	if (sas_device)
7270  		sas_device_put(sas_device);
7271  	return;
7272  
7273  out_unlock:
7274  	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7275  	if (sas_device)
7276  		sas_device_put(sas_device);
7277  }
7278  
7279  /**
7280   * _scsih_add_device -  creating sas device object
7281   * @ioc: per adapter object
7282   * @handle: sas device handle
7283   * @phy_num: phy number end device attached to
7284   * @is_pd: is this hidden raid component
7285   *
7286   * Creating end device object, stored in ioc->sas_device_list.
7287   *
7288   * Return: 0 for success, non-zero for failure.
7289   */
7290  static int
_scsih_add_device(struct MPT3SAS_ADAPTER * ioc,u16 handle,u8 phy_num,u8 is_pd)7291  _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
7292  	u8 is_pd)
7293  {
7294  	Mpi2ConfigReply_t mpi_reply;
7295  	Mpi2SasDevicePage0_t sas_device_pg0;
7296  	struct _sas_device *sas_device;
7297  	struct _enclosure_node *enclosure_dev = NULL;
7298  	u32 ioc_status;
7299  	u64 sas_address;
7300  	u32 device_info;
7301  	u8 port_id;
7302  
7303  	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7304  	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7305  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7306  			__FILE__, __LINE__, __func__);
7307  		return -1;
7308  	}
7309  
7310  	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7311  	    MPI2_IOCSTATUS_MASK;
7312  	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7313  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7314  			__FILE__, __LINE__, __func__);
7315  		return -1;
7316  	}
7317  
7318  	/* check if this is end device */
7319  	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
7320  	if (!(_scsih_is_end_device(device_info)))
7321  		return -1;
7322  	set_bit(handle, ioc->pend_os_device_add);
7323  	sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
7324  
7325  	/* check if device is present */
7326  	if (!(le16_to_cpu(sas_device_pg0.Flags) &
7327  	    MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
7328  		ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
7329  			handle);
7330  		return -1;
7331  	}
7332  
7333  	/* check if there were any issues with discovery */
7334  	if (_scsih_check_access_status(ioc, sas_address, handle,
7335  	    sas_device_pg0.AccessStatus))
7336  		return -1;
7337  
7338  	port_id = sas_device_pg0.PhysicalPort;
7339  	sas_device = mpt3sas_get_sdev_by_addr(ioc,
7340  	    sas_address, mpt3sas_get_port_by_id(ioc, port_id, 0));
7341  	if (sas_device) {
7342  		clear_bit(handle, ioc->pend_os_device_add);
7343  		sas_device_put(sas_device);
7344  		return -1;
7345  	}
7346  
7347  	if (sas_device_pg0.EnclosureHandle) {
7348  		enclosure_dev =
7349  			mpt3sas_scsih_enclosure_find_by_handle(ioc,
7350  			    le16_to_cpu(sas_device_pg0.EnclosureHandle));
7351  		if (enclosure_dev == NULL)
7352  			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
7353  				 sas_device_pg0.EnclosureHandle);
7354  	}
7355  
7356  	sas_device = kzalloc(sizeof(struct _sas_device),
7357  	    GFP_KERNEL);
7358  	if (!sas_device) {
7359  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7360  			__FILE__, __LINE__, __func__);
7361  		return 0;
7362  	}
7363  
7364  	kref_init(&sas_device->refcount);
7365  	sas_device->handle = handle;
7366  	if (_scsih_get_sas_address(ioc,
7367  	    le16_to_cpu(sas_device_pg0.ParentDevHandle),
7368  	    &sas_device->sas_address_parent) != 0)
7369  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7370  			__FILE__, __LINE__, __func__);
7371  	sas_device->enclosure_handle =
7372  	    le16_to_cpu(sas_device_pg0.EnclosureHandle);
7373  	if (sas_device->enclosure_handle != 0)
7374  		sas_device->slot =
7375  		    le16_to_cpu(sas_device_pg0.Slot);
7376  	sas_device->device_info = device_info;
7377  	sas_device->sas_address = sas_address;
7378  	sas_device->phy = sas_device_pg0.PhyNum;
7379  	sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
7380  	    MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
7381  	sas_device->port = mpt3sas_get_port_by_id(ioc, port_id, 0);
7382  	if (!sas_device->port) {
7383  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
7384  		    __FILE__, __LINE__, __func__);
7385  		goto out;
7386  	}
7387  
7388  	if (le16_to_cpu(sas_device_pg0.Flags)
7389  		& MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
7390  		sas_device->enclosure_level =
7391  			sas_device_pg0.EnclosureLevel;
7392  		memcpy(sas_device->connector_name,
7393  			sas_device_pg0.ConnectorName, 4);
7394  		sas_device->connector_name[4] = '\0';
7395  	} else {
7396  		sas_device->enclosure_level = 0;
7397  		sas_device->connector_name[0] = '\0';
7398  	}
7399  	/* get enclosure_logical_id & chassis_slot*/
7400  	sas_device->is_chassis_slot_valid = 0;
7401  	if (enclosure_dev) {
7402  		sas_device->enclosure_logical_id =
7403  		    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
7404  		if (le16_to_cpu(enclosure_dev->pg0.Flags) &
7405  		    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
7406  			sas_device->is_chassis_slot_valid = 1;
7407  			sas_device->chassis_slot =
7408  					enclosure_dev->pg0.ChassisSlot;
7409  		}
7410  	}
7411  
7412  	/* get device name */
7413  	sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
7414  	sas_device->port_type = sas_device_pg0.MaxPortConnections;
7415  	ioc_info(ioc,
7416  	    "handle(0x%0x) sas_address(0x%016llx) port_type(0x%0x)\n",
7417  	    handle, sas_device->sas_address, sas_device->port_type);
7418  
7419  	if (ioc->wait_for_discovery_to_complete)
7420  		_scsih_sas_device_init_add(ioc, sas_device);
7421  	else
7422  		_scsih_sas_device_add(ioc, sas_device);
7423  
7424  out:
7425  	sas_device_put(sas_device);
7426  	return 0;
7427  }
7428  
7429  /**
7430   * _scsih_remove_device -  removing sas device object
7431   * @ioc: per adapter object
7432   * @sas_device: the sas_device object
7433   */
7434  static void
_scsih_remove_device(struct MPT3SAS_ADAPTER * ioc,struct _sas_device * sas_device)7435  _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
7436  	struct _sas_device *sas_device)
7437  {
7438  	struct MPT3SAS_TARGET *sas_target_priv_data;
7439  
7440  	if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
7441  	     (sas_device->pfa_led_on)) {
7442  		_scsih_turn_off_pfa_led(ioc, sas_device);
7443  		sas_device->pfa_led_on = 0;
7444  	}
7445  
7446  	dewtprintk(ioc,
7447  		   ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
7448  			    __func__,
7449  			    sas_device->handle, (u64)sas_device->sas_address));
7450  
7451  	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
7452  	    NULL, NULL));
7453  
7454  	if (sas_device->starget && sas_device->starget->hostdata) {
7455  		sas_target_priv_data = sas_device->starget->hostdata;
7456  		sas_target_priv_data->deleted = 1;
7457  		_scsih_ublock_io_device(ioc, sas_device->sas_address,
7458  		    sas_device->port);
7459  		sas_target_priv_data->handle =
7460  		     MPT3SAS_INVALID_DEVICE_HANDLE;
7461  	}
7462  
7463  	if (!ioc->hide_drives)
7464  		mpt3sas_transport_port_remove(ioc,
7465  		    sas_device->sas_address,
7466  		    sas_device->sas_address_parent,
7467  		    sas_device->port);
7468  
7469  	ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
7470  		 sas_device->handle, (u64)sas_device->sas_address);
7471  
7472  	_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
7473  
7474  	dewtprintk(ioc,
7475  		   ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
7476  			    __func__,
7477  			    sas_device->handle, (u64)sas_device->sas_address));
7478  	dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
7479  	    NULL, NULL));
7480  }
7481  
7482  /**
7483   * _scsih_sas_topology_change_event_debug - debug for topology event
7484   * @ioc: per adapter object
7485   * @event_data: event data payload
7486   * Context: user.
7487   */
7488  static void
_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasTopologyChangeList_t * event_data)7489  _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7490  	Mpi2EventDataSasTopologyChangeList_t *event_data)
7491  {
7492  	int i;
7493  	u16 handle;
7494  	u16 reason_code;
7495  	u8 phy_number;
7496  	char *status_str = NULL;
7497  	u8 link_rate, prev_link_rate;
7498  
7499  	switch (event_data->ExpStatus) {
7500  	case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7501  		status_str = "add";
7502  		break;
7503  	case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7504  		status_str = "remove";
7505  		break;
7506  	case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7507  	case 0:
7508  		status_str =  "responding";
7509  		break;
7510  	case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7511  		status_str = "remove delay";
7512  		break;
7513  	default:
7514  		status_str = "unknown status";
7515  		break;
7516  	}
7517  	ioc_info(ioc, "sas topology change: (%s)\n", status_str);
7518  	pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
7519  	    "start_phy(%02d), count(%d)\n",
7520  	    le16_to_cpu(event_data->ExpanderDevHandle),
7521  	    le16_to_cpu(event_data->EnclosureHandle),
7522  	    event_data->StartPhyNum, event_data->NumEntries);
7523  	for (i = 0; i < event_data->NumEntries; i++) {
7524  		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7525  		if (!handle)
7526  			continue;
7527  		phy_number = event_data->StartPhyNum + i;
7528  		reason_code = event_data->PHY[i].PhyStatus &
7529  		    MPI2_EVENT_SAS_TOPO_RC_MASK;
7530  		switch (reason_code) {
7531  		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7532  			status_str = "target add";
7533  			break;
7534  		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7535  			status_str = "target remove";
7536  			break;
7537  		case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7538  			status_str = "delay target remove";
7539  			break;
7540  		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7541  			status_str = "link rate change";
7542  			break;
7543  		case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7544  			status_str = "target responding";
7545  			break;
7546  		default:
7547  			status_str = "unknown";
7548  			break;
7549  		}
7550  		link_rate = event_data->PHY[i].LinkRate >> 4;
7551  		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7552  		pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
7553  		    " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
7554  		    handle, status_str, link_rate, prev_link_rate);
7555  
7556  	}
7557  }
7558  
7559  /**
7560   * _scsih_sas_topology_change_event - handle topology changes
7561   * @ioc: per adapter object
7562   * @fw_event: The fw_event_work object
7563   * Context: user.
7564   *
7565   */
7566  static int
_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)7567  _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
7568  	struct fw_event_work *fw_event)
7569  {
7570  	int i;
7571  	u16 parent_handle, handle;
7572  	u16 reason_code;
7573  	u8 phy_number, max_phys;
7574  	struct _sas_node *sas_expander;
7575  	u64 sas_address;
7576  	unsigned long flags;
7577  	u8 link_rate, prev_link_rate;
7578  	struct hba_port *port;
7579  	Mpi2EventDataSasTopologyChangeList_t *event_data =
7580  		(Mpi2EventDataSasTopologyChangeList_t *)
7581  		fw_event->event_data;
7582  
7583  	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7584  		_scsih_sas_topology_change_event_debug(ioc, event_data);
7585  
7586  	if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
7587  		return 0;
7588  
7589  	if (!ioc->sas_hba.num_phys)
7590  		_scsih_sas_host_add(ioc);
7591  	else
7592  		_scsih_sas_host_refresh(ioc);
7593  
7594  	if (fw_event->ignore) {
7595  		dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
7596  		return 0;
7597  	}
7598  
7599  	parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
7600  	port = mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0);
7601  
7602  	/* handle expander add */
7603  	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
7604  		if (_scsih_expander_add(ioc, parent_handle) != 0)
7605  			return 0;
7606  
7607  	spin_lock_irqsave(&ioc->sas_node_lock, flags);
7608  	sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
7609  	    parent_handle);
7610  	if (sas_expander) {
7611  		sas_address = sas_expander->sas_address;
7612  		max_phys = sas_expander->num_phys;
7613  		port = sas_expander->port;
7614  	} else if (parent_handle < ioc->sas_hba.num_phys) {
7615  		sas_address = ioc->sas_hba.sas_address;
7616  		max_phys = ioc->sas_hba.num_phys;
7617  	} else {
7618  		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7619  		return 0;
7620  	}
7621  	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
7622  
7623  	/* handle siblings events */
7624  	for (i = 0; i < event_data->NumEntries; i++) {
7625  		if (fw_event->ignore) {
7626  			dewtprintk(ioc,
7627  				   ioc_info(ioc, "ignoring expander event\n"));
7628  			return 0;
7629  		}
7630  		if (ioc->remove_host || ioc->pci_error_recovery)
7631  			return 0;
7632  		phy_number = event_data->StartPhyNum + i;
7633  		if (phy_number >= max_phys)
7634  			continue;
7635  		reason_code = event_data->PHY[i].PhyStatus &
7636  		    MPI2_EVENT_SAS_TOPO_RC_MASK;
7637  		if ((event_data->PHY[i].PhyStatus &
7638  		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
7639  		    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
7640  				continue;
7641  		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
7642  		if (!handle)
7643  			continue;
7644  		link_rate = event_data->PHY[i].LinkRate >> 4;
7645  		prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
7646  		switch (reason_code) {
7647  		case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7648  
7649  			if (ioc->shost_recovery)
7650  				break;
7651  
7652  			if (link_rate == prev_link_rate)
7653  				break;
7654  
7655  			mpt3sas_transport_update_links(ioc, sas_address,
7656  			    handle, phy_number, link_rate, port);
7657  
7658  			if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
7659  				break;
7660  
7661  			_scsih_check_device(ioc, sas_address, handle,
7662  			    phy_number, link_rate);
7663  
7664  			if (!test_bit(handle, ioc->pend_os_device_add))
7665  				break;
7666  
7667  			fallthrough;
7668  
7669  		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7670  
7671  			if (ioc->shost_recovery)
7672  				break;
7673  
7674  			mpt3sas_transport_update_links(ioc, sas_address,
7675  			    handle, phy_number, link_rate, port);
7676  
7677  			_scsih_add_device(ioc, handle, phy_number, 0);
7678  
7679  			break;
7680  		case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7681  
7682  			_scsih_device_remove_by_handle(ioc, handle);
7683  			break;
7684  		}
7685  	}
7686  
7687  	/* handle expander removal */
7688  	if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
7689  	    sas_expander)
7690  		mpt3sas_expander_remove(ioc, sas_address, port);
7691  
7692  	return 0;
7693  }
7694  
7695  /**
7696   * _scsih_sas_device_status_change_event_debug - debug for device event
7697   * @ioc: ?
7698   * @event_data: event data payload
7699   * Context: user.
7700   */
7701  static void
_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasDeviceStatusChange_t * event_data)7702  _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7703  	Mpi2EventDataSasDeviceStatusChange_t *event_data)
7704  {
7705  	char *reason_str = NULL;
7706  
7707  	switch (event_data->ReasonCode) {
7708  	case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
7709  		reason_str = "smart data";
7710  		break;
7711  	case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
7712  		reason_str = "unsupported device discovered";
7713  		break;
7714  	case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
7715  		reason_str = "internal device reset";
7716  		break;
7717  	case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
7718  		reason_str = "internal task abort";
7719  		break;
7720  	case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7721  		reason_str = "internal task abort set";
7722  		break;
7723  	case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7724  		reason_str = "internal clear task set";
7725  		break;
7726  	case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
7727  		reason_str = "internal query task";
7728  		break;
7729  	case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
7730  		reason_str = "sata init failure";
7731  		break;
7732  	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7733  		reason_str = "internal device reset complete";
7734  		break;
7735  	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7736  		reason_str = "internal task abort complete";
7737  		break;
7738  	case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
7739  		reason_str = "internal async notification";
7740  		break;
7741  	case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
7742  		reason_str = "expander reduced functionality";
7743  		break;
7744  	case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
7745  		reason_str = "expander reduced functionality complete";
7746  		break;
7747  	default:
7748  		reason_str = "unknown reason";
7749  		break;
7750  	}
7751  	ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
7752  		 reason_str, le16_to_cpu(event_data->DevHandle),
7753  		 (u64)le64_to_cpu(event_data->SASAddress),
7754  		 le16_to_cpu(event_data->TaskTag));
7755  	if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
7756  		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7757  			event_data->ASC, event_data->ASCQ);
7758  	pr_cont("\n");
7759  }
7760  
7761  /**
7762   * _scsih_sas_device_status_change_event - handle device status change
7763   * @ioc: per adapter object
7764   * @event_data: The fw event
7765   * Context: user.
7766   */
7767  static void
_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasDeviceStatusChange_t * event_data)7768  _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7769  	Mpi2EventDataSasDeviceStatusChange_t *event_data)
7770  {
7771  	struct MPT3SAS_TARGET *target_priv_data;
7772  	struct _sas_device *sas_device;
7773  	u64 sas_address;
7774  	unsigned long flags;
7775  
7776  	/* In MPI Revision K (0xC), the internal device reset complete was
7777  	 * implemented, so avoid setting tm_busy flag for older firmware.
7778  	 */
7779  	if ((ioc->facts.HeaderVersion >> 8) < 0xC)
7780  		return;
7781  
7782  	if (event_data->ReasonCode !=
7783  	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7784  	   event_data->ReasonCode !=
7785  	    MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7786  		return;
7787  
7788  	spin_lock_irqsave(&ioc->sas_device_lock, flags);
7789  	sas_address = le64_to_cpu(event_data->SASAddress);
7790  	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
7791  	    sas_address,
7792  	    mpt3sas_get_port_by_id(ioc, event_data->PhysicalPort, 0));
7793  
7794  	if (!sas_device || !sas_device->starget)
7795  		goto out;
7796  
7797  	target_priv_data = sas_device->starget->hostdata;
7798  	if (!target_priv_data)
7799  		goto out;
7800  
7801  	if (event_data->ReasonCode ==
7802  	    MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
7803  		target_priv_data->tm_busy = 1;
7804  	else
7805  		target_priv_data->tm_busy = 0;
7806  
7807  	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7808  		ioc_info(ioc,
7809  		    "%s tm_busy flag for handle(0x%04x)\n",
7810  		    (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
7811  		    target_priv_data->handle);
7812  
7813  out:
7814  	if (sas_device)
7815  		sas_device_put(sas_device);
7816  
7817  	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7818  }
7819  
7820  
7821  /**
7822   * _scsih_check_pcie_access_status - check access flags
7823   * @ioc: per adapter object
7824   * @wwid: wwid
7825   * @handle: sas device handle
7826   * @access_status: errors returned during discovery of the device
7827   *
7828   * Return: 0 for success, else failure
7829   */
7830  static u8
_scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER * ioc,u64 wwid,u16 handle,u8 access_status)7831  _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
7832  	u16 handle, u8 access_status)
7833  {
7834  	u8 rc = 1;
7835  	char *desc = NULL;
7836  
7837  	switch (access_status) {
7838  	case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
7839  	case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
7840  		rc = 0;
7841  		break;
7842  	case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
7843  		desc = "PCIe device capability failed";
7844  		break;
7845  	case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
7846  		desc = "PCIe device blocked";
7847  		ioc_info(ioc,
7848  		    "Device with Access Status (%s): wwid(0x%016llx), "
7849  		    "handle(0x%04x)\n ll only be added to the internal list",
7850  		    desc, (u64)wwid, handle);
7851  		rc = 0;
7852  		break;
7853  	case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
7854  		desc = "PCIe device mem space access failed";
7855  		break;
7856  	case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
7857  		desc = "PCIe device unsupported";
7858  		break;
7859  	case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
7860  		desc = "PCIe device MSIx Required";
7861  		break;
7862  	case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
7863  		desc = "PCIe device init fail max";
7864  		break;
7865  	case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
7866  		desc = "PCIe device status unknown";
7867  		break;
7868  	case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
7869  		desc = "nvme ready timeout";
7870  		break;
7871  	case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
7872  		desc = "nvme device configuration unsupported";
7873  		break;
7874  	case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
7875  		desc = "nvme identify failed";
7876  		break;
7877  	case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
7878  		desc = "nvme qconfig failed";
7879  		break;
7880  	case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
7881  		desc = "nvme qcreation failed";
7882  		break;
7883  	case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
7884  		desc = "nvme eventcfg failed";
7885  		break;
7886  	case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
7887  		desc = "nvme get feature stat failed";
7888  		break;
7889  	case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
7890  		desc = "nvme idle timeout";
7891  		break;
7892  	case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
7893  		desc = "nvme failure status";
7894  		break;
7895  	default:
7896  		ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
7897  			access_status, (u64)wwid, handle);
7898  		return rc;
7899  	}
7900  
7901  	if (!rc)
7902  		return rc;
7903  
7904  	ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
7905  		 desc, (u64)wwid, handle);
7906  	return rc;
7907  }
7908  
7909  /**
7910   * _scsih_pcie_device_remove_from_sml -  removing pcie device
7911   * from SML and free up associated memory
7912   * @ioc: per adapter object
7913   * @pcie_device: the pcie_device object
7914   */
static void
_scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device)
{
	struct MPT3SAS_TARGET *sas_target_priv_data;

	/* Trace entry, identifying the device by handle/wwid and, when
	 * known, its enclosure position.
	 */
	dewtprintk(ioc,
		   ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* Mark the target deleted and invalidate its cached firmware
	 * handle, then unblock I/O so any blocked commands are returned
	 * to the midlayer instead of staying queued forever.
	 */
	if (pcie_device->starget && pcie_device->starget->hostdata) {
		sas_target_priv_data = pcie_device->starget->hostdata;
		sas_target_priv_data->deleted = 1;
		_scsih_ublock_io_device(ioc, pcie_device->wwid, NULL);
		sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
	}

	ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
		 pcie_device->handle, (u64)pcie_device->wwid);
	if (pcie_device->enclosure_handle != 0)
		ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
			 (u64)pcie_device->enclosure_logical_id,
			 pcie_device->slot);
	if (pcie_device->connector_name[0] != '\0')
		ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
			 pcie_device->enclosure_level,
			 pcie_device->connector_name);

	/* A device whose access status is DEVICE_BLOCKED was never exposed
	 * to the SCSI midlayer, so there is no scsi_target to remove.
	 */
	if (pcie_device->starget && (pcie_device->access_status !=
				MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
		scsi_remove_target(&pcie_device->starget->dev);
	dewtprintk(ioc,
		   ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
			    __func__,
			    pcie_device->handle, (u64)pcie_device->wwid));
	if (pcie_device->enclosure_handle != 0)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
				    __func__,
				    (u64)pcie_device->enclosure_logical_id,
				    pcie_device->slot));
	if (pcie_device->connector_name[0] != '\0')
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
				    __func__,
				    pcie_device->enclosure_level,
				    pcie_device->connector_name));

	/* Free the cached serial number string here; the pcie_device
	 * object itself is presumably released via its kref by the
	 * caller — confirm against the call sites.
	 */
	kfree(pcie_device->serial_number);
}
7978  
7979  
7980  /**
7981   * _scsih_pcie_check_device - checking device responsiveness
7982   * @ioc: per adapter object
7983   * @handle: attached device handle
7984   */
7985  static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER * ioc,u16 handle)7986  _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7987  {
7988  	Mpi2ConfigReply_t mpi_reply;
7989  	Mpi26PCIeDevicePage0_t pcie_device_pg0;
7990  	u32 ioc_status;
7991  	struct _pcie_device *pcie_device;
7992  	u64 wwid;
7993  	unsigned long flags;
7994  	struct scsi_target *starget;
7995  	struct MPT3SAS_TARGET *sas_target_priv_data;
7996  	u32 device_info;
7997  
7998  	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
7999  		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
8000  		return;
8001  
8002  	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
8003  	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8004  		return;
8005  
8006  	/* check if this is end device */
8007  	device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8008  	if (!(_scsih_is_nvme_pciescsi_device(device_info)))
8009  		return;
8010  
8011  	wwid = le64_to_cpu(pcie_device_pg0.WWID);
8012  	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8013  	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
8014  
8015  	if (!pcie_device) {
8016  		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8017  		return;
8018  	}
8019  
8020  	if (unlikely(pcie_device->handle != handle)) {
8021  		starget = pcie_device->starget;
8022  		sas_target_priv_data = starget->hostdata;
8023  		pcie_device->access_status = pcie_device_pg0.AccessStatus;
8024  		starget_printk(KERN_INFO, starget,
8025  		    "handle changed from(0x%04x) to (0x%04x)!!!\n",
8026  		    pcie_device->handle, handle);
8027  		sas_target_priv_data->handle = handle;
8028  		pcie_device->handle = handle;
8029  
8030  		if (le32_to_cpu(pcie_device_pg0.Flags) &
8031  		    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8032  			pcie_device->enclosure_level =
8033  			    pcie_device_pg0.EnclosureLevel;
8034  			memcpy(&pcie_device->connector_name[0],
8035  			    &pcie_device_pg0.ConnectorName[0], 4);
8036  		} else {
8037  			pcie_device->enclosure_level = 0;
8038  			pcie_device->connector_name[0] = '\0';
8039  		}
8040  	}
8041  
8042  	/* check if device is present */
8043  	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8044  	    MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
8045  		ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
8046  			 handle);
8047  		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8048  		pcie_device_put(pcie_device);
8049  		return;
8050  	}
8051  
8052  	/* check if there were any issues with discovery */
8053  	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8054  	    pcie_device_pg0.AccessStatus)) {
8055  		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8056  		pcie_device_put(pcie_device);
8057  		return;
8058  	}
8059  
8060  	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8061  	pcie_device_put(pcie_device);
8062  
8063  	_scsih_ublock_io_device(ioc, wwid, NULL);
8064  
8065  	return;
8066  }
8067  
8068  /**
8069   * _scsih_pcie_add_device -  creating pcie device object
8070   * @ioc: per adapter object
8071   * @handle: pcie device handle
8072   *
8073   * Creating end device object, stored in ioc->pcie_device_list.
8074   *
8075   * Return: 1 means queue the event later, 0 means complete the event
8076   */
8077  static int
_scsih_pcie_add_device(struct MPT3SAS_ADAPTER * ioc,u16 handle)8078  _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
8079  {
8080  	Mpi26PCIeDevicePage0_t pcie_device_pg0;
8081  	Mpi26PCIeDevicePage2_t pcie_device_pg2;
8082  	Mpi2ConfigReply_t mpi_reply;
8083  	struct _pcie_device *pcie_device;
8084  	struct _enclosure_node *enclosure_dev;
8085  	u32 ioc_status;
8086  	u64 wwid;
8087  
8088  	if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8089  	    &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
8090  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8091  			__FILE__, __LINE__, __func__);
8092  		return 0;
8093  	}
8094  	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8095  	    MPI2_IOCSTATUS_MASK;
8096  	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8097  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8098  			__FILE__, __LINE__, __func__);
8099  		return 0;
8100  	}
8101  
8102  	set_bit(handle, ioc->pend_os_device_add);
8103  	wwid = le64_to_cpu(pcie_device_pg0.WWID);
8104  
8105  	/* check if device is present */
8106  	if (!(le32_to_cpu(pcie_device_pg0.Flags) &
8107  		MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
8108  		ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
8109  			handle);
8110  		return 0;
8111  	}
8112  
8113  	/* check if there were any issues with discovery */
8114  	if (_scsih_check_pcie_access_status(ioc, wwid, handle,
8115  	    pcie_device_pg0.AccessStatus))
8116  		return 0;
8117  
8118  	if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
8119  	    (pcie_device_pg0.DeviceInfo))))
8120  		return 0;
8121  
8122  	pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
8123  	if (pcie_device) {
8124  		clear_bit(handle, ioc->pend_os_device_add);
8125  		pcie_device_put(pcie_device);
8126  		return 0;
8127  	}
8128  
8129  	/* PCIe Device Page 2 contains read-only information about a
8130  	 * specific NVMe device; therefore, this page is only
8131  	 * valid for NVMe devices and skip for pcie devices of type scsi.
8132  	 */
8133  	if (!(mpt3sas_scsih_is_pcie_scsi_device(
8134  		le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8135  		if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
8136  		    &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8137  		    handle)) {
8138  			ioc_err(ioc,
8139  			    "failure at %s:%d/%s()!\n", __FILE__,
8140  			    __LINE__, __func__);
8141  			return 0;
8142  		}
8143  
8144  		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8145  					MPI2_IOCSTATUS_MASK;
8146  		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8147  			ioc_err(ioc,
8148  			    "failure at %s:%d/%s()!\n", __FILE__,
8149  			    __LINE__, __func__);
8150  			return 0;
8151  		}
8152  	}
8153  
8154  	pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
8155  	if (!pcie_device) {
8156  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
8157  			__FILE__, __LINE__, __func__);
8158  		return 0;
8159  	}
8160  
8161  	kref_init(&pcie_device->refcount);
8162  	pcie_device->id = ioc->pcie_target_id++;
8163  	pcie_device->channel = PCIE_CHANNEL;
8164  	pcie_device->handle = handle;
8165  	pcie_device->access_status = pcie_device_pg0.AccessStatus;
8166  	pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8167  	pcie_device->wwid = wwid;
8168  	pcie_device->port_num = pcie_device_pg0.PortNum;
8169  	pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
8170  	    MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
8171  
8172  	pcie_device->enclosure_handle =
8173  	    le16_to_cpu(pcie_device_pg0.EnclosureHandle);
8174  	if (pcie_device->enclosure_handle != 0)
8175  		pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
8176  
8177  	if (le32_to_cpu(pcie_device_pg0.Flags) &
8178  	    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
8179  		pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
8180  		memcpy(&pcie_device->connector_name[0],
8181  		    &pcie_device_pg0.ConnectorName[0], 4);
8182  	} else {
8183  		pcie_device->enclosure_level = 0;
8184  		pcie_device->connector_name[0] = '\0';
8185  	}
8186  
8187  	/* get enclosure_logical_id */
8188  	if (pcie_device->enclosure_handle) {
8189  		enclosure_dev =
8190  			mpt3sas_scsih_enclosure_find_by_handle(ioc,
8191  						pcie_device->enclosure_handle);
8192  		if (enclosure_dev)
8193  			pcie_device->enclosure_logical_id =
8194  			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8195  	}
8196  	/* TODO -- Add device name once FW supports it */
8197  	if (!(mpt3sas_scsih_is_pcie_scsi_device(
8198  	    le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
8199  		pcie_device->nvme_mdts =
8200  		    le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
8201  		pcie_device->shutdown_latency =
8202  			le16_to_cpu(pcie_device_pg2.ShutdownLatency);
8203  		/*
8204  		 * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
8205  		 * if drive's RTD3 Entry Latency is greater then IOC's
8206  		 * max_shutdown_latency.
8207  		 */
8208  		if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
8209  			ioc->max_shutdown_latency =
8210  				pcie_device->shutdown_latency;
8211  		if (pcie_device_pg2.ControllerResetTO)
8212  			pcie_device->reset_timeout =
8213  			    pcie_device_pg2.ControllerResetTO;
8214  		else
8215  			pcie_device->reset_timeout = 30;
8216  	} else
8217  		pcie_device->reset_timeout = 30;
8218  
8219  	if (ioc->wait_for_discovery_to_complete)
8220  		_scsih_pcie_device_init_add(ioc, pcie_device);
8221  	else
8222  		_scsih_pcie_device_add(ioc, pcie_device);
8223  
8224  	pcie_device_put(pcie_device);
8225  	return 0;
8226  }
8227  
8228  /**
8229   * _scsih_pcie_topology_change_event_debug - debug for topology
8230   * event
8231   * @ioc: per adapter object
8232   * @event_data: event data payload
8233   * Context: user.
8234   */
8235  static void
_scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeTopologyChangeList_t * event_data)8236  _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8237  	Mpi26EventDataPCIeTopologyChangeList_t *event_data)
8238  {
8239  	int i;
8240  	u16 handle;
8241  	u16 reason_code;
8242  	u8 port_number;
8243  	char *status_str = NULL;
8244  	u8 link_rate, prev_link_rate;
8245  
8246  	switch (event_data->SwitchStatus) {
8247  	case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
8248  		status_str = "add";
8249  		break;
8250  	case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
8251  		status_str = "remove";
8252  		break;
8253  	case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
8254  	case 0:
8255  		status_str =  "responding";
8256  		break;
8257  	case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
8258  		status_str = "remove delay";
8259  		break;
8260  	default:
8261  		status_str = "unknown status";
8262  		break;
8263  	}
8264  	ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
8265  	pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
8266  		"start_port(%02d), count(%d)\n",
8267  		le16_to_cpu(event_data->SwitchDevHandle),
8268  		le16_to_cpu(event_data->EnclosureHandle),
8269  		event_data->StartPortNum, event_data->NumEntries);
8270  	for (i = 0; i < event_data->NumEntries; i++) {
8271  		handle =
8272  			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
8273  		if (!handle)
8274  			continue;
8275  		port_number = event_data->StartPortNum + i;
8276  		reason_code = event_data->PortEntry[i].PortStatus;
8277  		switch (reason_code) {
8278  		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
8279  			status_str = "target add";
8280  			break;
8281  		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
8282  			status_str = "target remove";
8283  			break;
8284  		case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
8285  			status_str = "delay target remove";
8286  			break;
8287  		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
8288  			status_str = "link rate change";
8289  			break;
8290  		case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
8291  			status_str = "target responding";
8292  			break;
8293  		default:
8294  			status_str = "unknown";
8295  			break;
8296  		}
8297  		link_rate = event_data->PortEntry[i].CurrentPortInfo &
8298  			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8299  		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
8300  			MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
8301  		pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
8302  			" link rate: new(0x%02x), old(0x%02x)\n", port_number,
8303  			handle, status_str, link_rate, prev_link_rate);
8304  	}
8305  }
8306  
8307  /**
8308   * _scsih_pcie_topology_change_event - handle PCIe topology
8309   *  changes
8310   * @ioc: per adapter object
8311   * @fw_event: The fw_event_work object
8312   * Context: user.
8313   *
8314   */
static void
_scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	int i;
	u16 handle;
	u16 reason_code;
	u8 link_rate, prev_link_rate;
	unsigned long flags;
	int rc;
	Mpi26EventDataPCIeTopologyChangeList_t *event_data =
		(Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
	struct _pcie_device *pcie_device;

	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
		_scsih_pcie_topology_change_event_debug(ioc, event_data);

	/* Nothing to do while the host is resetting or going away. */
	if (ioc->shost_recovery || ioc->remove_host ||
		ioc->pci_error_recovery)
		return;

	if (fw_event->ignore) {
		dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
		return;
	}

	/* handle siblings events */
	for (i = 0; i < event_data->NumEntries; i++) {
		/* The event can be marked ignored while this loop runs,
		 * so recheck on every iteration.
		 */
		if (fw_event->ignore) {
			dewtprintk(ioc,
				   ioc_info(ioc, "ignoring switch event\n"));
			return;
		}
		if (ioc->remove_host || ioc->pci_error_recovery)
			return;
		reason_code = event_data->PortEntry[i].PortStatus;
		handle =
			le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
		if (!handle)
			continue;

		link_rate = event_data->PortEntry[i].CurrentPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
		prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
			& MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;

		switch (reason_code) {
		case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
			/* Only act on a real rate change to a usable speed. */
			if (ioc->shost_recovery)
				break;
			if (link_rate == prev_link_rate)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			_scsih_pcie_check_device(ioc, handle);

			/* This code after this point handles the test case
			 * where a device has been added, however its returning
			 * BUSY for sometime.  Then before the Device Missing
			 * Delay expires and the device becomes READY, the
			 * device is removed and added back.
			 */
			spin_lock_irqsave(&ioc->pcie_device_lock, flags);
			pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
			spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

			if (pcie_device) {
				pcie_device_put(pcie_device);
				break;
			}

			if (!test_bit(handle, ioc->pend_os_device_add))
				break;

			dewtprintk(ioc,
				   ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
					    handle));
			/* Rewrite this entry's status so it is handled as a
			 * fresh device add, then fall through.
			 */
			event_data->PortEntry[i].PortStatus &= 0xF0;
			event_data->PortEntry[i].PortStatus |=
				MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
			fallthrough;
		case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
			if (ioc->shost_recovery)
				break;
			if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
				break;

			rc = _scsih_pcie_add_device(ioc, handle);
			if (!rc) {
				/* mark entry vacant */
				/* TODO This needs to be reviewed and fixed,
				 * we dont have an entry
				 * to make an event void like vacant
				 */
				event_data->PortEntry[i].PortStatus |=
					MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
			}
			break;
		case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
			_scsih_pcie_device_remove_by_handle(ioc, handle);
			break;
		}
	}
}
8420  
8421  /**
8422   * _scsih_pcie_device_status_change_event_debug - debug for device event
8423   * @ioc: ?
8424   * @event_data: event data payload
8425   * Context: user.
8426   */
8427  static void
_scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi26EventDataPCIeDeviceStatusChange_t * event_data)8428  _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8429  	Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
8430  {
8431  	char *reason_str = NULL;
8432  
8433  	switch (event_data->ReasonCode) {
8434  	case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
8435  		reason_str = "smart data";
8436  		break;
8437  	case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
8438  		reason_str = "unsupported device discovered";
8439  		break;
8440  	case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
8441  		reason_str = "internal device reset";
8442  		break;
8443  	case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
8444  		reason_str = "internal task abort";
8445  		break;
8446  	case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
8447  		reason_str = "internal task abort set";
8448  		break;
8449  	case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
8450  		reason_str = "internal clear task set";
8451  		break;
8452  	case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
8453  		reason_str = "internal query task";
8454  		break;
8455  	case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
8456  		reason_str = "device init failure";
8457  		break;
8458  	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
8459  		reason_str = "internal device reset complete";
8460  		break;
8461  	case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
8462  		reason_str = "internal task abort complete";
8463  		break;
8464  	case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
8465  		reason_str = "internal async notification";
8466  		break;
8467  	case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
8468  		reason_str = "pcie hot reset failed";
8469  		break;
8470  	default:
8471  		reason_str = "unknown reason";
8472  		break;
8473  	}
8474  
8475  	ioc_info(ioc, "PCIE device status change: (%s)\n"
8476  		 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
8477  		 reason_str, le16_to_cpu(event_data->DevHandle),
8478  		 (u64)le64_to_cpu(event_data->WWID),
8479  		 le16_to_cpu(event_data->TaskTag));
8480  	if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
8481  		pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
8482  			event_data->ASC, event_data->ASCQ);
8483  	pr_cont("\n");
8484  }
8485  
8486  /**
8487   * _scsih_pcie_device_status_change_event - handle device status
8488   * change
8489   * @ioc: per adapter object
8490   * @fw_event: The fw_event_work object
8491   * Context: user.
8492   */
8493  static void
_scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8494  _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8495  	struct fw_event_work *fw_event)
8496  {
8497  	struct MPT3SAS_TARGET *target_priv_data;
8498  	struct _pcie_device *pcie_device;
8499  	u64 wwid;
8500  	unsigned long flags;
8501  	Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
8502  		(Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
8503  	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8504  		_scsih_pcie_device_status_change_event_debug(ioc,
8505  			event_data);
8506  
8507  	if (event_data->ReasonCode !=
8508  		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
8509  		event_data->ReasonCode !=
8510  		MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
8511  		return;
8512  
8513  	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8514  	wwid = le64_to_cpu(event_data->WWID);
8515  	pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
8516  
8517  	if (!pcie_device || !pcie_device->starget)
8518  		goto out;
8519  
8520  	target_priv_data = pcie_device->starget->hostdata;
8521  	if (!target_priv_data)
8522  		goto out;
8523  
8524  	if (event_data->ReasonCode ==
8525  		MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
8526  		target_priv_data->tm_busy = 1;
8527  	else
8528  		target_priv_data->tm_busy = 0;
8529  out:
8530  	if (pcie_device)
8531  		pcie_device_put(pcie_device);
8532  
8533  	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8534  }
8535  
8536  /**
8537   * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
8538   * event
8539   * @ioc: per adapter object
8540   * @event_data: event data payload
8541   * Context: user.
8542   */
8543  static void
_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataSasEnclDevStatusChange_t * event_data)8544  _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
8545  	Mpi2EventDataSasEnclDevStatusChange_t *event_data)
8546  {
8547  	char *reason_str = NULL;
8548  
8549  	switch (event_data->ReasonCode) {
8550  	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8551  		reason_str = "enclosure add";
8552  		break;
8553  	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8554  		reason_str = "enclosure remove";
8555  		break;
8556  	default:
8557  		reason_str = "unknown reason";
8558  		break;
8559  	}
8560  
8561  	ioc_info(ioc, "enclosure status change: (%s)\n"
8562  		 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
8563  		 reason_str,
8564  		 le16_to_cpu(event_data->EnclosureHandle),
8565  		 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
8566  		 le16_to_cpu(event_data->StartSlot));
8567  }
8568  
8569  /**
8570   * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
8571   * @ioc: per adapter object
8572   * @fw_event: The fw_event_work object
8573   * Context: user.
8574   */
8575  static void
_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8576  _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
8577  	struct fw_event_work *fw_event)
8578  {
8579  	Mpi2ConfigReply_t mpi_reply;
8580  	struct _enclosure_node *enclosure_dev = NULL;
8581  	Mpi2EventDataSasEnclDevStatusChange_t *event_data =
8582  		(Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
8583  	int rc;
8584  	u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
8585  
8586  	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
8587  		_scsih_sas_enclosure_dev_status_change_event_debug(ioc,
8588  		     (Mpi2EventDataSasEnclDevStatusChange_t *)
8589  		     fw_event->event_data);
8590  	if (ioc->shost_recovery)
8591  		return;
8592  
8593  	if (enclosure_handle)
8594  		enclosure_dev =
8595  			mpt3sas_scsih_enclosure_find_by_handle(ioc,
8596  						enclosure_handle);
8597  	switch (event_data->ReasonCode) {
8598  	case MPI2_EVENT_SAS_ENCL_RC_ADDED:
8599  		if (!enclosure_dev) {
8600  			enclosure_dev =
8601  				kzalloc(sizeof(struct _enclosure_node),
8602  					GFP_KERNEL);
8603  			if (!enclosure_dev) {
8604  				ioc_info(ioc, "failure at %s:%d/%s()!\n",
8605  					 __FILE__, __LINE__, __func__);
8606  				return;
8607  			}
8608  			rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8609  				&enclosure_dev->pg0,
8610  				MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
8611  				enclosure_handle);
8612  
8613  			if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8614  						MPI2_IOCSTATUS_MASK)) {
8615  				kfree(enclosure_dev);
8616  				return;
8617  			}
8618  
8619  			list_add_tail(&enclosure_dev->list,
8620  							&ioc->enclosure_list);
8621  		}
8622  		break;
8623  	case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
8624  		if (enclosure_dev) {
8625  			list_del(&enclosure_dev->list);
8626  			kfree(enclosure_dev);
8627  		}
8628  		break;
8629  	default:
8630  		break;
8631  	}
8632  }
8633  
8634  /**
8635   * _scsih_sas_broadcast_primitive_event - handle broadcast events
8636   * @ioc: per adapter object
8637   * @fw_event: The fw_event_work object
8638   * Context: user.
8639   */
static void
_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	struct scsiio_tracker *st;
	u16 smid, handle;
	u32 lun;
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 termination_count;
	u32 query_count;
	Mpi2SCSITaskManagementReply_t *mpi_reply;
	Mpi2EventDataSasBroadcastPrimitive_t *event_data =
		(Mpi2EventDataSasBroadcastPrimitive_t *)
		fw_event->event_data;
	u16 ioc_status;
	unsigned long flags;
	int r;
	u8 max_retries = 0;
	u8 task_abort_retries;

	/* Serialize with all other users of the shared tm_cmds frame. */
	mutex_lock(&ioc->tm_cmds.mutex);
	ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
		 __func__, event_data->PhyNum, event_data->PortWidth);

	/* Quiesce all devices while outstanding IOs are queried/aborted. */
	_scsih_block_io_all_device(ioc);

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	mpi_reply = ioc->tm_cmds.reply;
 broadcast_aen_retry:

	/* sanity checks for retrying this loop */
	if (max_retries++ == 5) {
		dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
		goto out;
	} else if (max_retries > 1)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: %d retry\n",
				    __func__, max_retries - 1));

	termination_count = 0;
	query_count = 0;
	/* Walk every possible outstanding SCSI IO by smid.  The
	 * scsi_lookup_lock is held at the top of each iteration and
	 * dropped around the blocking TM requests below.
	 */
	for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
		if (ioc->shost_recovery)
			goto out;
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (!scmd)
			continue;
		st = scsi_cmd_priv(scmd);
		sdev = scmd->device;
		sas_device_priv_data = sdev->hostdata;
		if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
			continue;
		 /* skip hidden raid components */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_RAID_COMPONENT)
			continue;
		 /* skip volumes */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_VOLUME)
			continue;
		 /* skip PCIe devices */
		if (sas_device_priv_data->sas_target->flags &
		    MPT_TARGET_FLAGS_PCIE_DEVICE)
			continue;

		handle = sas_device_priv_data->sas_target->handle;
		lun = sas_device_priv_data->lun;
		query_count++;

		if (ioc->shost_recovery)
			goto out;

		/* Ask the firmware whether this IO is still outstanding;
		 * the lock is dropped because issue_tm can sleep.
		 */
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
			MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
			st->msix_io, 30, 0);
		if (r == FAILED) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: FAILED when sending "
			    "QUERY_TASK: scmd(%p)\n", scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
		    & MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			sdev_printk(KERN_WARNING, sdev,
				"query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
				ioc_status, scmd);
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		/* see if IO is still owned by IOC and target */
		if (mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
		     mpi_reply->ResponseCode ==
		     MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			continue;
		}
		/* IO is no longer owned by the IOC: abort it, retrying the
		 * ABORT_TASK up to 60 times before restarting the scan.
		 */
		task_abort_retries = 0;
 tm_retry:
		if (task_abort_retries++ == 60) {
			dewtprintk(ioc,
				   ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
					    __func__));
			spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
			goto broadcast_aen_retry;
		}

		if (ioc->shost_recovery)
			goto out_no_lock;

		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
			sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
			st->smid, st->msix_io, 30, 0);
		if (r == FAILED || st->cb_idx != 0xFF) {
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
			    "scmd(%p)\n", scmd);
			goto tm_retry;
		}

		if (task_abort_retries > 1)
			sdev_printk(KERN_WARNING, sdev,
			    "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
			    " scmd(%p)\n",
			    task_abort_retries - 1, scmd);

		termination_count += le32_to_cpu(mpi_reply->TerminationCount);
		spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	}

	/* Another broadcast primitive arrived while processing this one:
	 * rescan from the top (bounded by the max_retries check above).
	 */
	if (ioc->broadcast_aen_pending) {
		dewtprintk(ioc,
			   ioc_info(ioc,
				    "%s: loop back due to pending AEN\n",
				    __func__));
		 ioc->broadcast_aen_pending = 0;
		 goto broadcast_aen_retry;
	}

 out:
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 out_no_lock:

	dewtprintk(ioc,
		   ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
			    __func__, query_count, termination_count));

	/* Re-enable event handling and unquiesce devices on the way out. */
	ioc->broadcast_aen_busy = 0;
	if (!ioc->shost_recovery)
		_scsih_ublock_io_all_device(ioc);
	mutex_unlock(&ioc->tm_cmds.mutex);
}
8798  
8799  /**
8800   * _scsih_sas_discovery_event - handle discovery events
8801   * @ioc: per adapter object
8802   * @fw_event: The fw_event_work object
8803   * Context: user.
8804   */
8805  static void
_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8806  _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
8807  	struct fw_event_work *fw_event)
8808  {
8809  	Mpi2EventDataSasDiscovery_t *event_data =
8810  		(Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
8811  
8812  	if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
8813  		ioc_info(ioc, "discovery event: (%s)",
8814  			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
8815  			 "start" : "stop");
8816  		if (event_data->DiscoveryStatus)
8817  			pr_cont("discovery_status(0x%08x)",
8818  				le32_to_cpu(event_data->DiscoveryStatus));
8819  		pr_cont("\n");
8820  	}
8821  
8822  	if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
8823  	    !ioc->sas_hba.num_phys) {
8824  		if (disable_discovery > 0 && ioc->shost_recovery) {
8825  			/* Wait for the reset to complete */
8826  			while (ioc->shost_recovery)
8827  				ssleep(1);
8828  		}
8829  		_scsih_sas_host_add(ioc);
8830  	}
8831  }
8832  
8833  /**
8834   * _scsih_sas_device_discovery_error_event - display SAS device discovery error
8835   *						events
8836   * @ioc: per adapter object
8837   * @fw_event: The fw_event_work object
8838   * Context: user.
8839   */
8840  static void
_scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8841  _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
8842  	struct fw_event_work *fw_event)
8843  {
8844  	Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
8845  		(Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
8846  
8847  	switch (event_data->ReasonCode) {
8848  	case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
8849  		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
8850  			 le16_to_cpu(event_data->DevHandle),
8851  			 (u64)le64_to_cpu(event_data->SASAddress),
8852  			 event_data->PhysicalPort);
8853  		break;
8854  	case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
8855  		ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
8856  			 le16_to_cpu(event_data->DevHandle),
8857  			 (u64)le64_to_cpu(event_data->SASAddress),
8858  			 event_data->PhysicalPort);
8859  		break;
8860  	default:
8861  		break;
8862  	}
8863  }
8864  
8865  /**
8866   * _scsih_pcie_enumeration_event - handle enumeration events
8867   * @ioc: per adapter object
8868   * @fw_event: The fw_event_work object
8869   * Context: user.
8870   */
8871  static void
_scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)8872  _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
8873  	struct fw_event_work *fw_event)
8874  {
8875  	Mpi26EventDataPCIeEnumeration_t *event_data =
8876  		(Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
8877  
8878  	if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
8879  		return;
8880  
8881  	ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
8882  		 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
8883  		 "started" : "completed",
8884  		 event_data->Flags);
8885  	if (event_data->EnumerationStatus)
8886  		pr_cont("enumeration_status(0x%08x)",
8887  			le32_to_cpu(event_data->EnumerationStatus));
8888  	pr_cont("\n");
8889  }
8890  
8891  /**
8892   * _scsih_ir_fastpath - turn on fastpath for IR physdisk
8893   * @ioc: per adapter object
8894   * @handle: device handle for physical disk
8895   * @phys_disk_num: physical disk number
8896   *
8897   * Return: 0 for success, else failure.
8898   */
static int
_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;
	u8 issue_reset = 0;
	int rc = 0;
	u16 ioc_status;
	u32 log_info;

	/* Fast path is not applicable on MPI2 (SAS2) generation HBAs. */
	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
		return rc;

	/* Serialize use of the shared scsih_cmds internal command frame. */
	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		rc = -EAGAIN;
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		rc = -EAGAIN;
		goto out;
	}

	/* Build the RAID_ACTION request: marking the phys disk "hidden"
	 * is what enables the fast path for it.
	 */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
	mpi_request->PhysDiskNum = phys_disk_num;

	dewtprintk(ioc,
		   ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
			    handle, phys_disk_num));

	/* Fire the request and wait up to 10 seconds for the reply. */
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		/* Timed out.  NOTE(review): mpt3sas_check_cmd_timeout is
		 * expected to set issue_reset when a host reset is needed
		 * (handled after the mutex is dropped below) — confirm the
		 * macro's out-parameter semantics.
		 */
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->scsih_cmds.status, mpi_request,
		    sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
		rc = -EFAULT;
		goto out;
	}

	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {

		/* Decode the reply; log info is only valid when flagged. */
		mpi_reply = ioc->scsih_cmds.reply;
		ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
		if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
			log_info =  le32_to_cpu(mpi_reply->IOCLogInfo);
		else
			log_info = 0;
		ioc_status &= MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
					    ioc_status, log_info));
			rc = -EFAULT;
		} else
			dewtprintk(ioc,
				   ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
	}

 out:
	/* Release the command frame before (possibly) resetting the HBA. */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);

	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	return rc;
}
8981  
8982  /**
8983   * _scsih_reprobe_lun - reprobing lun
8984   * @sdev: scsi device struct
8985   * @no_uld_attach: sdev->no_uld_attach flag setting
8986   *
8987   **/
8988  static void
_scsih_reprobe_lun(struct scsi_device * sdev,void * no_uld_attach)8989  _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
8990  {
8991  	sdev->no_uld_attach = no_uld_attach ? 1 : 0;
8992  	sdev_printk(KERN_INFO, sdev, "%s raid component\n",
8993  	    sdev->no_uld_attach ? "hiding" : "exposing");
8994  	WARN_ON(scsi_device_reprobe(sdev));
8995  }
8996  
8997  /**
8998   * _scsih_sas_volume_add - add new volume
8999   * @ioc: per adapter object
9000   * @element: IR config element data
9001   * Context: user.
9002   */
static void
_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	u64 wwid;
	u16 handle = le16_to_cpu(element->VolDevHandle);
	int rc;

	/* The WWID uniquely identifies the volume; without it we can't
	 * track the device at all.
	 */
	mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
	if (!wwid) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	/* Nothing to do if this volume is already known. */
	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

	if (raid_device)
		return;

	raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
	if (!raid_device) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return;
	}

	raid_device->id = ioc->sas_id++;
	raid_device->channel = RAID_CHANNEL;
	raid_device->handle = handle;
	raid_device->wwid = wwid;
	_scsih_raid_device_add(ioc, raid_device);
	if (!ioc->wait_for_discovery_to_complete) {
		/* Register with the SCSI midlayer now; on failure undo the
		 * internal bookkeeping so we don't track a phantom volume.
		 */
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	} else {
		/* Still discovering: defer scsi_add_device and only record
		 * whether this volume is the boot device candidate.
		 */
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		_scsih_determine_boot_device(ioc, raid_device, 1);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	}
}
9050  
9051  /**
9052   * _scsih_sas_volume_delete - delete volume
9053   * @ioc: per adapter object
9054   * @handle: volume device handle
9055   * Context: user.
9056   */
static void
_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
{
	struct _raid_device *raid_device;
	unsigned long flags;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	struct scsi_target *starget = NULL;

	/* Unlink the volume from internal tracking under the lock; stash
	 * the scsi_target so midlayer removal can happen after unlock.
	 */
	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
	if (raid_device) {
		if (raid_device->starget) {
			starget = raid_device->starget;
			sas_target_priv_data = starget->hostdata;
			/* Mark deleted so in-flight I/O paths bail out. */
			sas_target_priv_data->deleted = 1;
		}
		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
			 raid_device->handle, (u64)raid_device->wwid);
		list_del(&raid_device->list);
		kfree(raid_device);
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
	/* Target removal is done outside the spinlock — presumably because
	 * scsi_remove_target may sleep; confirm against SCSI midlayer docs.
	 */
	if (starget)
		scsi_remove_target(&starget->dev);
}
9082  
9083  /**
9084   * _scsih_sas_pd_expose - expose pd component to /dev/sdX
9085   * @ioc: per adapter object
9086   * @element: IR config element data
9087   * Context: user.
9088   */
static void
_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);

	/* Under the lock: detach the device from its volume and clear the
	 * RAID-component flag so it is treated as a plain SAS disk.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		sas_device->volume_handle = 0;
		sas_device->volume_wwid = 0;
		clear_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags &=
			    ~MPT_TARGET_FLAGS_RAID_COMPONENT;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* exposing raid component */
	if (starget)
		starget_for_each_device(starget, NULL, _scsih_reprobe_lun);

	/* Drop the reference taken by __mpt3sas_get_sdev_by_handle. */
	sas_device_put(sas_device);
}
9122  
9123  /**
9124   * _scsih_sas_pd_hide - hide pd component from /dev/sdX
9125   * @ioc: per adapter object
9126   * @element: IR config element data
9127   * Context: user.
9128   */
static void
_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventIrConfigElement_t *element)
{
	struct _sas_device *sas_device;
	struct scsi_target *starget = NULL;
	struct MPT3SAS_TARGET *sas_target_priv_data;
	unsigned long flags;
	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
	u16 volume_handle = 0;
	u64 volume_wwid = 0;

	/* Look up the owning volume first (config-page access can sleep,
	 * so it must happen before taking the spinlock).
	 */
	mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
	if (volume_handle)
		mpt3sas_config_get_volume_wwid(ioc, volume_handle,
		    &volume_wwid);

	/* Under the lock: flag the device as a RAID component and record
	 * which volume it belongs to.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
	if (sas_device) {
		set_bit(handle, ioc->pd_handles);
		if (sas_device->starget && sas_device->starget->hostdata) {
			starget = sas_device->starget;
			sas_target_priv_data = starget->hostdata;
			sas_target_priv_data->flags |=
			    MPT_TARGET_FLAGS_RAID_COMPONENT;
			sas_device->volume_handle = volume_handle;
			sas_device->volume_wwid = volume_wwid;
		}
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
	if (!sas_device)
		return;

	/* hiding raid component */
	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);

	if (starget)
		starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);

	/* Drop the reference taken by __mpt3sas_get_sdev_by_handle. */
	sas_device_put(sas_device);
}
9171  
9172  /**
9173   * _scsih_sas_pd_delete - delete pd component
9174   * @ioc: per adapter object
9175   * @element: IR config element data
9176   * Context: user.
9177   */
9178  static void
_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER * ioc,Mpi2EventIrConfigElement_t * element)9179  _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
9180  	Mpi2EventIrConfigElement_t *element)
9181  {
9182  	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9183  
9184  	_scsih_device_remove_by_handle(ioc, handle);
9185  }
9186  
9187  /**
9188   * _scsih_sas_pd_add - remove pd component
9189   * @ioc: per adapter object
9190   * @element: IR config element data
9191   * Context: user.
9192   */
9193  static void
_scsih_sas_pd_add(struct MPT3SAS_ADAPTER * ioc,Mpi2EventIrConfigElement_t * element)9194  _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
9195  	Mpi2EventIrConfigElement_t *element)
9196  {
9197  	struct _sas_device *sas_device;
9198  	u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
9199  	Mpi2ConfigReply_t mpi_reply;
9200  	Mpi2SasDevicePage0_t sas_device_pg0;
9201  	u32 ioc_status;
9202  	u64 sas_address;
9203  	u16 parent_handle;
9204  
9205  	set_bit(handle, ioc->pd_handles);
9206  
9207  	sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9208  	if (sas_device) {
9209  		_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9210  		sas_device_put(sas_device);
9211  		return;
9212  	}
9213  
9214  	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
9215  	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
9216  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
9217  			__FILE__, __LINE__, __func__);
9218  		return;
9219  	}
9220  
9221  	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9222  	    MPI2_IOCSTATUS_MASK;
9223  	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9224  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
9225  			__FILE__, __LINE__, __func__);
9226  		return;
9227  	}
9228  
9229  	parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9230  	if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
9231  		mpt3sas_transport_update_links(ioc, sas_address, handle,
9232  		    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
9233  		    mpt3sas_get_port_by_id(ioc,
9234  		    sas_device_pg0.PhysicalPort, 0));
9235  
9236  	_scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
9237  	_scsih_add_device(ioc, handle, 0, 1);
9238  }
9239  
9240  /**
9241   * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
9242   * @ioc: per adapter object
9243   * @event_data: event data payload
9244   * Context: user.
9245   */
9246  static void
_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrConfigChangeList_t * event_data)9247  _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
9248  	Mpi2EventDataIrConfigChangeList_t *event_data)
9249  {
9250  	Mpi2EventIrConfigElement_t *element;
9251  	u8 element_type;
9252  	int i;
9253  	char *reason_str = NULL, *element_str = NULL;
9254  
9255  	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
9256  
9257  	ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
9258  		 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
9259  		 "foreign" : "native",
9260  		 event_data->NumElements);
9261  	for (i = 0; i < event_data->NumElements; i++, element++) {
9262  		switch (element->ReasonCode) {
9263  		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
9264  			reason_str = "add";
9265  			break;
9266  		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
9267  			reason_str = "remove";
9268  			break;
9269  		case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
9270  			reason_str = "no change";
9271  			break;
9272  		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
9273  			reason_str = "hide";
9274  			break;
9275  		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
9276  			reason_str = "unhide";
9277  			break;
9278  		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
9279  			reason_str = "volume_created";
9280  			break;
9281  		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
9282  			reason_str = "volume_deleted";
9283  			break;
9284  		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
9285  			reason_str = "pd_created";
9286  			break;
9287  		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
9288  			reason_str = "pd_deleted";
9289  			break;
9290  		default:
9291  			reason_str = "unknown reason";
9292  			break;
9293  		}
9294  		element_type = le16_to_cpu(element->ElementFlags) &
9295  		    MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
9296  		switch (element_type) {
9297  		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
9298  			element_str = "volume";
9299  			break;
9300  		case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
9301  			element_str = "phys disk";
9302  			break;
9303  		case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
9304  			element_str = "hot spare";
9305  			break;
9306  		default:
9307  			element_str = "unknown element";
9308  			break;
9309  		}
9310  		pr_info("\t(%s:%s), vol handle(0x%04x), " \
9311  		    "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
9312  		    reason_str, le16_to_cpu(element->VolDevHandle),
9313  		    le16_to_cpu(element->PhysDiskDevHandle),
9314  		    element->PhysDiskNum);
9315  	}
9316  }
9317  
9318  /**
9319   * _scsih_sas_ir_config_change_event - handle ir configuration change events
9320   * @ioc: per adapter object
9321   * @fw_event: The fw_event_work object
9322   * Context: user.
9323   */
static void
_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	Mpi2EventIrConfigElement_t *element;
	int i;
	u8 foreign_config;
	Mpi2EventDataIrConfigChangeList_t *event_data =
		(Mpi2EventDataIrConfigChangeList_t *)
		fw_event->event_data;

	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
	     (!ioc->hide_ir_msg))
		_scsih_sas_ir_config_change_event_debug(ioc, event_data);

	/* Foreign (imported) configs get no volume add/remove handling. */
	foreign_config = (le32_to_cpu(event_data->Flags) &
	    MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;

	element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
	/* During host reset on MPI2.5+ HBAs, only re-enable fast path for
	 * hidden phys disks; defer all other config processing.
	 */
	if (ioc->shost_recovery &&
	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		for (i = 0; i < event_data->NumElements; i++, element++) {
			if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
				_scsih_ir_fastpath(ioc,
					le16_to_cpu(element->PhysDiskDevHandle),
					element->PhysDiskNum);
		}
		return;
	}

	/* Dispatch each element; warpdrive HBAs skip phys-disk handling. */
	for (i = 0; i < event_data->NumElements; i++, element++) {

		switch (element->ReasonCode) {
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
		case MPI2_EVENT_IR_CHANGE_RC_ADDED:
			if (!foreign_config)
				_scsih_sas_volume_add(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
		case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
			if (!foreign_config)
				_scsih_sas_volume_delete(ioc,
				    le16_to_cpu(element->VolDevHandle));
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_hide(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_expose(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_HIDE:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_add(ioc, element);
			break;
		case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
			if (!ioc->is_warpdrive)
				_scsih_sas_pd_delete(ioc, element);
			break;
		}
	}
}
9387  
9388  /**
9389   * _scsih_sas_ir_volume_event - IR volume event
9390   * @ioc: per adapter object
9391   * @fw_event: The fw_event_work object
9392   * Context: user.
9393   */
static void
_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	u64 wwid;
	unsigned long flags;
	struct _raid_device *raid_device;
	u16 handle;
	u32 state;
	int rc;
	Mpi2EventDataIrVolume_t *event_data =
		(Mpi2EventDataIrVolume_t *) fw_event->event_data;

	/* Skip during host reset; only state-change events are handled. */
	if (ioc->shost_recovery)
		return;

	if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
		return;

	handle = le16_to_cpu(event_data->VolDevHandle);
	state = le32_to_cpu(event_data->NewValue);
	if (!ioc->hide_ir_msg)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				    __func__, handle,
				    le32_to_cpu(event_data->PreviousValue),
				    state));
	switch (state) {
	case MPI2_RAID_VOL_STATE_MISSING:
	case MPI2_RAID_VOL_STATE_FAILED:
		/* Volume is gone/unusable: tear it down. */
		_scsih_sas_volume_delete(ioc, handle);
		break;

	case MPI2_RAID_VOL_STATE_ONLINE:
	case MPI2_RAID_VOL_STATE_DEGRADED:
	case MPI2_RAID_VOL_STATE_OPTIMAL:

		/* Volume is usable: register it if not already tracked. */
		spin_lock_irqsave(&ioc->raid_device_lock, flags);
		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);

		if (raid_device)
			break;

		mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
		if (!wwid) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			break;
		}

		raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
		if (!raid_device) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			break;
		}

		raid_device->id = ioc->sas_id++;
		raid_device->channel = RAID_CHANNEL;
		raid_device->handle = handle;
		raid_device->wwid = wwid;
		_scsih_raid_device_add(ioc, raid_device);
		/* On midlayer registration failure, undo the bookkeeping. */
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
		break;

	case MPI2_RAID_VOL_STATE_INITIALIZING:
	default:
		break;
	}
}
9468  
9469  /**
9470   * _scsih_sas_ir_physical_disk_event - PD event
9471   * @ioc: per adapter object
9472   * @fw_event: The fw_event_work object
9473   * Context: user.
9474   */
static void
_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
	struct fw_event_work *fw_event)
{
	u16 handle, parent_handle;
	u32 state;
	struct _sas_device *sas_device;
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasDevicePage0_t sas_device_pg0;
	u32 ioc_status;
	Mpi2EventDataIrPhysicalDisk_t *event_data =
		(Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
	u64 sas_address;

	/* Skip during host reset; only state-change events are handled. */
	if (ioc->shost_recovery)
		return;

	if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
		return;

	handle = le16_to_cpu(event_data->PhysDiskDevHandle);
	state = le32_to_cpu(event_data->NewValue);

	if (!ioc->hide_ir_msg)
		dewtprintk(ioc,
			   ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
				    __func__, handle,
				    le32_to_cpu(event_data->PreviousValue),
				    state));

	switch (state) {
	case MPI2_RAID_PD_STATE_ONLINE:
	case MPI2_RAID_PD_STATE_DEGRADED:
	case MPI2_RAID_PD_STATE_REBUILDING:
	case MPI2_RAID_PD_STATE_OPTIMAL:
	case MPI2_RAID_PD_STATE_HOT_SPARE:

		/* Active phys disk: track it (warpdrive hides pds). */
		if (!ioc->is_warpdrive)
			set_bit(handle, ioc->pd_handles);

		/* Already known: drop the lookup reference and stop. */
		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
		if (sas_device) {
			sas_device_put(sas_device);
			return;
		}

		/* New device: read SAS Device Page 0 for its topology. */
		if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
		    handle))) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
		    MPI2_IOCSTATUS_MASK;
		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
			ioc_err(ioc, "failure at %s:%d/%s()!\n",
				__FILE__, __LINE__, __func__);
			return;
		}

		/* Hook into the transport layer under the parent device. */
		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
			mpt3sas_transport_update_links(ioc, sas_address, handle,
			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
			    mpt3sas_get_port_by_id(ioc,
			    sas_device_pg0.PhysicalPort, 0));

		_scsih_add_device(ioc, handle, 0, 1);

		break;

	case MPI2_RAID_PD_STATE_OFFLINE:
	case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
	case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
	default:
		break;
	}
}
9555  
9556  /**
9557   * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
9558   * @ioc: per adapter object
9559   * @event_data: event data payload
9560   * Context: user.
9561   */
9562  static void
_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER * ioc,Mpi2EventDataIrOperationStatus_t * event_data)9563  _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
9564  	Mpi2EventDataIrOperationStatus_t *event_data)
9565  {
9566  	char *reason_str = NULL;
9567  
9568  	switch (event_data->RAIDOperation) {
9569  	case MPI2_EVENT_IR_RAIDOP_RESYNC:
9570  		reason_str = "resync";
9571  		break;
9572  	case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
9573  		reason_str = "online capacity expansion";
9574  		break;
9575  	case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
9576  		reason_str = "consistency check";
9577  		break;
9578  	case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
9579  		reason_str = "background init";
9580  		break;
9581  	case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
9582  		reason_str = "make data consistent";
9583  		break;
9584  	}
9585  
9586  	if (!reason_str)
9587  		return;
9588  
9589  	ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
9590  		 reason_str,
9591  		 le16_to_cpu(event_data->VolDevHandle),
9592  		 event_data->PercentComplete);
9593  }
9594  
9595  /**
9596   * _scsih_sas_ir_operation_status_event - handle RAID operation events
9597   * @ioc: per adapter object
9598   * @fw_event: The fw_event_work object
9599   * Context: user.
9600   */
9601  static void
_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER * ioc,struct fw_event_work * fw_event)9602  _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
9603  	struct fw_event_work *fw_event)
9604  {
9605  	Mpi2EventDataIrOperationStatus_t *event_data =
9606  		(Mpi2EventDataIrOperationStatus_t *)
9607  		fw_event->event_data;
9608  	static struct _raid_device *raid_device;
9609  	unsigned long flags;
9610  	u16 handle;
9611  
9612  	if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
9613  	    (!ioc->hide_ir_msg))
9614  		_scsih_sas_ir_operation_status_event_debug(ioc,
9615  		     event_data);
9616  
9617  	/* code added for raid transport support */
9618  	if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
9619  
9620  		spin_lock_irqsave(&ioc->raid_device_lock, flags);
9621  		handle = le16_to_cpu(event_data->VolDevHandle);
9622  		raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
9623  		if (raid_device)
9624  			raid_device->percent_complete =
9625  			    event_data->PercentComplete;
9626  		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9627  	}
9628  }
9629  
9630  /**
9631   * _scsih_prep_device_scan - initialize parameters prior to device scan
9632   * @ioc: per adapter object
9633   *
9634   * Set the deleted flag prior to device scan.  If the device is found during
9635   * the scan, then we clear the deleted flag.
9636   */
9637  static void
_scsih_prep_device_scan(struct MPT3SAS_ADAPTER * ioc)9638  _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
9639  {
9640  	struct MPT3SAS_DEVICE *sas_device_priv_data;
9641  	struct scsi_device *sdev;
9642  
9643  	shost_for_each_device(sdev, ioc->shost) {
9644  		sas_device_priv_data = sdev->hostdata;
9645  		if (sas_device_priv_data && sas_device_priv_data->sas_target)
9646  			sas_device_priv_data->sas_target->deleted = 1;
9647  	}
9648  }
9649  
9650  /**
9651   * _scsih_update_device_qdepth - Update QD during Reset.
9652   * @ioc: per adapter object
9653   *
9654   */
9655  static void
_scsih_update_device_qdepth(struct MPT3SAS_ADAPTER * ioc)9656  _scsih_update_device_qdepth(struct MPT3SAS_ADAPTER *ioc)
9657  {
9658  	struct MPT3SAS_DEVICE *sas_device_priv_data;
9659  	struct MPT3SAS_TARGET *sas_target_priv_data;
9660  	struct _sas_device *sas_device;
9661  	struct scsi_device *sdev;
9662  	u16 qdepth;
9663  
9664  	ioc_info(ioc, "Update devices with firmware reported queue depth\n");
9665  	shost_for_each_device(sdev, ioc->shost) {
9666  		sas_device_priv_data = sdev->hostdata;
9667  		if (sas_device_priv_data && sas_device_priv_data->sas_target) {
9668  			sas_target_priv_data = sas_device_priv_data->sas_target;
9669  			sas_device = sas_device_priv_data->sas_target->sas_dev;
9670  			if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE)
9671  				qdepth = ioc->max_nvme_qd;
9672  			else if (sas_device &&
9673  			    sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
9674  				qdepth = (sas_device->port_type > 1) ?
9675  				    ioc->max_wideport_qd : ioc->max_narrowport_qd;
9676  			else if (sas_device &&
9677  			    sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
9678  				qdepth = ioc->max_sata_qd;
9679  			else
9680  				continue;
9681  			mpt3sas_scsih_change_queue_depth(sdev, qdepth);
9682  		}
9683  	}
9684  }
9685  
9686  /**
9687   * _scsih_mark_responding_sas_device - mark a sas_devices as responding
9688   * @ioc: per adapter object
9689   * @sas_device_pg0: SAS Device page 0
9690   *
9691   * After host reset, find out whether devices are still responding.
9692   * Used in _scsih_remove_unresponsive_sas_devices.
9693   */
static void
_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
Mpi2SasDevicePage0_t *sas_device_pg0)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _sas_device *sas_device = NULL;
	struct _enclosure_node *enclosure_dev = NULL;
	unsigned long flags;
	struct hba_port *port = mpt3sas_get_port_by_id(
	    ioc, sas_device_pg0->PhysicalPort, 0);

	/* Look up the cached enclosure entry for this device, if any;
	 * a miss is only reported, not treated as fatal.
	 */
	if (sas_device_pg0->EnclosureHandle) {
		enclosure_dev =
			mpt3sas_scsih_enclosure_find_by_handle(ioc,
				le16_to_cpu(sas_device_pg0->EnclosureHandle));
		if (enclosure_dev == NULL)
			ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
				 sas_device_pg0->EnclosureHandle);
	}
	spin_lock_irqsave(&ioc->sas_device_lock, flags);
	list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
		/* Match on SAS address, slot and hba_port; the device handle
		 * is deliberately NOT part of the match since it may have
		 * changed across the host reset.
		 */
		if (sas_device->sas_address != le64_to_cpu(
		    sas_device_pg0->SASAddress))
			continue;
		if (sas_device->slot != le16_to_cpu(sas_device_pg0->Slot))
			continue;
		if (sas_device->port != port)
			continue;
		/* Found it still present: flag as responding so the later
		 * _scsih_remove_unresponding_devices() pass keeps it.
		 */
		sas_device->responding = 1;
		starget = sas_device->starget;
		if (starget && starget->hostdata) {
			sas_target_priv_data = starget->hostdata;
			/* Clear stale TM-busy/deleted state from the reset. */
			sas_target_priv_data->tm_busy = 0;
			sas_target_priv_data->deleted = 0;
		} else
			sas_target_priv_data = NULL;
		if (starget) {
			starget_printk(KERN_INFO, starget,
			    "handle(0x%04x), sas_addr(0x%016llx)\n",
			    le16_to_cpu(sas_device_pg0->DevHandle),
			    (unsigned long long)
			    sas_device->sas_address);

			if (sas_device->enclosure_handle != 0)
				starget_printk(KERN_INFO, starget,
				 "enclosure logical id(0x%016llx), slot(%d)\n",
				 (unsigned long long)
				 sas_device->enclosure_logical_id,
				 sas_device->slot);
		}
		/* Refresh enclosure-level/connector info from the new page 0
		 * (4-byte ConnectorName copy mirrors the page-0 field size).
		 */
		if (le16_to_cpu(sas_device_pg0->Flags) &
		      MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
			sas_device->enclosure_level =
			   sas_device_pg0->EnclosureLevel;
			memcpy(&sas_device->connector_name[0],
				&sas_device_pg0->ConnectorName[0], 4);
		} else {
			sas_device->enclosure_level = 0;
			sas_device->connector_name[0] = '\0';
		}

		sas_device->enclosure_handle =
			le16_to_cpu(sas_device_pg0->EnclosureHandle);
		sas_device->is_chassis_slot_valid = 0;
		if (enclosure_dev) {
			sas_device->enclosure_logical_id = le64_to_cpu(
				enclosure_dev->pg0.EnclosureLogicalID);
			if (le16_to_cpu(enclosure_dev->pg0.Flags) &
			    MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
				sas_device->is_chassis_slot_valid = 1;
				sas_device->chassis_slot =
					enclosure_dev->pg0.ChassisSlot;
			}
		}

		/* If the firmware handle is unchanged we are done; otherwise
		 * record the new handle on both the device and its target.
		 */
		if (sas_device->handle == le16_to_cpu(
		    sas_device_pg0->DevHandle))
			goto out;
		pr_info("\thandle changed from(0x%04x)!!!\n",
		    sas_device->handle);
		sas_device->handle = le16_to_cpu(
		    sas_device_pg0->DevHandle);
		if (sas_target_priv_data)
			sas_target_priv_data->handle =
			    le16_to_cpu(sas_device_pg0->DevHandle);
		goto out;
	}
 out:
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
9785  
9786  /**
9787   * _scsih_create_enclosure_list_after_reset - Free Existing list,
9788   *	And create enclosure list by scanning all Enclosure Page(0)s
9789   * @ioc: per adapter object
9790   */
9791  static void
_scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER * ioc)9792  _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
9793  {
9794  	struct _enclosure_node *enclosure_dev;
9795  	Mpi2ConfigReply_t mpi_reply;
9796  	u16 enclosure_handle;
9797  	int rc;
9798  
9799  	/* Free existing enclosure list */
9800  	mpt3sas_free_enclosure_list(ioc);
9801  
9802  	/* Re constructing enclosure list after reset*/
9803  	enclosure_handle = 0xFFFF;
9804  	do {
9805  		enclosure_dev =
9806  			kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
9807  		if (!enclosure_dev) {
9808  			ioc_err(ioc, "failure at %s:%d/%s()!\n",
9809  				__FILE__, __LINE__, __func__);
9810  			return;
9811  		}
9812  		rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
9813  				&enclosure_dev->pg0,
9814  				MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
9815  				enclosure_handle);
9816  
9817  		if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
9818  						MPI2_IOCSTATUS_MASK)) {
9819  			kfree(enclosure_dev);
9820  			return;
9821  		}
9822  		list_add_tail(&enclosure_dev->list,
9823  						&ioc->enclosure_list);
9824  		enclosure_handle =
9825  			le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
9826  	} while (1);
9827  }
9828  
9829  /**
9830   * _scsih_search_responding_sas_devices -
9831   * @ioc: per adapter object
9832   *
9833   * After host reset, find out whether devices are still responding.
9834   * If not remove.
9835   */
9836  static void
_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER * ioc)9837  _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
9838  {
9839  	Mpi2SasDevicePage0_t sas_device_pg0;
9840  	Mpi2ConfigReply_t mpi_reply;
9841  	u16 ioc_status;
9842  	u16 handle;
9843  	u32 device_info;
9844  
9845  	ioc_info(ioc, "search for end-devices: start\n");
9846  
9847  	if (list_empty(&ioc->sas_device_list))
9848  		goto out;
9849  
9850  	handle = 0xFFFF;
9851  	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9852  	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9853  	    handle))) {
9854  		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9855  		    MPI2_IOCSTATUS_MASK;
9856  		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
9857  			break;
9858  		handle = le16_to_cpu(sas_device_pg0.DevHandle);
9859  		device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
9860  		if (!(_scsih_is_end_device(device_info)))
9861  			continue;
9862  		_scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
9863  	}
9864  
9865   out:
9866  	ioc_info(ioc, "search for end-devices: complete\n");
9867  }
9868  
9869  /**
9870   * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
9871   * @ioc: per adapter object
9872   * @pcie_device_pg0: PCIe Device page 0
9873   *
9874   * After host reset, find out whether devices are still responding.
9875   * Used in _scsih_remove_unresponding_devices.
9876   */
static void
_scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
	Mpi26PCIeDevicePage0_t *pcie_device_pg0)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _pcie_device *pcie_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
		/* Match on WWID + slot; the device handle is deliberately
		 * not matched, as it may have changed across the reset.
		 */
		if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
		    && (pcie_device->slot == le16_to_cpu(
		    pcie_device_pg0->Slot))) {
			pcie_device->access_status =
					pcie_device_pg0->AccessStatus;
			/* Flag as responding so the later removal pass
			 * (_scsih_remove_unresponding_devices) keeps it.
			 */
			pcie_device->responding = 1;
			starget = pcie_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				/* Clear stale TM-busy/deleted state. */
				sas_target_priv_data->tm_busy = 0;
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			if (starget) {
				starget_printk(KERN_INFO, starget,
				    "handle(0x%04x), wwid(0x%016llx) ",
				    pcie_device->handle,
				    (unsigned long long)pcie_device->wwid);
				if (pcie_device->enclosure_handle != 0)
					starget_printk(KERN_INFO, starget,
					    "enclosure logical id(0x%016llx), "
					    "slot(%d)\n",
					    (unsigned long long)
					    pcie_device->enclosure_logical_id,
					    pcie_device->slot);
			}

			/* Enclosure-level info is valid only on MPI2.5+
			 * adapters and when the page-0 flag says so; the
			 * 4-byte copy matches the ConnectorName field size.
			 */
			if (((le32_to_cpu(pcie_device_pg0->Flags)) &
			    MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
			    (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
				pcie_device->enclosure_level =
				    pcie_device_pg0->EnclosureLevel;
				memcpy(&pcie_device->connector_name[0],
				    &pcie_device_pg0->ConnectorName[0], 4);
			} else {
				pcie_device->enclosure_level = 0;
				pcie_device->connector_name[0] = '\0';
			}

			/* If the firmware handle changed across the reset,
			 * update it on the device and its target.
			 */
			if (pcie_device->handle == le16_to_cpu(
			    pcie_device_pg0->DevHandle))
				goto out;
			pr_info("\thandle changed from(0x%04x)!!!\n",
			    pcie_device->handle);
			pcie_device->handle = le16_to_cpu(
			    pcie_device_pg0->DevHandle);
			if (sas_target_priv_data)
				sas_target_priv_data->handle =
				    le16_to_cpu(pcie_device_pg0->DevHandle);
			goto out;
		}
	}

 out:
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
9944  
9945  /**
9946   * _scsih_search_responding_pcie_devices -
9947   * @ioc: per adapter object
9948   *
9949   * After host reset, find out whether devices are still responding.
9950   * If not remove.
9951   */
9952  static void
_scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER * ioc)9953  _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
9954  {
9955  	Mpi26PCIeDevicePage0_t pcie_device_pg0;
9956  	Mpi2ConfigReply_t mpi_reply;
9957  	u16 ioc_status;
9958  	u16 handle;
9959  	u32 device_info;
9960  
9961  	ioc_info(ioc, "search for end-devices: start\n");
9962  
9963  	if (list_empty(&ioc->pcie_device_list))
9964  		goto out;
9965  
9966  	handle = 0xFFFF;
9967  	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9968  		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9969  		handle))) {
9970  		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9971  		    MPI2_IOCSTATUS_MASK;
9972  		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9973  			ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
9974  				 __func__, ioc_status,
9975  				 le32_to_cpu(mpi_reply.IOCLogInfo));
9976  			break;
9977  		}
9978  		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9979  		device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
9980  		if (!(_scsih_is_nvme_pciescsi_device(device_info)))
9981  			continue;
9982  		_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
9983  	}
9984  out:
9985  	ioc_info(ioc, "search for PCIe end-devices: complete\n");
9986  }
9987  
9988  /**
9989   * _scsih_mark_responding_raid_device - mark a raid_device as responding
9990   * @ioc: per adapter object
9991   * @wwid: world wide identifier for raid volume
9992   * @handle: device handle
9993   *
9994   * After host reset, find out whether devices are still responding.
9995   * Used in _scsih_remove_unresponsive_raid_devices.
9996   */
static void
_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
	u16 handle)
{
	struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
	struct scsi_target *starget;
	struct _raid_device *raid_device;
	unsigned long flags;

	spin_lock_irqsave(&ioc->raid_device_lock, flags);
	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
		/* Match on WWID; only volumes already exposed to the SCSI
		 * midlayer (starget set) are re-marked.
		 */
		if (raid_device->wwid == wwid && raid_device->starget) {
			starget = raid_device->starget;
			if (starget && starget->hostdata) {
				sas_target_priv_data = starget->hostdata;
				sas_target_priv_data->deleted = 0;
			} else
				sas_target_priv_data = NULL;
			raid_device->responding = 1;
			/* Drop the lock to call into printk/warpdrive setup,
			 * which must not run under the spinlock.  NOTE(review):
			 * raid_device is still dereferenced after the unlock;
			 * presumably safe because this runs during reset
			 * recovery with no concurrent list mutation — confirm.
			 */
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			starget_printk(KERN_INFO, raid_device->starget,
			    "handle(0x%04x), wwid(0x%016llx)\n", handle,
			    (unsigned long long)raid_device->wwid);

			/*
			 * WARPDRIVE: The handles of the PDs might have changed
			 * across the host reset so re-initialize the
			 * required data for Direct IO
			 */
			mpt3sas_init_warpdrive_properties(ioc, raid_device);
			spin_lock_irqsave(&ioc->raid_device_lock, flags);
			/* Record the new handle if it changed across reset. */
			if (raid_device->handle == handle) {
				spin_unlock_irqrestore(&ioc->raid_device_lock,
				    flags);
				return;
			}
			pr_info("\thandle changed from(0x%04x)!!!\n",
			    raid_device->handle);
			raid_device->handle = handle;
			if (sas_target_priv_data)
				sas_target_priv_data->handle = handle;
			spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
10044  
10045  /**
10046   * _scsih_search_responding_raid_devices -
10047   * @ioc: per adapter object
10048   *
10049   * After host reset, find out whether devices are still responding.
10050   * If not remove.
10051   */
10052  static void
_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER * ioc)10053  _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
10054  {
10055  	Mpi2RaidVolPage1_t volume_pg1;
10056  	Mpi2RaidVolPage0_t volume_pg0;
10057  	Mpi2RaidPhysDiskPage0_t pd_pg0;
10058  	Mpi2ConfigReply_t mpi_reply;
10059  	u16 ioc_status;
10060  	u16 handle;
10061  	u8 phys_disk_num;
10062  
10063  	if (!ioc->ir_firmware)
10064  		return;
10065  
10066  	ioc_info(ioc, "search for raid volumes: start\n");
10067  
10068  	if (list_empty(&ioc->raid_device_list))
10069  		goto out;
10070  
10071  	handle = 0xFFFF;
10072  	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10073  	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10074  		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10075  		    MPI2_IOCSTATUS_MASK;
10076  		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10077  			break;
10078  		handle = le16_to_cpu(volume_pg1.DevHandle);
10079  
10080  		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10081  		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10082  		     sizeof(Mpi2RaidVolPage0_t)))
10083  			continue;
10084  
10085  		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10086  		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10087  		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
10088  			_scsih_mark_responding_raid_device(ioc,
10089  			    le64_to_cpu(volume_pg1.WWID), handle);
10090  	}
10091  
10092  	/* refresh the pd_handles */
10093  	if (!ioc->is_warpdrive) {
10094  		phys_disk_num = 0xFF;
10095  		memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
10096  		while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10097  		    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10098  		    phys_disk_num))) {
10099  			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10100  			    MPI2_IOCSTATUS_MASK;
10101  			if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10102  				break;
10103  			phys_disk_num = pd_pg0.PhysDiskNum;
10104  			handle = le16_to_cpu(pd_pg0.DevHandle);
10105  			set_bit(handle, ioc->pd_handles);
10106  		}
10107  	}
10108   out:
10109  	ioc_info(ioc, "search for responding raid volumes: complete\n");
10110  }
10111  
10112  /**
10113   * _scsih_mark_responding_expander - mark a expander as responding
10114   * @ioc: per adapter object
10115   * @expander_pg0:SAS Expander Config Page0
10116   *
10117   * After host reset, find out whether devices are still responding.
10118   * Used in _scsih_remove_unresponsive_expanders.
10119   */
10120  static void
_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER * ioc,Mpi2ExpanderPage0_t * expander_pg0)10121  _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
10122  	Mpi2ExpanderPage0_t *expander_pg0)
10123  {
10124  	struct _sas_node *sas_expander = NULL;
10125  	unsigned long flags;
10126  	int i;
10127  	struct _enclosure_node *enclosure_dev = NULL;
10128  	u16 handle = le16_to_cpu(expander_pg0->DevHandle);
10129  	u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
10130  	u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
10131  	struct hba_port *port = mpt3sas_get_port_by_id(
10132  	    ioc, expander_pg0->PhysicalPort, 0);
10133  
10134  	if (enclosure_handle)
10135  		enclosure_dev =
10136  			mpt3sas_scsih_enclosure_find_by_handle(ioc,
10137  							enclosure_handle);
10138  
10139  	spin_lock_irqsave(&ioc->sas_node_lock, flags);
10140  	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
10141  		if (sas_expander->sas_address != sas_address)
10142  			continue;
10143  		if (sas_expander->port != port)
10144  			continue;
10145  		sas_expander->responding = 1;
10146  
10147  		if (enclosure_dev) {
10148  			sas_expander->enclosure_logical_id =
10149  			    le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
10150  			sas_expander->enclosure_handle =
10151  			    le16_to_cpu(expander_pg0->EnclosureHandle);
10152  		}
10153  
10154  		if (sas_expander->handle == handle)
10155  			goto out;
10156  		pr_info("\texpander(0x%016llx): handle changed" \
10157  		    " from(0x%04x) to (0x%04x)!!!\n",
10158  		    (unsigned long long)sas_expander->sas_address,
10159  		    sas_expander->handle, handle);
10160  		sas_expander->handle = handle;
10161  		for (i = 0 ; i < sas_expander->num_phys ; i++)
10162  			sas_expander->phy[i].handle = handle;
10163  		goto out;
10164  	}
10165   out:
10166  	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10167  }
10168  
10169  /**
10170   * _scsih_search_responding_expanders -
10171   * @ioc: per adapter object
10172   *
10173   * After host reset, find out whether devices are still responding.
10174   * If not remove.
10175   */
10176  static void
_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER * ioc)10177  _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
10178  {
10179  	Mpi2ExpanderPage0_t expander_pg0;
10180  	Mpi2ConfigReply_t mpi_reply;
10181  	u16 ioc_status;
10182  	u64 sas_address;
10183  	u16 handle;
10184  	u8 port;
10185  
10186  	ioc_info(ioc, "search for expanders: start\n");
10187  
10188  	if (list_empty(&ioc->sas_expander_list))
10189  		goto out;
10190  
10191  	handle = 0xFFFF;
10192  	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10193  	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10194  
10195  		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10196  		    MPI2_IOCSTATUS_MASK;
10197  		if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
10198  			break;
10199  
10200  		handle = le16_to_cpu(expander_pg0.DevHandle);
10201  		sas_address = le64_to_cpu(expander_pg0.SASAddress);
10202  		port = expander_pg0.PhysicalPort;
10203  		pr_info(
10204  		    "\texpander present: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
10205  		    handle, (unsigned long long)sas_address,
10206  		    (ioc->multipath_on_hba ?
10207  		    port : MULTIPATH_DISABLED_PORT_ID));
10208  		_scsih_mark_responding_expander(ioc, &expander_pg0);
10209  	}
10210  
10211   out:
10212  	ioc_info(ioc, "search for expanders: complete\n");
10213  }
10214  
10215  /**
10216   * _scsih_remove_unresponding_devices - removing unresponding devices
10217   * @ioc: per adapter object
10218   */
static void
_scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
{
	struct _sas_device *sas_device, *sas_device_next;
	struct _sas_node *sas_expander, *sas_expander_next;
	struct _raid_device *raid_device, *raid_device_next;
	struct _pcie_device *pcie_device, *pcie_device_next;
	struct list_head tmp_list;
	unsigned long flags;
	LIST_HEAD(head);

	ioc_info(ioc, "removing unresponding devices: start\n");

	/* removing unresponding end devices */
	ioc_info(ioc, "removing unresponding devices: end-devices\n");
	/*
	 * Iterate, pulling off devices marked as non-responding. We become the
	 * owner for the reference the list had on any object we prune.
	 */
	spin_lock_irqsave(&ioc->sas_device_lock, flags);

	/*
	 * Clean up the sas_device_init_list list as
	 * driver goes for fresh scan as part of diag reset.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next,
	    &ioc->sas_device_init_list, list) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	/* Move non-responding devices to the local list under the lock;
	 * responding ones have their flag reset for the next scan cycle.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next,
	    &ioc->sas_device_list, list) {
		if (!sas_device->responding)
			list_move_tail(&sas_device->list, &head);
		else
			sas_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

	/*
	 * Now, uninitialize and remove the unresponding devices we pruned.
	 */
	list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
		_scsih_remove_device(ioc, sas_device);
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	/* Same prune-then-remove sequence for PCIe (NVMe) end devices. */
	ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
	INIT_LIST_HEAD(&head);
	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
	/*
	 * Clean up the pcie_device_init_list list as
	 * driver goes for fresh scan as part of diag reset.
	 */
	list_for_each_entry_safe(pcie_device, pcie_device_next,
	    &ioc->pcie_device_init_list, list) {
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	list_for_each_entry_safe(pcie_device, pcie_device_next,
	    &ioc->pcie_device_list, list) {
		if (!pcie_device->responding)
			list_move_tail(&pcie_device->list, &head);
		else
			pcie_device->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);

	list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}

	/* removing unresponding volumes */
	if (ioc->ir_firmware) {
		ioc_info(ioc, "removing unresponding devices: volumes\n");
		/* Volumes are deleted by handle directly; no local pruning
		 * list is used here (no raid_device_lock taken around the
		 * walk — deletion happens inside _scsih_sas_volume_delete).
		 */
		list_for_each_entry_safe(raid_device, raid_device_next,
		    &ioc->raid_device_list, list) {
			if (!raid_device->responding)
				_scsih_sas_volume_delete(ioc,
				    raid_device->handle);
			else
				raid_device->responding = 0;
		}
	}

	/* removing unresponding expanders */
	ioc_info(ioc, "removing unresponding devices: expanders\n");
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	INIT_LIST_HEAD(&tmp_list);
	list_for_each_entry_safe(sas_expander, sas_expander_next,
	    &ioc->sas_expander_list, list) {
		if (!sas_expander->responding)
			list_move_tail(&sas_expander->list, &tmp_list);
		else
			sas_expander->responding = 0;
	}
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
	/* Tear down pruned expander nodes outside the lock. */
	list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
	    list) {
		_scsih_expander_node_remove(ioc, sas_expander);
	}

	ioc_info(ioc, "removing unresponding devices: complete\n");

	/* unblock devices */
	_scsih_ublock_io_all_device(ioc);
}
10331  
10332  static void
_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER * ioc,struct _sas_node * sas_expander,u16 handle)10333  _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
10334  	struct _sas_node *sas_expander, u16 handle)
10335  {
10336  	Mpi2ExpanderPage1_t expander_pg1;
10337  	Mpi2ConfigReply_t mpi_reply;
10338  	int i;
10339  
10340  	for (i = 0 ; i < sas_expander->num_phys ; i++) {
10341  		if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
10342  		    &expander_pg1, i, handle))) {
10343  			ioc_err(ioc, "failure at %s:%d/%s()!\n",
10344  				__FILE__, __LINE__, __func__);
10345  			return;
10346  		}
10347  
10348  		mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
10349  		    le16_to_cpu(expander_pg1.AttachedDevHandle), i,
10350  		    expander_pg1.NegotiatedLinkRate >> 4,
10351  		    sas_expander->port);
10352  	}
10353  }
10354  
10355  /**
10356   * _scsih_scan_for_devices_after_reset - scan for devices after host reset
10357   * @ioc: per adapter object
10358   */
10359  static void
_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER * ioc)10360  _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
10361  {
10362  	Mpi2ExpanderPage0_t expander_pg0;
10363  	Mpi2SasDevicePage0_t sas_device_pg0;
10364  	Mpi26PCIeDevicePage0_t pcie_device_pg0;
10365  	Mpi2RaidVolPage1_t volume_pg1;
10366  	Mpi2RaidVolPage0_t volume_pg0;
10367  	Mpi2RaidPhysDiskPage0_t pd_pg0;
10368  	Mpi2EventIrConfigElement_t element;
10369  	Mpi2ConfigReply_t mpi_reply;
10370  	u8 phys_disk_num, port_id;
10371  	u16 ioc_status;
10372  	u16 handle, parent_handle;
10373  	u64 sas_address;
10374  	struct _sas_device *sas_device;
10375  	struct _pcie_device *pcie_device;
10376  	struct _sas_node *expander_device;
10377  	static struct _raid_device *raid_device;
10378  	u8 retry_count;
10379  	unsigned long flags;
10380  
10381  	ioc_info(ioc, "scan devices: start\n");
10382  
10383  	_scsih_sas_host_refresh(ioc);
10384  
10385  	ioc_info(ioc, "\tscan devices: expanders start\n");
10386  
10387  	/* expanders */
10388  	handle = 0xFFFF;
10389  	while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
10390  	    MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
10391  		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10392  		    MPI2_IOCSTATUS_MASK;
10393  		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10394  			ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10395  				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10396  			break;
10397  		}
10398  		handle = le16_to_cpu(expander_pg0.DevHandle);
10399  		spin_lock_irqsave(&ioc->sas_node_lock, flags);
10400  		port_id = expander_pg0.PhysicalPort;
10401  		expander_device = mpt3sas_scsih_expander_find_by_sas_address(
10402  		    ioc, le64_to_cpu(expander_pg0.SASAddress),
10403  		    mpt3sas_get_port_by_id(ioc, port_id, 0));
10404  		spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
10405  		if (expander_device)
10406  			_scsih_refresh_expander_links(ioc, expander_device,
10407  			    handle);
10408  		else {
10409  			ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10410  				 handle,
10411  				 (u64)le64_to_cpu(expander_pg0.SASAddress));
10412  			_scsih_expander_add(ioc, handle);
10413  			ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
10414  				 handle,
10415  				 (u64)le64_to_cpu(expander_pg0.SASAddress));
10416  		}
10417  	}
10418  
10419  	ioc_info(ioc, "\tscan devices: expanders complete\n");
10420  
10421  	if (!ioc->ir_firmware)
10422  		goto skip_to_sas;
10423  
10424  	ioc_info(ioc, "\tscan devices: phys disk start\n");
10425  
10426  	/* phys disk */
10427  	phys_disk_num = 0xFF;
10428  	while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
10429  	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
10430  	    phys_disk_num))) {
10431  		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10432  		    MPI2_IOCSTATUS_MASK;
10433  		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10434  			ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10435  				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10436  			break;
10437  		}
10438  		phys_disk_num = pd_pg0.PhysDiskNum;
10439  		handle = le16_to_cpu(pd_pg0.DevHandle);
10440  		sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
10441  		if (sas_device) {
10442  			sas_device_put(sas_device);
10443  			continue;
10444  		}
10445  		if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10446  		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
10447  		    handle) != 0)
10448  			continue;
10449  		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10450  		    MPI2_IOCSTATUS_MASK;
10451  		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10452  			ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
10453  				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10454  			break;
10455  		}
10456  		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10457  		if (!_scsih_get_sas_address(ioc, parent_handle,
10458  		    &sas_address)) {
10459  			ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10460  				 handle,
10461  				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10462  			port_id = sas_device_pg0.PhysicalPort;
10463  			mpt3sas_transport_update_links(ioc, sas_address,
10464  			    handle, sas_device_pg0.PhyNum,
10465  			    MPI2_SAS_NEG_LINK_RATE_1_5,
10466  			    mpt3sas_get_port_by_id(ioc, port_id, 0));
10467  			set_bit(handle, ioc->pd_handles);
10468  			retry_count = 0;
10469  			/* This will retry adding the end device.
10470  			 * _scsih_add_device() will decide on retries and
10471  			 * return "1" when it should be retried
10472  			 */
10473  			while (_scsih_add_device(ioc, handle, retry_count++,
10474  			    1)) {
10475  				ssleep(1);
10476  			}
10477  			ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
10478  				 handle,
10479  				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10480  		}
10481  	}
10482  
10483  	ioc_info(ioc, "\tscan devices: phys disk complete\n");
10484  
10485  	ioc_info(ioc, "\tscan devices: volumes start\n");
10486  
10487  	/* volumes */
10488  	handle = 0xFFFF;
10489  	while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
10490  	    &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
10491  		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10492  		    MPI2_IOCSTATUS_MASK;
10493  		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10494  			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10495  				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10496  			break;
10497  		}
10498  		handle = le16_to_cpu(volume_pg1.DevHandle);
10499  		spin_lock_irqsave(&ioc->raid_device_lock, flags);
10500  		raid_device = _scsih_raid_device_find_by_wwid(ioc,
10501  		    le64_to_cpu(volume_pg1.WWID));
10502  		spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
10503  		if (raid_device)
10504  			continue;
10505  		if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
10506  		    &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
10507  		     sizeof(Mpi2RaidVolPage0_t)))
10508  			continue;
10509  		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10510  		    MPI2_IOCSTATUS_MASK;
10511  		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10512  			ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10513  				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10514  			break;
10515  		}
10516  		if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
10517  		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
10518  		    volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
10519  			memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
10520  			element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
10521  			element.VolDevHandle = volume_pg1.DevHandle;
10522  			ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
10523  				 volume_pg1.DevHandle);
10524  			_scsih_sas_volume_add(ioc, &element);
10525  			ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
10526  				 volume_pg1.DevHandle);
10527  		}
10528  	}
10529  
10530  	ioc_info(ioc, "\tscan devices: volumes complete\n");
10531  
10532   skip_to_sas:
10533  
10534  	ioc_info(ioc, "\tscan devices: end devices start\n");
10535  
10536  	/* sas devices */
10537  	handle = 0xFFFF;
10538  	while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
10539  	    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10540  	    handle))) {
10541  		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
10542  		    MPI2_IOCSTATUS_MASK;
10543  		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10544  			ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10545  				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10546  			break;
10547  		}
10548  		handle = le16_to_cpu(sas_device_pg0.DevHandle);
10549  		if (!(_scsih_is_end_device(
10550  		    le32_to_cpu(sas_device_pg0.DeviceInfo))))
10551  			continue;
10552  		port_id = sas_device_pg0.PhysicalPort;
10553  		sas_device = mpt3sas_get_sdev_by_addr(ioc,
10554  		    le64_to_cpu(sas_device_pg0.SASAddress),
10555  		    mpt3sas_get_port_by_id(ioc, port_id, 0));
10556  		if (sas_device) {
10557  			sas_device_put(sas_device);
10558  			continue;
10559  		}
10560  		parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
10561  		if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
10562  			ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10563  				 handle,
10564  				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10565  			mpt3sas_transport_update_links(ioc, sas_address, handle,
10566  			    sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5,
10567  			    mpt3sas_get_port_by_id(ioc, port_id, 0));
10568  			retry_count = 0;
10569  			/* This will retry adding the end device.
10570  			 * _scsih_add_device() will decide on retries and
10571  			 * return "1" when it should be retried
10572  			 */
10573  			while (_scsih_add_device(ioc, handle, retry_count++,
10574  			    0)) {
10575  				ssleep(1);
10576  			}
10577  			ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
10578  				 handle,
10579  				 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
10580  		}
10581  	}
10582  	ioc_info(ioc, "\tscan devices: end devices complete\n");
10583  	ioc_info(ioc, "\tscan devices: pcie end devices start\n");
10584  
10585  	/* pcie devices */
10586  	handle = 0xFFFF;
10587  	while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
10588  		&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
10589  		handle))) {
10590  		ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
10591  				& MPI2_IOCSTATUS_MASK;
10592  		if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
10593  			ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
10594  				 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
10595  			break;
10596  		}
10597  		handle = le16_to_cpu(pcie_device_pg0.DevHandle);
10598  		if (!(_scsih_is_nvme_pciescsi_device(
10599  			le32_to_cpu(pcie_device_pg0.DeviceInfo))))
10600  			continue;
10601  		pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
10602  				le64_to_cpu(pcie_device_pg0.WWID));
10603  		if (pcie_device) {
10604  			pcie_device_put(pcie_device);
10605  			continue;
10606  		}
10607  		retry_count = 0;
10608  		parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
10609  		_scsih_pcie_add_device(ioc, handle);
10610  
10611  		ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
10612  			 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
10613  	}
10614  
10615  	ioc_info(ioc, "\tpcie devices: pcie end devices complete\n");
10616  	ioc_info(ioc, "scan devices: complete\n");
10617  }
10618  
10619  /**
10620   * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
10621   * @ioc: per adapter object
10622   *
10623   * The handler for doing any required cleanup or initialization.
10624   */
mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER * ioc)10625  void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
10626  {
10627  	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
10628  }
10629  
10630  /**
10631   * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
10632   *							scsi & tm cmds.
10633   * @ioc: per adapter object
10634   *
10635   * The handler for doing any required cleanup or initialization.
10636   */
void
mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc,
	    ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
	/*
	 * Abort a pending internal scsih command: flag it as terminated by
	 * reset so the waiter can tell the completion is synthetic, release
	 * its message frame, and wake the waiter.
	 */
	if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
		ioc->scsih_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
		complete(&ioc->scsih_cmds.done);
	}
	/* Same treatment for a pending task-management command. */
	if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
		ioc->tm_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
		complete(&ioc->tm_cmds.done);
	}

	/* Clear the pending-add and remove-in-progress handle bitmaps. */
	memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
	memset(ioc->device_remove_in_progress, 0,
	       ioc->device_remove_in_progress_sz);
	/* Drain queued firmware events, then flush outstanding scsi cmds. */
	_scsih_fw_event_cleanup_queue(ioc);
	_scsih_flush_running_cmds(ioc);
}
10659  
10660  /**
10661   * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
10662   * @ioc: per adapter object
10663   *
10664   * The handler for doing any required cleanup or initialization.
10665   */
void
mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
	/*
	 * Skip post-reset rediscovery only when discovery was disabled via
	 * module parameter AND the HBA phy data was never set up.
	 */
	if (!(disable_discovery > 0 && !ioc->sas_hba.num_phys)) {
		if (ioc->multipath_on_hba) {
			_scsih_sas_port_refresh(ioc);
			_scsih_update_vphys_after_reset(ioc);
		}
		/*
		 * Re-validate the cached topology against what firmware now
		 * reports, then schedule deletion of devices that stopped
		 * responding across the reset. Ordering matters: devices are
		 * marked non-responding first, then each class is searched,
		 * and only then is deletion kicked off.
		 */
		_scsih_prep_device_scan(ioc);
		_scsih_create_enclosure_list_after_reset(ioc);
		_scsih_search_responding_sas_devices(ioc);
		_scsih_search_responding_pcie_devices(ioc);
		_scsih_search_responding_raid_devices(ioc);
		_scsih_search_responding_expanders(ioc);
		_scsih_error_recovery_delete_devices(ioc);
	}
}
10684  
10685  /**
10686   * _mpt3sas_fw_work - delayed task for processing firmware events
10687   * @ioc: per adapter object
10688   * @fw_event: The fw_event_work object
10689   * Context: user.
10690   */
static void
_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
	/* Publish the event under processing and unlink it from the queue. */
	ioc->current_event = fw_event;
	_scsih_fw_event_del_from_list(ioc, fw_event);

	/* the queue is being flushed so ignore this event */
	if (ioc->remove_host || ioc->pci_error_recovery) {
		fw_event_work_put(fw_event);
		ioc->current_event = NULL;
		return;
	}

	switch (fw_event->event) {
	case MPT3SAS_PROCESS_TRIGGER_DIAG:
		mpt3sas_process_trigger_data(ioc,
			(struct SL_WH_TRIGGERS_EVENT_DATA_T *)
			fw_event->event_data);
		break;
	case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
		/* Wait until any host recovery finishes before touching
		 * the device lists.
		 */
		while (scsi_host_in_recovery(ioc->shost) ||
					 ioc->shost_recovery) {
			/*
			 * If we're unloading or cancelling the work, bail.
			 * Otherwise, this can become an infinite loop.
			 */
			if (ioc->remove_host || ioc->fw_events_cleanup)
				goto out;
			ssleep(1);
		}
		_scsih_remove_unresponding_devices(ioc);
		_scsih_del_dirty_vphy(ioc);
		_scsih_del_dirty_port_entries(ioc);
		if (ioc->is_gen35_ioc)
			_scsih_update_device_qdepth(ioc);
		_scsih_scan_for_devices_after_reset(ioc);
		/*
		 * If diag reset has occurred during the driver load
		 * then driver has to complete the driver load operation
		 * by executing the following items:
		 *- Register the devices from sas_device_init_list to SML
		 *- clear is_driver_loading flag,
		 *- start the watchdog thread.
		 * In happy driver load path, above things are taken care of when
		 * driver executes scsih_scan_finished().
		 */
		if (ioc->is_driver_loading)
			_scsih_complete_devices_scanning(ioc);
		_scsih_set_nvme_max_shutdown_latency(ioc);
		break;
	case MPT3SAS_PORT_ENABLE_COMPLETE:
		ioc->start_scan = 0;
		/* -1 means the missing_delay module parameter was not set. */
		if (missing_delay[0] != -1 && missing_delay[1] != -1)
			mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
			    missing_delay[1]);
		dewtprintk(ioc,
			   ioc_info(ioc, "port enable: complete from worker thread\n"));
		break;
	case MPT3SAS_TURN_ON_PFA_LED:
		_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
		break;
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		_scsih_sas_topology_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
			_scsih_sas_device_status_change_event_debug(ioc,
			    (Mpi2EventDataSasDeviceStatusChange_t *)
			    fw_event->event_data);
		break;
	case MPI2_EVENT_SAS_DISCOVERY:
		_scsih_sas_discovery_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
		_scsih_sas_device_discovery_error_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
		_scsih_sas_broadcast_primitive_event(ioc, fw_event);
		break;
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		_scsih_sas_enclosure_dev_status_change_event(ioc,
		    fw_event);
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		_scsih_sas_ir_config_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_VOLUME:
		_scsih_sas_ir_volume_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_PHYSICAL_DISK:
		_scsih_sas_ir_physical_disk_event(ioc, fw_event);
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
		_scsih_sas_ir_operation_status_event(ioc, fw_event);
		break;
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		_scsih_pcie_device_status_change_event(ioc, fw_event);
		break;
	case MPI2_EVENT_PCIE_ENUMERATION:
		_scsih_pcie_enumeration_event(ioc, fw_event);
		break;
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		_scsih_pcie_topology_change_event(ioc, fw_event);
		/*
		 * NOTE(review): this case returns without the common
		 * fw_event_work_put() below — presumably the pcie topology
		 * handler consumes/requeues the reference itself; confirm
		 * against _scsih_pcie_topology_change_event().
		 */
		ioc->current_event = NULL;
		return;
	}
out:
	/* Common exit: drop our reference and clear the in-flight marker. */
	fw_event_work_put(fw_event);
	ioc->current_event = NULL;
}
10801  
10802  /**
10803   * _firmware_event_work
10804   * @work: The fw_event_work object
10805   * Context: user.
10806   *
10807   * wrappers for the work thread handling firmware events
10808   */
10809  
10810  static void
_firmware_event_work(struct work_struct * work)10811  _firmware_event_work(struct work_struct *work)
10812  {
10813  	struct fw_event_work *fw_event = container_of(work,
10814  	    struct fw_event_work, work);
10815  
10816  	_mpt3sas_fw_work(fw_event->ioc, fw_event);
10817  }
10818  
10819  /**
10820   * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
10821   * @ioc: per adapter object
10822   * @msix_index: MSIX table index supplied by the OS
10823   * @reply: reply message frame(lower 32bit addr)
10824   * Context: interrupt.
10825   *
10826   * This function merely adds a new work task into ioc->firmware_event_thread.
10827   * The tasks are worked from _firmware_event_work in user context.
10828   *
10829   * Return: 1 meaning mf should be freed from _base_interrupt
10830   *         0 means the mf is freed from this function.
10831   */
u8
mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
	u32 reply)
{
	struct fw_event_work *fw_event;
	Mpi2EventNotificationReply_t *mpi_reply;
	u16 event;
	u16 sz;
	Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;

	/* events turned off due to host reset */
	if (ioc->pci_error_recovery)
		return 1;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);

	if (unlikely(!mpi_reply)) {
		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	event = le16_to_cpu(mpi_reply->Event);

	/* Feed the diag-trigger logic for everything but log-entry events. */
	if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
		mpt3sas_trigger_event(ioc, event, 0);

	/*
	 * Fast pre-filtering at interrupt time: events that need no worker
	 * processing return 1 immediately; the rest fall through to be
	 * queued as fw_event work at the bottom.
	 */
	switch (event) {
	/* handle these */
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
	{
		Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
		    (Mpi2EventDataSasBroadcastPrimitive_t *)
		    mpi_reply->EventData;

		/* Only asynchronous-event primitives are of interest. */
		if (baen_data->Primitive !=
		    MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
			return 1;

		/* Coalesce: count it if one is already being worked on. */
		if (ioc->broadcast_aen_busy) {
			ioc->broadcast_aen_pending++;
			return 1;
		} else
			ioc->broadcast_aen_busy = 1;
		break;
	}

	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		_scsih_check_topo_delete_events(ioc,
		    (Mpi2EventDataSasTopologyChangeList_t *)
		    mpi_reply->EventData);
		/*
		 * No need to add the topology change list
		 * event to fw event work queue when
		 * diag reset is going on. Since during diag
		 * reset driver scan the devices by reading
		 * sas device page0's not by processing the
		 * events.
		 */
		if (ioc->shost_recovery)
			return 1;
		break;
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
	_scsih_check_pcie_topo_remove_events(ioc,
		    (Mpi26EventDataPCIeTopologyChangeList_t *)
		    mpi_reply->EventData);
		/* As above: skip queueing while a diag reset is in progress. */
		if (ioc->shost_recovery)
			return 1;
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		_scsih_check_ir_config_unhide_events(ioc,
		    (Mpi2EventDataIrConfigChangeList_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_IR_VOLUME:
		_scsih_check_volume_delete_events(ioc,
		    (Mpi2EventDataIrVolume_t *)
		    mpi_reply->EventData);
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
	{
		Mpi2EventDataLogEntryAdded_t *log_entry;
		u32 log_code;

		/* WarpDrive-specific log entries only. */
		if (!ioc->is_warpdrive)
			break;

		log_entry = (Mpi2EventDataLogEntryAdded_t *)
		    mpi_reply->EventData;
		log_code = le32_to_cpu(*(__le32 *)log_entry->LogData);

		if (le16_to_cpu(log_entry->LogEntryQualifier)
		    != MPT2_WARPDRIVE_LOGENTRY)
			break;

		switch (log_code) {
		case MPT2_WARPDRIVE_LC_SSDT:
			ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLW:
			ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_SSDLF:
			ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
			break;
		case MPT2_WARPDRIVE_LC_BRMF:
			ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
			break;
		}

		break;
	}
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		_scsih_sas_device_status_change_event(ioc,
		    (Mpi2EventDataSasDeviceStatusChange_t *)
		    mpi_reply->EventData);
		break;
	/* These events require no ISR-time handling; queue them as-is. */
	case MPI2_EVENT_IR_OPERATION_STATUS:
	case MPI2_EVENT_SAS_DISCOVERY:
	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
	case MPI2_EVENT_IR_PHYSICAL_DISK:
	case MPI2_EVENT_PCIE_ENUMERATION:
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		break;

	case MPI2_EVENT_TEMP_THRESHOLD:
		_scsih_temp_threshold_events(ioc,
			(Mpi2EventDataTemperature_t *)
			mpi_reply->EventData);
		break;
	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
		ActiveCableEventData =
		    (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
		switch (ActiveCableEventData->ReasonCode) {
		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
			ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
				   ActiveCableEventData->ReceptacleID);
			pr_notice("cannot be powered and devices connected\n");
			pr_notice("to this active cable will not be seen\n");
			pr_notice("This active cable requires %d mW of power\n",
			    le32_to_cpu(
			    ActiveCableEventData->ActiveCablePowerRequirement));
			break;

		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
			ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
				   ActiveCableEventData->ReceptacleID);
			pr_notice(
			    "is not running at optimal speed(12 Gb/s rate)\n");
			break;
		}

		break;

	default: /* ignore the rest */
		return 1;
	}

	/* EventDataLength is in 32-bit dwords; convert to bytes. */
	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
	fw_event = alloc_fw_event_work(sz);
	if (!fw_event) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		return 1;
	}

	/* Copy the payload and queue the event for user-context handling. */
	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
	fw_event->ioc = ioc;
	fw_event->VF_ID = mpi_reply->VF_ID;
	fw_event->VP_ID = mpi_reply->VP_ID;
	fw_event->event = event;
	_scsih_fw_event_add(ioc, fw_event);
	fw_event_work_put(fw_event);
	return 1;
}
11008  
11009  /**
11010   * _scsih_expander_node_remove - removing expander device from list.
11011   * @ioc: per adapter object
11012   * @sas_expander: the sas_device object
11013   *
11014   * Removing object and freeing associated memory from the
11015   * ioc->sas_expander_list.
11016   */
static void
_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
	struct _sas_node *sas_expander)
{
	struct _sas_port *mpt3sas_port, *next;
	unsigned long flags;
	int port_id;

	/* remove sibling ports attached to this expander */
	list_for_each_entry_safe(mpt3sas_port, next,
	   &sas_expander->sas_port_list, port_list) {
		/* Abandon teardown if a host reset starts mid-walk. */
		if (ioc->shost_recovery)
			return;
		if (mpt3sas_port->remote_identify.device_type ==
		    SAS_END_DEVICE)
			mpt3sas_device_remove_by_sas_address(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
		else if (mpt3sas_port->remote_identify.device_type ==
		    SAS_EDGE_EXPANDER_DEVICE ||
		    mpt3sas_port->remote_identify.device_type ==
		    SAS_FANOUT_EXPANDER_DEVICE)
			/* Child expanders are removed recursively. */
			mpt3sas_expander_remove(ioc,
			    mpt3sas_port->remote_identify.sas_address,
			    mpt3sas_port->hba_port);
	}

	/* Snapshot the port id before the transport layer tears it down. */
	port_id = sas_expander->port->port_id;

	mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
	    sas_expander->sas_address_parent, sas_expander->port);

	ioc_info(ioc,
	    "expander_remove: handle(0x%04x), sas_addr(0x%016llx), port:%d\n",
	    sas_expander->handle, (unsigned long long)
	    sas_expander->sas_address,
	    port_id);

	/* Unlink from ioc->sas_expander_list under the node lock. */
	spin_lock_irqsave(&ioc->sas_node_lock, flags);
	list_del(&sas_expander->list);
	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);

	kfree(sas_expander->phy);
	kfree(sas_expander);
}
11062  
11063  /**
11064   * _scsih_nvme_shutdown - NVMe shutdown notification
11065   * @ioc: per adapter object
11066   *
11067   * Sending IoUnitControl request with shutdown operation code to alert IOC that
11068   * the host system is shutting down so that IOC can issue NVMe shutdown to
11069   * NVMe drives attached to it.
11070   */
static void
_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26IoUnitControlRequest_t *mpi_request;
	Mpi26IoUnitControlReply_t *mpi_reply;
	u16 smid;

	/* are there any NVMe devices ? */
	if (list_empty(&ioc->pcie_device_list))
		return;

	/* Serialize use of the shared internal scsih command slot. */
	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}

	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc,
		    "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	/* Build the IO Unit Control SHUTDOWN request in smid's frame. */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;

	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	/* Wait for max_shutdown_latency seconds */
	ioc_info(ioc,
		"Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
		ioc->max_shutdown_latency);
	wait_for_completion_timeout(&ioc->scsih_cmds.done,
			ioc->max_shutdown_latency*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	/* Log IOC status/loginfo when the firmware returned a reply frame. */
	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		ioc_info(ioc, "Io Unit Control shutdown (complete):"
			"ioc_status(0x%04x), loginfo(0x%08x)\n",
			le16_to_cpu(mpi_reply->IOCStatus),
			le32_to_cpu(mpi_reply->IOCLogInfo));
	}
 out:
	/* Always release the command slot and the mutex, even on timeout. */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}
11130  
11131  
11132  /**
11133   * _scsih_ir_shutdown - IR shutdown notification
11134   * @ioc: per adapter object
11135   *
11136   * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
11137   * the host system is shutting down.
11138   */
static void
_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2RaidActionRequest_t *mpi_request;
	Mpi2RaidActionReply_t *mpi_reply;
	u16 smid;

	/* is IR firmware build loaded ? */
	if (!ioc->ir_firmware)
		return;

	/* are there any volumes ? */
	if (list_empty(&ioc->raid_device_list))
		return;

	/* Serialize use of the shared internal scsih command slot. */
	mutex_lock(&ioc->scsih_cmds.mutex);

	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;

	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}

	/* Build the RAID Action "system shutdown initiated" request. */
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));

	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;

	if (!ioc->hide_ir_msg)
		ioc_info(ioc, "IR shutdown (sending)\n");
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	/* Bounded wait: 10 seconds for the firmware to acknowledge. */
	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);

	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		goto out;
	}

	/* Log IOC status/loginfo when the firmware returned a reply frame. */
	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		if (!ioc->hide_ir_msg)
			ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
				 le16_to_cpu(mpi_reply->IOCStatus),
				 le32_to_cpu(mpi_reply->IOCLogInfo));
	}

 out:
	/* Always release the command slot and the mutex, even on timeout. */
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_unlock(&ioc->scsih_cmds.mutex);
}
11199  
11200  /**
11201   * _scsih_get_shost_and_ioc - get shost and ioc
11202   *			and verify whether they are NULL or not
11203   * @pdev: PCI device struct
11204   * @shost: address of scsi host pointer
11205   * @ioc: address of HBA adapter pointer
11206   *
11207   * Return zero if *shost and *ioc are not NULL otherwise return error number.
11208   */
11209  static int
_scsih_get_shost_and_ioc(struct pci_dev * pdev,struct Scsi_Host ** shost,struct MPT3SAS_ADAPTER ** ioc)11210  _scsih_get_shost_and_ioc(struct pci_dev *pdev,
11211  	struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
11212  {
11213  	*shost = pci_get_drvdata(pdev);
11214  	if (*shost == NULL) {
11215  		dev_err(&pdev->dev, "pdev's driver data is null\n");
11216  		return -ENXIO;
11217  	}
11218  
11219  	*ioc = shost_priv(*shost);
11220  	if (*ioc == NULL) {
11221  		dev_err(&pdev->dev, "shost's private data is null\n");
11222  		return -ENXIO;
11223  	}
11224  
11225  	return 0;
11226  }
11227  
11228  /**
11229   * scsih_remove - detach and remove add host
11230   * @pdev: PCI device struct
11231   *
11232   * Routine called when unloading the driver.
11233   */
scsih_remove(struct pci_dev * pdev)11234  static void scsih_remove(struct pci_dev *pdev)
11235  {
11236  	struct Scsi_Host *shost;
11237  	struct MPT3SAS_ADAPTER *ioc;
11238  	struct _sas_port *mpt3sas_port, *next_port;
11239  	struct _raid_device *raid_device, *next;
11240  	struct MPT3SAS_TARGET *sas_target_priv_data;
11241  	struct _pcie_device *pcie_device, *pcienext;
11242  	struct workqueue_struct	*wq;
11243  	unsigned long flags;
11244  	Mpi2ConfigReply_t mpi_reply;
11245  	struct hba_port *port, *port_next;
11246  
11247  	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
11248  		return;
11249  
11250  	ioc->remove_host = 1;
11251  
11252  	if (!pci_device_is_present(pdev)) {
11253  		mpt3sas_base_pause_mq_polling(ioc);
11254  		_scsih_flush_running_cmds(ioc);
11255  	}
11256  
11257  	_scsih_fw_event_cleanup_queue(ioc);
11258  
11259  	spin_lock_irqsave(&ioc->fw_event_lock, flags);
11260  	wq = ioc->firmware_event_thread;
11261  	ioc->firmware_event_thread = NULL;
11262  	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
11263  	if (wq)
11264  		destroy_workqueue(wq);
11265  	/*
11266  	 * Copy back the unmodified ioc page1. so that on next driver load,
11267  	 * current modified changes on ioc page1 won't take effect.
11268  	 */
11269  	if (ioc->is_aero_ioc)
11270  		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
11271  				&ioc->ioc_pg1_copy);
11272  	/* release all the volumes */
11273  	_scsih_ir_shutdown(ioc);
11274  	mpt3sas_destroy_debugfs(ioc);
11275  	sas_remove_host(shost);
11276  	list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
11277  	    list) {
11278  		if (raid_device->starget) {
11279  			sas_target_priv_data =
11280  			    raid_device->starget->hostdata;
11281  			sas_target_priv_data->deleted = 1;
11282  			scsi_remove_target(&raid_device->starget->dev);
11283  		}
11284  		ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
11285  			 raid_device->handle, (u64)raid_device->wwid);
11286  		_scsih_raid_device_remove(ioc, raid_device);
11287  	}
11288  	list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
11289  		list) {
11290  		_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
11291  		list_del_init(&pcie_device->list);
11292  		pcie_device_put(pcie_device);
11293  	}
11294  
11295  	/* free ports attached to the sas_host */
11296  	list_for_each_entry_safe(mpt3sas_port, next_port,
11297  	   &ioc->sas_hba.sas_port_list, port_list) {
11298  		if (mpt3sas_port->remote_identify.device_type ==
11299  		    SAS_END_DEVICE)
11300  			mpt3sas_device_remove_by_sas_address(ioc,
11301  			    mpt3sas_port->remote_identify.sas_address,
11302  			    mpt3sas_port->hba_port);
11303  		else if (mpt3sas_port->remote_identify.device_type ==
11304  		    SAS_EDGE_EXPANDER_DEVICE ||
11305  		    mpt3sas_port->remote_identify.device_type ==
11306  		    SAS_FANOUT_EXPANDER_DEVICE)
11307  			mpt3sas_expander_remove(ioc,
11308  			    mpt3sas_port->remote_identify.sas_address,
11309  			    mpt3sas_port->hba_port);
11310  	}
11311  
11312  	list_for_each_entry_safe(port, port_next,
11313  	    &ioc->port_table_list, list) {
11314  		list_del(&port->list);
11315  		kfree(port);
11316  	}
11317  
11318  	/* free phys attached to the sas_host */
11319  	if (ioc->sas_hba.num_phys) {
11320  		kfree(ioc->sas_hba.phy);
11321  		ioc->sas_hba.phy = NULL;
11322  		ioc->sas_hba.num_phys = 0;
11323  	}
11324  
11325  	mpt3sas_base_detach(ioc);
11326  	mpt3sas_ctl_release(ioc);
11327  	spin_lock(&gioc_lock);
11328  	list_del(&ioc->list);
11329  	spin_unlock(&gioc_lock);
11330  	scsi_host_put(shost);
11331  }
11332  
/**
 * scsih_shutdown - routine call during system shutdown
 * @pdev: PCI device struct
 *
 * Quiesces the controller for shutdown: flushes firmware event handling,
 * restores the saved IOC page 1 on Aero controllers, shuts down RAID
 * volumes and NVMe devices, then soft-resets the IOC and releases its
 * interrupt resources.
 */
static void
scsih_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;
	struct workqueue_struct	*wq;
	unsigned long flags;
	Mpi2ConfigReply_t mpi_reply;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;

	ioc->remove_host = 1;

	/*
	 * Surprise removal: the device is gone from the bus, so terminate
	 * all outstanding commands instead of waiting on the hardware.
	 */
	if (!pci_device_is_present(pdev)) {
		mpt3sas_base_pause_mq_polling(ioc);
		_scsih_flush_running_cmds(ioc);
	}

	_scsih_fw_event_cleanup_queue(ioc);

	/*
	 * Detach the firmware-event workqueue under the lock so no new work
	 * can be queued on it, then destroy it outside the lock.
	 */
	spin_lock_irqsave(&ioc->fw_event_lock, flags);
	wq = ioc->firmware_event_thread;
	ioc->firmware_event_thread = NULL;
	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
	if (wq)
		destroy_workqueue(wq);
	/*
	 * Copy back the unmodified ioc page1 so that on next driver load,
	 * current modified changes on ioc page1 won't take effect.
	 */
	if (ioc->is_aero_ioc)
		mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply,
				&ioc->ioc_pg1_copy);

	_scsih_ir_shutdown(ioc);
	_scsih_nvme_shutdown(ioc);
	mpt3sas_base_mask_interrupts(ioc);
	mpt3sas_base_stop_watchdog(ioc);
	/* shost_recovery blocks new I/O while the soft reset runs */
	ioc->shost_recovery = 1;
	mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
	ioc->shost_recovery = 0;
	mpt3sas_base_free_irq(ioc);
	mpt3sas_base_disable_msix(ioc);
}
11382  
11383  
/**
 * _scsih_probe_boot_devices - reports 1st device
 * @ioc: per adapter object
 *
 * If specified in bios page 2, this routine reports the 1st
 * device scsi-ml or sas transport for persistent boot device
 * purposes.  Please refer to function _scsih_determine_boot_device()
 */
static void
_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
{
	u32 channel;
	void *device;
	struct _sas_device *sas_device;
	struct _raid_device *raid_device;
	struct _pcie_device *pcie_device;
	u16 handle;
	u64 sas_address_parent;
	u64 sas_address;
	unsigned long flags;
	int rc;
	int tid;
	struct hba_port *port;

	 /* no Bios, return immediately */
	if (!ioc->bios_pg3.BiosVersion)
		return;

	/*
	 * Boot device priority: requested boot device first, then the
	 * requested alternate, then the current boot device.
	 */
	device = NULL;
	if (ioc->req_boot_device.device) {
		device =  ioc->req_boot_device.device;
		channel = ioc->req_boot_device.channel;
	} else if (ioc->req_alt_boot_device.device) {
		device =  ioc->req_alt_boot_device.device;
		channel = ioc->req_alt_boot_device.channel;
	} else if (ioc->current_boot_device.device) {
		device =  ioc->current_boot_device.device;
		channel = ioc->current_boot_device.channel;
	}

	if (!device)
		return;

	/* The channel tells us which union member @device really is. */
	if (channel == RAID_CHANNEL) {
		raid_device = device;
		/*
		 * If this boot vd is already registered with SML then
		 * no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (raid_device->starget)
			return;
		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
		    raid_device->id, 0);
		if (rc)
			_scsih_raid_device_remove(ioc, raid_device);
	} else if (channel == PCIE_CHANNEL) {
		pcie_device = device;
		/*
		 * If this boot NVMe device is already registered with SML then
		 * no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (pcie_device->starget)
			return;
		/* Promote the device to the active list under the lock. */
		spin_lock_irqsave(&ioc->pcie_device_lock, flags);
		tid = pcie_device->id;
		list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
		spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
		if (rc)
			_scsih_pcie_device_remove(ioc, pcie_device);
	} else {
		sas_device = device;
		/*
		 * If this boot sas/sata device is already registered with SML
		 * then no need to register it again as part of device scanning
		 * after diag reset during driver load operation.
		 */
		if (sas_device->starget)
			return;
		/*
		 * Snapshot the addressing info and move the device to the
		 * active list while holding the lock; port_add below runs
		 * without it.
		 */
		spin_lock_irqsave(&ioc->sas_device_lock, flags);
		handle = sas_device->handle;
		sas_address_parent = sas_device->sas_address_parent;
		sas_address = sas_device->sas_address;
		port = sas_device->port;
		list_move_tail(&sas_device->list, &ioc->sas_device_list);
		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);

		if (ioc->hide_drives)
			return;

		if (!port)
			return;

		if (!mpt3sas_transport_port_add(ioc, handle,
		    sas_address_parent, port)) {
			_scsih_sas_device_remove(ioc, sas_device);
		} else if (!sas_device->starget) {
			/*
			 * Port added but no scsi target appeared; tear it
			 * down only after driver load finishes (removal
			 * during async scanning is not safe).
			 */
			if (!ioc->is_driver_loading) {
				mpt3sas_transport_port_remove(ioc,
				    sas_address,
				    sas_address_parent, port);
				_scsih_sas_device_remove(ioc, sas_device);
			}
		}
	}
}
11492  
11493  /**
11494   * _scsih_probe_raid - reporting raid volumes to scsi-ml
11495   * @ioc: per adapter object
11496   *
11497   * Called during initial loading of the driver.
11498   */
11499  static void
_scsih_probe_raid(struct MPT3SAS_ADAPTER * ioc)11500  _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
11501  {
11502  	struct _raid_device *raid_device, *raid_next;
11503  	int rc;
11504  
11505  	list_for_each_entry_safe(raid_device, raid_next,
11506  	    &ioc->raid_device_list, list) {
11507  		if (raid_device->starget)
11508  			continue;
11509  		rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
11510  		    raid_device->id, 0);
11511  		if (rc)
11512  			_scsih_raid_device_remove(ioc, raid_device);
11513  	}
11514  }
11515  
get_next_sas_device(struct MPT3SAS_ADAPTER * ioc)11516  static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
11517  {
11518  	struct _sas_device *sas_device = NULL;
11519  	unsigned long flags;
11520  
11521  	spin_lock_irqsave(&ioc->sas_device_lock, flags);
11522  	if (!list_empty(&ioc->sas_device_init_list)) {
11523  		sas_device = list_first_entry(&ioc->sas_device_init_list,
11524  				struct _sas_device, list);
11525  		sas_device_get(sas_device);
11526  	}
11527  	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
11528  
11529  	return sas_device;
11530  }
11531  
/*
 * sas_device_make_active - move @sas_device onto the active list
 * @ioc: per adapter object
 * @sas_device: device to place on ioc->sas_device_list
 *
 * Transfers the device to sas_device_list, adjusting the reference
 * counts that the lists own.  Safe to call regardless of which list
 * (if any) currently holds the device.
 */
static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
		struct _sas_device *sas_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->sas_device_lock, flags);

	/*
	 * Since we dropped the lock during the call to port_add(), we need to
	 * be careful here that somebody else didn't move or delete this item
	 * while we were busy with other things.
	 *
	 * If it was on the list, we need a put() for the reference the list
	 * had. Either way, we need a get() for the destination list.
	 */
	if (!list_empty(&sas_device->list)) {
		list_del_init(&sas_device->list);
		sas_device_put(sas_device);
	}

	sas_device_get(sas_device);
	list_add_tail(&sas_device->list, &ioc->sas_device_list);

	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
11557  
11558  /**
11559   * _scsih_probe_sas - reporting sas devices to sas transport
11560   * @ioc: per adapter object
11561   *
11562   * Called during initial loading of the driver.
11563   */
11564  static void
_scsih_probe_sas(struct MPT3SAS_ADAPTER * ioc)11565  _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
11566  {
11567  	struct _sas_device *sas_device;
11568  
11569  	if (ioc->hide_drives)
11570  		return;
11571  
11572  	while ((sas_device = get_next_sas_device(ioc))) {
11573  		if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
11574  		    sas_device->sas_address_parent, sas_device->port)) {
11575  			_scsih_sas_device_remove(ioc, sas_device);
11576  			sas_device_put(sas_device);
11577  			continue;
11578  		} else if (!sas_device->starget) {
11579  			/*
11580  			 * When asyn scanning is enabled, its not possible to
11581  			 * remove devices while scanning is turned on due to an
11582  			 * oops in scsi_sysfs_add_sdev()->add_device()->
11583  			 * sysfs_addrm_start()
11584  			 */
11585  			if (!ioc->is_driver_loading) {
11586  				mpt3sas_transport_port_remove(ioc,
11587  				    sas_device->sas_address,
11588  				    sas_device->sas_address_parent,
11589  				    sas_device->port);
11590  				_scsih_sas_device_remove(ioc, sas_device);
11591  				sas_device_put(sas_device);
11592  				continue;
11593  			}
11594  		}
11595  		sas_device_make_active(ioc, sas_device);
11596  		sas_device_put(sas_device);
11597  	}
11598  }
11599  
11600  /**
11601   * get_next_pcie_device - Get the next pcie device
11602   * @ioc: per adapter object
11603   *
11604   * Get the next pcie device from pcie_device_init_list list.
11605   *
11606   * Return: pcie device structure if pcie_device_init_list list is not empty
11607   * otherwise returns NULL
11608   */
get_next_pcie_device(struct MPT3SAS_ADAPTER * ioc)11609  static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
11610  {
11611  	struct _pcie_device *pcie_device = NULL;
11612  	unsigned long flags;
11613  
11614  	spin_lock_irqsave(&ioc->pcie_device_lock, flags);
11615  	if (!list_empty(&ioc->pcie_device_init_list)) {
11616  		pcie_device = list_first_entry(&ioc->pcie_device_init_list,
11617  				struct _pcie_device, list);
11618  		pcie_device_get(pcie_device);
11619  	}
11620  	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
11621  
11622  	return pcie_device;
11623  }
11624  
/**
 * pcie_device_make_active - Add pcie device to pcie_device_list list
 * @ioc: per adapter object
 * @pcie_device: pcie device object
 *
 * Add the pcie device which has registered with SCSI Transport Layer to
 * pcie_device_list list
 */
static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
		struct _pcie_device *pcie_device)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->pcie_device_lock, flags);

	/*
	 * If the device is still on a list, drop the reference that list
	 * owned; either way take a new reference on behalf of the
	 * destination pcie_device_list before linking it there.
	 */
	if (!list_empty(&pcie_device->list)) {
		list_del_init(&pcie_device->list);
		pcie_device_put(pcie_device);
	}
	pcie_device_get(pcie_device);
	list_add_tail(&pcie_device->list, &ioc->pcie_device_list);

	spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
11649  
11650  /**
11651   * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
11652   * @ioc: per adapter object
11653   *
11654   * Called during initial loading of the driver.
11655   */
11656  static void
_scsih_probe_pcie(struct MPT3SAS_ADAPTER * ioc)11657  _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
11658  {
11659  	struct _pcie_device *pcie_device;
11660  	int rc;
11661  
11662  	/* PCIe Device List */
11663  	while ((pcie_device = get_next_pcie_device(ioc))) {
11664  		if (pcie_device->starget) {
11665  			pcie_device_put(pcie_device);
11666  			continue;
11667  		}
11668  		if (pcie_device->access_status ==
11669  		    MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
11670  			pcie_device_make_active(ioc, pcie_device);
11671  			pcie_device_put(pcie_device);
11672  			continue;
11673  		}
11674  		rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
11675  			pcie_device->id, 0);
11676  		if (rc) {
11677  			_scsih_pcie_device_remove(ioc, pcie_device);
11678  			pcie_device_put(pcie_device);
11679  			continue;
11680  		} else if (!pcie_device->starget) {
11681  			/*
11682  			 * When async scanning is enabled, its not possible to
11683  			 * remove devices while scanning is turned on due to an
11684  			 * oops in scsi_sysfs_add_sdev()->add_device()->
11685  			 * sysfs_addrm_start()
11686  			 */
11687  			if (!ioc->is_driver_loading) {
11688  			/* TODO-- Need to find out whether this condition will
11689  			 * occur or not
11690  			 */
11691  				_scsih_pcie_device_remove(ioc, pcie_device);
11692  				pcie_device_put(pcie_device);
11693  				continue;
11694  			}
11695  		}
11696  		pcie_device_make_active(ioc, pcie_device);
11697  		pcie_device_put(pcie_device);
11698  	}
11699  }
11700  
11701  /**
11702   * _scsih_probe_devices - probing for devices
11703   * @ioc: per adapter object
11704   *
11705   * Called during initial loading of the driver.
11706   */
11707  static void
_scsih_probe_devices(struct MPT3SAS_ADAPTER * ioc)11708  _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
11709  {
11710  	u16 volume_mapping_flags;
11711  
11712  	if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
11713  		return;  /* return when IOC doesn't support initiator mode */
11714  
11715  	_scsih_probe_boot_devices(ioc);
11716  
11717  	if (ioc->ir_firmware) {
11718  		volume_mapping_flags =
11719  		    le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
11720  		    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
11721  		if (volume_mapping_flags ==
11722  		    MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
11723  			_scsih_probe_raid(ioc);
11724  			_scsih_probe_sas(ioc);
11725  		} else {
11726  			_scsih_probe_sas(ioc);
11727  			_scsih_probe_raid(ioc);
11728  		}
11729  	} else {
11730  		_scsih_probe_sas(ioc);
11731  		_scsih_probe_pcie(ioc);
11732  	}
11733  }
11734  
11735  /**
11736   * scsih_scan_start - scsi lld callback for .scan_start
11737   * @shost: SCSI host pointer
11738   *
11739   * The shost has the ability to discover targets on its own instead
11740   * of scanning the entire bus.  In our implemention, we will kick off
11741   * firmware discovery.
11742   */
11743  static void
scsih_scan_start(struct Scsi_Host * shost)11744  scsih_scan_start(struct Scsi_Host *shost)
11745  {
11746  	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
11747  	int rc;
11748  	if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
11749  		mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
11750  	else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
11751  		mpt3sas_enable_diag_buffer(ioc, 1);
11752  
11753  	if (disable_discovery > 0)
11754  		return;
11755  
11756  	ioc->start_scan = 1;
11757  	rc = mpt3sas_port_enable(ioc);
11758  
11759  	if (rc != 0)
11760  		ioc_info(ioc, "port enable: FAILED\n");
11761  }
11762  
11763  /**
11764   * _scsih_complete_devices_scanning - add the devices to sml and
11765   * complete ioc initialization.
11766   * @ioc: per adapter object
11767   *
11768   * Return nothing.
11769   */
_scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER * ioc)11770  static void _scsih_complete_devices_scanning(struct MPT3SAS_ADAPTER *ioc)
11771  {
11772  
11773  	if (ioc->wait_for_discovery_to_complete) {
11774  		ioc->wait_for_discovery_to_complete = 0;
11775  		_scsih_probe_devices(ioc);
11776  	}
11777  
11778  	mpt3sas_base_start_watchdog(ioc);
11779  	ioc->is_driver_loading = 0;
11780  }
11781  
/**
 * scsih_scan_finished - scsi lld callback for .scan_finished
 * @shost: SCSI host pointer
 * @time: elapsed time of the scan in jiffies
 *
 * This function will be called periodically until it returns 1 with the
 * scsi_host and the elapsed time of the scan in jiffies. In our
 * implementation, we wait for firmware discovery to complete, then return 1.
 *
 * Return: 0 to keep polling, 1 when the scan is finished (whether it
 * succeeded, timed out, or was aborted).
 */
static int
scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	u32 ioc_state;
	int issue_hard_reset = 0;

	/* discovery disabled via module parameter: finish immediately */
	if (disable_discovery > 0) {
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		return 1;
	}

	/* give up on port enable after 300 seconds */
	if (time >= (300 * HZ)) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
		ioc->is_driver_loading = 0;
		return 1;
	}

	/*
	 * Port enable has not completed yet: check whether the IOC has
	 * faulted or entered coredump state in the meantime; otherwise
	 * keep polling.
	 */
	if (ioc->start_scan) {
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
			mpt3sas_print_fault_code(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			issue_hard_reset = 1;
			goto out;
		} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
				MPI2_IOC_STATE_COREDUMP) {
			mpt3sas_base_coredump_info(ioc, ioc_state &
			    MPI2_DOORBELL_DATA_MASK);
			mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
			issue_hard_reset = 1;
			goto out;
		}
		return 0;
	}

	/* a diag reset aborted the port enable command */
	if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) {
		ioc_info(ioc,
		    "port enable: aborted due to diag reset\n");
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}
	/* firmware reported a failure status for port enable */
	if (ioc->start_scan_failed) {
		ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
			 ioc->start_scan_failed);
		ioc->is_driver_loading = 0;
		ioc->wait_for_discovery_to_complete = 0;
		ioc->remove_host = 1;
		return 1;
	}

	ioc_info(ioc, "port enable: SUCCESS\n");
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
	_scsih_complete_devices_scanning(ioc);

out:
	if (issue_hard_reset) {
		ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
		/* attempt recovery; if that also fails, abandon the load */
		if (mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET))
			ioc->is_driver_loading = 0;
	}
	return 1;
}
11856  
11857  /**
11858   * scsih_map_queues - map reply queues with request queues
11859   * @shost: SCSI host pointer
11860   */
scsih_map_queues(struct Scsi_Host * shost)11861  static void scsih_map_queues(struct Scsi_Host *shost)
11862  {
11863  	struct MPT3SAS_ADAPTER *ioc =
11864  	    (struct MPT3SAS_ADAPTER *)shost->hostdata;
11865  	struct blk_mq_queue_map *map;
11866  	int i, qoff, offset;
11867  	int nr_msix_vectors = ioc->iopoll_q_start_index;
11868  	int iopoll_q_count = ioc->reply_queue_count - nr_msix_vectors;
11869  
11870  	if (shost->nr_hw_queues == 1)
11871  		return;
11872  
11873  	for (i = 0, qoff = 0; i < shost->nr_maps; i++) {
11874  		map = &shost->tag_set.map[i];
11875  		map->nr_queues = 0;
11876  		offset = 0;
11877  		if (i == HCTX_TYPE_DEFAULT) {
11878  			map->nr_queues =
11879  			    nr_msix_vectors - ioc->high_iops_queues;
11880  			offset = ioc->high_iops_queues;
11881  		} else if (i == HCTX_TYPE_POLL)
11882  			map->nr_queues = iopoll_q_count;
11883  
11884  		if (!map->nr_queues)
11885  			BUG_ON(i == HCTX_TYPE_DEFAULT);
11886  
11887  		/*
11888  		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
11889  		 * affinity), so use the regular blk-mq cpu mapping
11890  		 */
11891  		map->queue_offset = qoff;
11892  		if (i != HCTX_TYPE_POLL)
11893  			blk_mq_pci_map_queues(map, ioc->pdev, offset);
11894  		else
11895  			blk_mq_map_queues(map);
11896  
11897  		qoff += map->nr_queues;
11898  	}
11899  }
11900  
/* shost template for SAS 2.0 HBA devices */
static const struct scsi_host_template mpt2sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT2SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.slave_alloc			= scsih_slave_alloc,
	.device_configure		= scsih_device_configure,
	.target_destroy			= scsih_target_destroy,
	.slave_destroy			= scsih_slave_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	/* NOTE(review): can_queue of 1 looks like a placeholder presumably
	 * raised later from IOC facts -- confirm against init code. */
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT2SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.cmd_per_lun			= 7,
	.shost_groups			= mpt3sas_host_groups,
	.sdev_groups			= mpt3sas_dev_groups,
	.track_queue_depth		= 1,
	/* per-command private data: SCSI I/O tracker */
	.cmd_size			= sizeof(struct scsiio_tracker),
};
11930  
/* raid transport support for SAS 2.0 HBA devices */
static struct raid_function_template mpt2sas_raid_functions = {
	/* cookie ties these ops to hosts using the SAS 2.0 shost template */
	.cookie		= &mpt2sas_driver_template,
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};
11938  
/* shost template for SAS 3.0 HBA devices */
static const struct scsi_host_template mpt3sas_driver_template = {
	.module				= THIS_MODULE,
	.name				= "Fusion MPT SAS Host",
	.proc_name			= MPT3SAS_DRIVER_NAME,
	.queuecommand			= scsih_qcmd,
	.target_alloc			= scsih_target_alloc,
	.slave_alloc			= scsih_slave_alloc,
	.device_configure		= scsih_device_configure,
	.target_destroy			= scsih_target_destroy,
	.slave_destroy			= scsih_slave_destroy,
	.scan_finished			= scsih_scan_finished,
	.scan_start			= scsih_scan_start,
	.change_queue_depth		= scsih_change_queue_depth,
	.eh_abort_handler		= scsih_abort,
	.eh_device_reset_handler	= scsih_dev_reset,
	.eh_target_reset_handler	= scsih_target_reset,
	.eh_host_reset_handler		= scsih_host_reset,
	.bios_param			= scsih_bios_param,
	/* NOTE(review): can_queue of 1 looks like a placeholder presumably
	 * raised later from IOC facts -- confirm against init code. */
	.can_queue			= 1,
	.this_id			= -1,
	.sg_tablesize			= MPT3SAS_SG_DEPTH,
	.max_sectors			= 32767,
	.max_segment_size		= 0xffffffff,
	.cmd_per_lun			= 128,
	.shost_groups			= mpt3sas_host_groups,
	.sdev_groups			= mpt3sas_dev_groups,
	.track_queue_depth		= 1,
	/* per-command private data: SCSI I/O tracker */
	.cmd_size			= sizeof(struct scsiio_tracker),
	/* blk-mq queue mapping and polled-I/O support (see scsih_map_queues) */
	.map_queues			= scsih_map_queues,
	.mq_poll			= mpt3sas_blk_mq_poll,
};
11971  
/* raid transport support for SAS 3.0 HBA devices */
static struct raid_function_template mpt3sas_raid_functions = {
	/* cookie ties these ops to hosts using the SAS 3.0 shost template */
	.cookie		= &mpt3sas_driver_template,
	.is_raid	= scsih_is_raid,
	.get_resync	= scsih_get_resync,
	.get_state	= scsih_get_state,
};
11979  
/**
 * _scsih_determine_hba_mpi_version - determine in which MPI version class
 *					this device belongs to.
 * @pdev: PCI device struct
 *
 * return MPI2_VERSION for SAS 2.0 HBA devices,
 *	MPI25_VERSION for SAS 3.0 HBA devices, and
 *	MPI26 VERSION for Cutlass & Invader SAS 3.0 HBA devices,
 *	or 0 for an unrecognized PCI device ID.
 */
static u16
_scsih_determine_hba_mpi_version(struct pci_dev *pdev)
{

	switch (pdev->device) {
	/* SAS 2.0 generation controllers */
	case MPI2_MFGPAGE_DEVID_SSS6200:
	case MPI2_MFGPAGE_DEVID_SAS2004:
	case MPI2_MFGPAGE_DEVID_SAS2008:
	case MPI2_MFGPAGE_DEVID_SAS2108_1:
	case MPI2_MFGPAGE_DEVID_SAS2108_2:
	case MPI2_MFGPAGE_DEVID_SAS2108_3:
	case MPI2_MFGPAGE_DEVID_SAS2116_1:
	case MPI2_MFGPAGE_DEVID_SAS2116_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_1:
	case MPI2_MFGPAGE_DEVID_SAS2208_2:
	case MPI2_MFGPAGE_DEVID_SAS2208_3:
	case MPI2_MFGPAGE_DEVID_SAS2208_4:
	case MPI2_MFGPAGE_DEVID_SAS2208_5:
	case MPI2_MFGPAGE_DEVID_SAS2208_6:
	case MPI2_MFGPAGE_DEVID_SAS2308_1:
	case MPI2_MFGPAGE_DEVID_SAS2308_2:
	case MPI2_MFGPAGE_DEVID_SAS2308_3:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
	case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
		return MPI2_VERSION;
	/* SAS 3.0 generation controllers */
	case MPI25_MFGPAGE_DEVID_SAS3004:
	case MPI25_MFGPAGE_DEVID_SAS3008:
	case MPI25_MFGPAGE_DEVID_SAS3108_1:
	case MPI25_MFGPAGE_DEVID_SAS3108_2:
	case MPI25_MFGPAGE_DEVID_SAS3108_5:
	case MPI25_MFGPAGE_DEVID_SAS3108_6:
		return MPI25_VERSION;
	/* MPI 2.6 generation controllers (including secure/invalid/tampered
	 * variants, which are sorted out later in _scsih_probe()) */
	case MPI26_MFGPAGE_DEVID_SAS3216:
	case MPI26_MFGPAGE_DEVID_SAS3224:
	case MPI26_MFGPAGE_DEVID_SAS3316_1:
	case MPI26_MFGPAGE_DEVID_SAS3316_2:
	case MPI26_MFGPAGE_DEVID_SAS3316_3:
	case MPI26_MFGPAGE_DEVID_SAS3316_4:
	case MPI26_MFGPAGE_DEVID_SAS3324_1:
	case MPI26_MFGPAGE_DEVID_SAS3324_2:
	case MPI26_MFGPAGE_DEVID_SAS3324_3:
	case MPI26_MFGPAGE_DEVID_SAS3324_4:
	case MPI26_MFGPAGE_DEVID_SAS3508:
	case MPI26_MFGPAGE_DEVID_SAS3508_1:
	case MPI26_MFGPAGE_DEVID_SAS3408:
	case MPI26_MFGPAGE_DEVID_SAS3516:
	case MPI26_MFGPAGE_DEVID_SAS3516_1:
	case MPI26_MFGPAGE_DEVID_SAS3416:
	case MPI26_MFGPAGE_DEVID_SAS3616:
	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
	case MPI26_MFGPAGE_DEVID_INVALID0_3916:
	case MPI26_MFGPAGE_DEVID_INVALID1_3916:
	case MPI26_MFGPAGE_DEVID_INVALID0_3816:
	case MPI26_MFGPAGE_DEVID_INVALID1_3816:
		return MPI26_VERSION;
	}
	/* unknown device ID: caller treats 0 as "not ours" */
	return 0;
}
12051  
12052  /**
12053   * _scsih_probe - attach and add scsi host
12054   * @pdev: PCI device struct
12055   * @id: pci device id
12056   *
12057   * Return: 0 success, anything else error.
12058   */
12059  static int
_scsih_probe(struct pci_dev * pdev,const struct pci_device_id * id)12060  _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
12061  {
12062  	struct MPT3SAS_ADAPTER *ioc;
12063  	struct Scsi_Host *shost = NULL;
12064  	int rv;
12065  	u16 hba_mpi_version;
12066  	int iopoll_q_count = 0;
12067  
12068  	/* Determine in which MPI version class this pci device belongs */
12069  	hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
12070  	if (hba_mpi_version == 0)
12071  		return -ENODEV;
12072  
12073  	/* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one,
12074  	 * for other generation HBA's return with -ENODEV
12075  	 */
12076  	if ((hbas_to_enumerate == 1) && (hba_mpi_version !=  MPI2_VERSION))
12077  		return -ENODEV;
12078  
12079  	/* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two,
12080  	 * for other generation HBA's return with -ENODEV
12081  	 */
12082  	if ((hbas_to_enumerate == 2) && (!(hba_mpi_version ==  MPI25_VERSION
12083  		|| hba_mpi_version ==  MPI26_VERSION)))
12084  		return -ENODEV;
12085  
12086  	switch (hba_mpi_version) {
12087  	case MPI2_VERSION:
12088  		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
12089  			PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
12090  		/* Use mpt2sas driver host template for SAS 2.0 HBA's */
12091  		shost = scsi_host_alloc(&mpt2sas_driver_template,
12092  		  sizeof(struct MPT3SAS_ADAPTER));
12093  		if (!shost)
12094  			return -ENODEV;
12095  		ioc = shost_priv(shost);
12096  		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
12097  		ioc->hba_mpi_version_belonged = hba_mpi_version;
12098  		ioc->id = mpt2_ids++;
12099  		sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
12100  		switch (pdev->device) {
12101  		case MPI2_MFGPAGE_DEVID_SSS6200:
12102  			ioc->is_warpdrive = 1;
12103  			ioc->hide_ir_msg = 1;
12104  			break;
12105  		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
12106  		case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
12107  			ioc->is_mcpu_endpoint = 1;
12108  			break;
12109  		default:
12110  			ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
12111  			break;
12112  		}
12113  
12114  		if (multipath_on_hba == -1 || multipath_on_hba == 0)
12115  			ioc->multipath_on_hba = 0;
12116  		else
12117  			ioc->multipath_on_hba = 1;
12118  
12119  		break;
12120  	case MPI25_VERSION:
12121  	case MPI26_VERSION:
12122  		/* Use mpt3sas driver host template for SAS 3.0 HBA's */
12123  		shost = scsi_host_alloc(&mpt3sas_driver_template,
12124  		  sizeof(struct MPT3SAS_ADAPTER));
12125  		if (!shost)
12126  			return -ENODEV;
12127  		ioc = shost_priv(shost);
12128  		memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
12129  		ioc->hba_mpi_version_belonged = hba_mpi_version;
12130  		ioc->id = mpt3_ids++;
12131  		sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
12132  		switch (pdev->device) {
12133  		case MPI26_MFGPAGE_DEVID_SAS3508:
12134  		case MPI26_MFGPAGE_DEVID_SAS3508_1:
12135  		case MPI26_MFGPAGE_DEVID_SAS3408:
12136  		case MPI26_MFGPAGE_DEVID_SAS3516:
12137  		case MPI26_MFGPAGE_DEVID_SAS3516_1:
12138  		case MPI26_MFGPAGE_DEVID_SAS3416:
12139  		case MPI26_MFGPAGE_DEVID_SAS3616:
12140  		case MPI26_ATLAS_PCIe_SWITCH_DEVID:
12141  			ioc->is_gen35_ioc = 1;
12142  			break;
12143  		case MPI26_MFGPAGE_DEVID_INVALID0_3816:
12144  		case MPI26_MFGPAGE_DEVID_INVALID0_3916:
12145  			dev_err(&pdev->dev,
12146  			    "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
12147  			    pdev->device, pdev->subsystem_vendor,
12148  			    pdev->subsystem_device);
12149  			return 1;
12150  		case MPI26_MFGPAGE_DEVID_INVALID1_3816:
12151  		case MPI26_MFGPAGE_DEVID_INVALID1_3916:
12152  			dev_err(&pdev->dev,
12153  			    "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
12154  			    pdev->device, pdev->subsystem_vendor,
12155  			    pdev->subsystem_device);
12156  			return 1;
12157  		case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
12158  		case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
12159  			dev_info(&pdev->dev,
12160  			    "HBA is in Configurable Secure mode\n");
12161  			fallthrough;
12162  		case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
12163  		case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
12164  			ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
12165  			break;
12166  		default:
12167  			ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
12168  		}
12169  		if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
12170  			pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
12171  			(ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
12172  			ioc->combined_reply_queue = 1;
12173  			if (ioc->is_gen35_ioc)
12174  				ioc->combined_reply_index_count =
12175  				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
12176  			else
12177  				ioc->combined_reply_index_count =
12178  				 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
12179  		}
12180  
12181  		switch (ioc->is_gen35_ioc) {
12182  		case 0:
12183  			if (multipath_on_hba == -1 || multipath_on_hba == 0)
12184  				ioc->multipath_on_hba = 0;
12185  			else
12186  				ioc->multipath_on_hba = 1;
12187  			break;
12188  		case 1:
12189  			if (multipath_on_hba == -1 || multipath_on_hba > 0)
12190  				ioc->multipath_on_hba = 1;
12191  			else
12192  				ioc->multipath_on_hba = 0;
12193  			break;
12194  		default:
12195  			break;
12196  		}
12197  
12198  		break;
12199  	default:
12200  		return -ENODEV;
12201  	}
12202  
12203  	INIT_LIST_HEAD(&ioc->list);
12204  	spin_lock(&gioc_lock);
12205  	list_add_tail(&ioc->list, &mpt3sas_ioc_list);
12206  	spin_unlock(&gioc_lock);
12207  	ioc->shost = shost;
12208  	ioc->pdev = pdev;
12209  	ioc->scsi_io_cb_idx = scsi_io_cb_idx;
12210  	ioc->tm_cb_idx = tm_cb_idx;
12211  	ioc->ctl_cb_idx = ctl_cb_idx;
12212  	ioc->base_cb_idx = base_cb_idx;
12213  	ioc->port_enable_cb_idx = port_enable_cb_idx;
12214  	ioc->transport_cb_idx = transport_cb_idx;
12215  	ioc->scsih_cb_idx = scsih_cb_idx;
12216  	ioc->config_cb_idx = config_cb_idx;
12217  	ioc->tm_tr_cb_idx = tm_tr_cb_idx;
12218  	ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
12219  	ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
12220  	ioc->logging_level = logging_level;
12221  	ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
12222  	/* Host waits for minimum of six seconds */
12223  	ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
12224  	/*
12225  	 * Enable MEMORY MOVE support flag.
12226  	 */
12227  	ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
12228  	/* Enable ADDITIONAL QUERY support flag. */
12229  	ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY;
12230  
12231  	ioc->enable_sdev_max_qd = enable_sdev_max_qd;
12232  
12233  	/* misc semaphores and spin locks */
12234  	mutex_init(&ioc->reset_in_progress_mutex);
12235  	mutex_init(&ioc->hostdiag_unlock_mutex);
12236  	/* initializing pci_access_mutex lock */
12237  	mutex_init(&ioc->pci_access_mutex);
12238  	spin_lock_init(&ioc->ioc_reset_in_progress_lock);
12239  	spin_lock_init(&ioc->scsi_lookup_lock);
12240  	spin_lock_init(&ioc->sas_device_lock);
12241  	spin_lock_init(&ioc->sas_node_lock);
12242  	spin_lock_init(&ioc->fw_event_lock);
12243  	spin_lock_init(&ioc->raid_device_lock);
12244  	spin_lock_init(&ioc->pcie_device_lock);
12245  	spin_lock_init(&ioc->diag_trigger_lock);
12246  
12247  	INIT_LIST_HEAD(&ioc->sas_device_list);
12248  	INIT_LIST_HEAD(&ioc->sas_device_init_list);
12249  	INIT_LIST_HEAD(&ioc->sas_expander_list);
12250  	INIT_LIST_HEAD(&ioc->enclosure_list);
12251  	INIT_LIST_HEAD(&ioc->pcie_device_list);
12252  	INIT_LIST_HEAD(&ioc->pcie_device_init_list);
12253  	INIT_LIST_HEAD(&ioc->fw_event_list);
12254  	INIT_LIST_HEAD(&ioc->raid_device_list);
12255  	INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
12256  	INIT_LIST_HEAD(&ioc->delayed_tr_list);
12257  	INIT_LIST_HEAD(&ioc->delayed_sc_list);
12258  	INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
12259  	INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
12260  	INIT_LIST_HEAD(&ioc->reply_queue_list);
12261  	INIT_LIST_HEAD(&ioc->port_table_list);
12262  
12263  	sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
12264  
12265  	/* init shost parameters */
12266  	shost->max_cmd_len = 32;
12267  	shost->max_lun = max_lun;
12268  	shost->transportt = mpt3sas_transport_template;
12269  	shost->unique_id = ioc->id;
12270  
12271  	if (ioc->is_mcpu_endpoint) {
12272  		/* mCPU MPI support 64K max IO */
12273  		shost->max_sectors = 128;
12274  		ioc_info(ioc, "The max_sectors value is set to %d\n",
12275  			 shost->max_sectors);
12276  	} else {
12277  		if (max_sectors != 0xFFFF) {
12278  			if (max_sectors < 64) {
12279  				shost->max_sectors = 64;
12280  				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
12281  					 max_sectors);
12282  			} else if (max_sectors > 32767) {
12283  				shost->max_sectors = 32767;
12284  				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767.Assigning default value of 32767.\n",
12285  					 max_sectors);
12286  			} else {
12287  				shost->max_sectors = max_sectors & 0xFFFE;
12288  				ioc_info(ioc, "The max_sectors value is set to %d\n",
12289  					 shost->max_sectors);
12290  			}
12291  		}
12292  	}
12293  	/* register EEDP capabilities with SCSI layer */
12294  	if (prot_mask >= 0)
12295  		scsi_host_set_prot(shost, (prot_mask & 0x07));
12296  	else
12297  		scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
12298  				   | SHOST_DIF_TYPE2_PROTECTION
12299  				   | SHOST_DIF_TYPE3_PROTECTION);
12300  
12301  	scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
12302  
12303  	/* event thread */
12304  	ioc->firmware_event_thread = alloc_ordered_workqueue(
12305  		"fw_event_%s%d", 0, ioc->driver_name, ioc->id);
12306  	if (!ioc->firmware_event_thread) {
12307  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
12308  			__FILE__, __LINE__, __func__);
12309  		rv = -ENODEV;
12310  		goto out_thread_fail;
12311  	}
12312  
12313  	shost->host_tagset = 0;
12314  
12315  	if (ioc->is_gen35_ioc && host_tagset_enable)
12316  		shost->host_tagset = 1;
12317  
12318  	ioc->is_driver_loading = 1;
12319  	if ((mpt3sas_base_attach(ioc))) {
12320  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
12321  			__FILE__, __LINE__, __func__);
12322  		rv = -ENODEV;
12323  		goto out_attach_fail;
12324  	}
12325  
12326  	if (ioc->is_warpdrive) {
12327  		if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_EXPOSE_ALL_DISKS)
12328  			ioc->hide_drives = 0;
12329  		else if (ioc->mfg_pg10_hide_flag ==  MFG_PAGE10_HIDE_ALL_DISKS)
12330  			ioc->hide_drives = 1;
12331  		else {
12332  			if (mpt3sas_get_num_volumes(ioc))
12333  				ioc->hide_drives = 1;
12334  			else
12335  				ioc->hide_drives = 0;
12336  		}
12337  	} else
12338  		ioc->hide_drives = 0;
12339  
12340  	shost->nr_hw_queues = 1;
12341  
12342  	if (shost->host_tagset) {
12343  		shost->nr_hw_queues =
12344  		    ioc->reply_queue_count - ioc->high_iops_queues;
12345  
12346  		iopoll_q_count =
12347  		    ioc->reply_queue_count - ioc->iopoll_q_start_index;
12348  
12349  		shost->nr_maps = iopoll_q_count ? 3 : 1;
12350  
12351  		dev_info(&ioc->pdev->dev,
12352  		    "Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
12353  		    shost->can_queue, shost->nr_hw_queues);
12354  	}
12355  
12356  	rv = scsi_add_host(shost, &pdev->dev);
12357  	if (rv) {
12358  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
12359  			__FILE__, __LINE__, __func__);
12360  		goto out_add_shost_fail;
12361  	}
12362  
12363  	scsi_scan_host(shost);
12364  	mpt3sas_setup_debugfs(ioc);
12365  	return 0;
12366  out_add_shost_fail:
12367  	mpt3sas_base_detach(ioc);
12368   out_attach_fail:
12369  	destroy_workqueue(ioc->firmware_event_thread);
12370   out_thread_fail:
12371  	spin_lock(&gioc_lock);
12372  	list_del(&ioc->list);
12373  	spin_unlock(&gioc_lock);
12374  	scsi_host_put(shost);
12375  	return rv;
12376  }
12377  
12378  /**
12379   * scsih_suspend - power management suspend main entry point
12380   * @dev: Device struct
12381   *
12382   * Return: 0 success, anything else error.
12383   */
12384  static int __maybe_unused
scsih_suspend(struct device * dev)12385  scsih_suspend(struct device *dev)
12386  {
12387  	struct pci_dev *pdev = to_pci_dev(dev);
12388  	struct Scsi_Host *shost;
12389  	struct MPT3SAS_ADAPTER *ioc;
12390  	int rc;
12391  
12392  	rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12393  	if (rc)
12394  		return rc;
12395  
12396  	mpt3sas_base_stop_watchdog(ioc);
12397  	scsi_block_requests(shost);
12398  	_scsih_nvme_shutdown(ioc);
12399  	ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state\n",
12400  		 pdev, pci_name(pdev));
12401  
12402  	mpt3sas_base_free_resources(ioc);
12403  	return 0;
12404  }
12405  
12406  /**
12407   * scsih_resume - power management resume main entry point
12408   * @dev: Device struct
12409   *
12410   * Return: 0 success, anything else error.
12411   */
12412  static int __maybe_unused
scsih_resume(struct device * dev)12413  scsih_resume(struct device *dev)
12414  {
12415  	struct pci_dev *pdev = to_pci_dev(dev);
12416  	struct Scsi_Host *shost;
12417  	struct MPT3SAS_ADAPTER *ioc;
12418  	pci_power_t device_state = pdev->current_state;
12419  	int r;
12420  
12421  	r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
12422  	if (r)
12423  		return r;
12424  
12425  	ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
12426  		 pdev, pci_name(pdev), device_state);
12427  
12428  	ioc->pdev = pdev;
12429  	r = mpt3sas_base_map_resources(ioc);
12430  	if (r)
12431  		return r;
12432  	ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
12433  	mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
12434  	scsi_unblock_requests(shost);
12435  	mpt3sas_base_start_watchdog(ioc);
12436  	return 0;
12437  }
12438  
12439  /**
12440   * scsih_pci_error_detected - Called when a PCI error is detected.
12441   * @pdev: PCI device struct
12442   * @state: PCI channel state
12443   *
12444   * Description: Called when a PCI error is detected.
12445   *
12446   * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
12447   */
12448  static pci_ers_result_t
scsih_pci_error_detected(struct pci_dev * pdev,pci_channel_state_t state)12449  scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
12450  {
12451  	struct Scsi_Host *shost;
12452  	struct MPT3SAS_ADAPTER *ioc;
12453  
12454  	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12455  		return PCI_ERS_RESULT_DISCONNECT;
12456  
12457  	ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
12458  
12459  	switch (state) {
12460  	case pci_channel_io_normal:
12461  		return PCI_ERS_RESULT_CAN_RECOVER;
12462  	case pci_channel_io_frozen:
12463  		/* Fatal error, prepare for slot reset */
12464  		ioc->pci_error_recovery = 1;
12465  		scsi_block_requests(ioc->shost);
12466  		mpt3sas_base_stop_watchdog(ioc);
12467  		mpt3sas_base_free_resources(ioc);
12468  		return PCI_ERS_RESULT_NEED_RESET;
12469  	case pci_channel_io_perm_failure:
12470  		/* Permanent error, prepare for device removal */
12471  		ioc->pci_error_recovery = 1;
12472  		mpt3sas_base_stop_watchdog(ioc);
12473  		mpt3sas_base_pause_mq_polling(ioc);
12474  		_scsih_flush_running_cmds(ioc);
12475  		return PCI_ERS_RESULT_DISCONNECT;
12476  	}
12477  	return PCI_ERS_RESULT_NEED_RESET;
12478  }
12479  
12480  /**
12481   * scsih_pci_slot_reset - Called when PCI slot has been reset.
12482   * @pdev: PCI device struct
12483   *
12484   * Description: This routine is called by the pci error recovery
12485   * code after the PCI slot has been reset, just before we
12486   * should resume normal operations.
12487   */
12488  static pci_ers_result_t
scsih_pci_slot_reset(struct pci_dev * pdev)12489  scsih_pci_slot_reset(struct pci_dev *pdev)
12490  {
12491  	struct Scsi_Host *shost;
12492  	struct MPT3SAS_ADAPTER *ioc;
12493  	int rc;
12494  
12495  	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12496  		return PCI_ERS_RESULT_DISCONNECT;
12497  
12498  	ioc_info(ioc, "PCI error: slot reset callback!!\n");
12499  
12500  	ioc->pci_error_recovery = 0;
12501  	ioc->pdev = pdev;
12502  	pci_restore_state(pdev);
12503  	rc = mpt3sas_base_map_resources(ioc);
12504  	if (rc)
12505  		return PCI_ERS_RESULT_DISCONNECT;
12506  
12507  	ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
12508  	rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
12509  
12510  	ioc_warn(ioc, "hard reset: %s\n",
12511  		 (rc == 0) ? "success" : "failed");
12512  
12513  	if (!rc)
12514  		return PCI_ERS_RESULT_RECOVERED;
12515  	else
12516  		return PCI_ERS_RESULT_DISCONNECT;
12517  }
12518  
12519  /**
12520   * scsih_pci_resume() - resume normal ops after PCI reset
12521   * @pdev: pointer to PCI device
12522   *
12523   * Called when the error recovery driver tells us that its
12524   * OK to resume normal operation. Use completion to allow
12525   * halted scsi ops to resume.
12526   */
12527  static void
scsih_pci_resume(struct pci_dev * pdev)12528  scsih_pci_resume(struct pci_dev *pdev)
12529  {
12530  	struct Scsi_Host *shost;
12531  	struct MPT3SAS_ADAPTER *ioc;
12532  
12533  	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12534  		return;
12535  
12536  	ioc_info(ioc, "PCI error: resume callback!!\n");
12537  
12538  	mpt3sas_base_start_watchdog(ioc);
12539  	scsi_unblock_requests(ioc->shost);
12540  }
12541  
12542  /**
12543   * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
12544   * @pdev: pointer to PCI device
12545   */
12546  static pci_ers_result_t
scsih_pci_mmio_enabled(struct pci_dev * pdev)12547  scsih_pci_mmio_enabled(struct pci_dev *pdev)
12548  {
12549  	struct Scsi_Host *shost;
12550  	struct MPT3SAS_ADAPTER *ioc;
12551  
12552  	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
12553  		return PCI_ERS_RESULT_DISCONNECT;
12554  
12555  	ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
12556  
12557  	/* TODO - dump whatever for debugging purposes */
12558  
12559  	/* This called only if scsih_pci_error_detected returns
12560  	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
12561  	 * works, no need to reset slot.
12562  	 */
12563  	return PCI_ERS_RESULT_RECOVERED;
12564  }
12565  
12566  /*
12567   * The pci device ids are defined in mpi/mpi2_cnfg.h.
12568   */
12569  static const struct pci_device_id mpt3sas_pci_table[] = {
12570  	/* Spitfire ~ 2004 */
12571  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
12572  		PCI_ANY_ID, PCI_ANY_ID },
12573  	/* Falcon ~ 2008 */
12574  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
12575  		PCI_ANY_ID, PCI_ANY_ID },
12576  	/* Liberator ~ 2108 */
12577  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
12578  		PCI_ANY_ID, PCI_ANY_ID },
12579  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
12580  		PCI_ANY_ID, PCI_ANY_ID },
12581  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
12582  		PCI_ANY_ID, PCI_ANY_ID },
12583  	/* Meteor ~ 2116 */
12584  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
12585  		PCI_ANY_ID, PCI_ANY_ID },
12586  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
12587  		PCI_ANY_ID, PCI_ANY_ID },
12588  	/* Thunderbolt ~ 2208 */
12589  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
12590  		PCI_ANY_ID, PCI_ANY_ID },
12591  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
12592  		PCI_ANY_ID, PCI_ANY_ID },
12593  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
12594  		PCI_ANY_ID, PCI_ANY_ID },
12595  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
12596  		PCI_ANY_ID, PCI_ANY_ID },
12597  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
12598  		PCI_ANY_ID, PCI_ANY_ID },
12599  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
12600  		PCI_ANY_ID, PCI_ANY_ID },
12601  	/* Mustang ~ 2308 */
12602  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
12603  		PCI_ANY_ID, PCI_ANY_ID },
12604  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
12605  		PCI_ANY_ID, PCI_ANY_ID },
12606  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
12607  		PCI_ANY_ID, PCI_ANY_ID },
12608  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
12609  		PCI_ANY_ID, PCI_ANY_ID },
12610  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
12611  		PCI_ANY_ID, PCI_ANY_ID },
12612  	/* SSS6200 */
12613  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
12614  		PCI_ANY_ID, PCI_ANY_ID },
12615  	/* Fury ~ 3004 and 3008 */
12616  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
12617  		PCI_ANY_ID, PCI_ANY_ID },
12618  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
12619  		PCI_ANY_ID, PCI_ANY_ID },
12620  	/* Invader ~ 3108 */
12621  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
12622  		PCI_ANY_ID, PCI_ANY_ID },
12623  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
12624  		PCI_ANY_ID, PCI_ANY_ID },
12625  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
12626  		PCI_ANY_ID, PCI_ANY_ID },
12627  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
12628  		PCI_ANY_ID, PCI_ANY_ID },
12629  	/* Cutlass ~ 3216 and 3224 */
12630  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
12631  		PCI_ANY_ID, PCI_ANY_ID },
12632  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
12633  		PCI_ANY_ID, PCI_ANY_ID },
12634  	/* Intruder ~ 3316 and 3324 */
12635  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
12636  		PCI_ANY_ID, PCI_ANY_ID },
12637  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
12638  		PCI_ANY_ID, PCI_ANY_ID },
12639  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
12640  		PCI_ANY_ID, PCI_ANY_ID },
12641  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
12642  		PCI_ANY_ID, PCI_ANY_ID },
12643  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
12644  		PCI_ANY_ID, PCI_ANY_ID },
12645  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
12646  		PCI_ANY_ID, PCI_ANY_ID },
12647  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
12648  		PCI_ANY_ID, PCI_ANY_ID },
12649  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
12650  		PCI_ANY_ID, PCI_ANY_ID },
12651  	/* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
12652  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
12653  		PCI_ANY_ID, PCI_ANY_ID },
12654  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
12655  		PCI_ANY_ID, PCI_ANY_ID },
12656  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
12657  		PCI_ANY_ID, PCI_ANY_ID },
12658  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
12659  		PCI_ANY_ID, PCI_ANY_ID },
12660  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
12661  		PCI_ANY_ID, PCI_ANY_ID },
12662  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
12663  		PCI_ANY_ID, PCI_ANY_ID },
12664  	/* Mercator ~ 3616*/
12665  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
12666  		PCI_ANY_ID, PCI_ANY_ID },
12667  
12668  	/* Aero SI 0x00E1 Configurable Secure
12669  	 * 0x00E2 Hard Secure
12670  	 */
12671  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
12672  		PCI_ANY_ID, PCI_ANY_ID },
12673  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
12674  		PCI_ANY_ID, PCI_ANY_ID },
12675  
12676  	/*
12677  	 *  Aero SI –> 0x00E0 Invalid, 0x00E3 Tampered
12678  	 */
12679  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
12680  		PCI_ANY_ID, PCI_ANY_ID },
12681  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
12682  		PCI_ANY_ID, PCI_ANY_ID },
12683  
12684  	/* Atlas PCIe Switch Management Port */
12685  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
12686  		PCI_ANY_ID, PCI_ANY_ID },
12687  
12688  	/* Sea SI 0x00E5 Configurable Secure
12689  	 * 0x00E6 Hard Secure
12690  	 */
12691  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
12692  		PCI_ANY_ID, PCI_ANY_ID },
12693  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
12694  		PCI_ANY_ID, PCI_ANY_ID },
12695  
12696  	/*
12697  	 * ATTO Branded ExpressSAS H12xx GT
12698  	 */
12699  	{ MPI2_MFGPAGE_VENDORID_ATTO, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
12700  		PCI_ANY_ID, PCI_ANY_ID },
12701  
12702  	/*
12703  	 *  Sea SI –> 0x00E4 Invalid, 0x00E7 Tampered
12704  	 */
12705  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
12706  		PCI_ANY_ID, PCI_ANY_ID },
12707  	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
12708  		PCI_ANY_ID, PCI_ANY_ID },
12709  
12710  	{0}     /* Terminating entry */
12711  };
12712  MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
12713  
12714  static struct pci_error_handlers _mpt3sas_err_handler = {
12715  	.error_detected	= scsih_pci_error_detected,
12716  	.mmio_enabled	= scsih_pci_mmio_enabled,
12717  	.slot_reset	= scsih_pci_slot_reset,
12718  	.resume		= scsih_pci_resume,
12719  };
12720  
12721  static SIMPLE_DEV_PM_OPS(scsih_pm_ops, scsih_suspend, scsih_resume);
12722  
/* PCI driver registration: probe/remove/shutdown entry points plus the
 * AER error handlers and power-management ops defined above.
 */
static struct pci_driver mpt3sas_driver = {
	.name		= MPT3SAS_DRIVER_NAME,
	.id_table	= mpt3sas_pci_table,
	.probe		= _scsih_probe,
	.remove		= scsih_remove,
	.shutdown	= scsih_shutdown,
	.err_handler	= &_mpt3sas_err_handler,
	.driver.pm	= &scsih_pm_ops,
};
12732  
12733  /**
12734   * scsih_init - main entry point for this driver.
12735   *
12736   * Return: 0 success, anything else error.
12737   */
12738  static int
scsih_init(void)12739  scsih_init(void)
12740  {
12741  	mpt2_ids = 0;
12742  	mpt3_ids = 0;
12743  
12744  	mpt3sas_base_initialize_callback_handler();
12745  
12746  	 /* queuecommand callback hander */
12747  	scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
12748  
12749  	/* task management callback handler */
12750  	tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
12751  
12752  	/* base internal commands callback handler */
12753  	base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
12754  	port_enable_cb_idx = mpt3sas_base_register_callback_handler(
12755  	    mpt3sas_port_enable_done);
12756  
12757  	/* transport internal commands callback handler */
12758  	transport_cb_idx = mpt3sas_base_register_callback_handler(
12759  	    mpt3sas_transport_done);
12760  
12761  	/* scsih internal commands callback handler */
12762  	scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
12763  
12764  	/* configuration page API internal commands callback handler */
12765  	config_cb_idx = mpt3sas_base_register_callback_handler(
12766  	    mpt3sas_config_done);
12767  
12768  	/* ctl module callback handler */
12769  	ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
12770  
12771  	tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
12772  	    _scsih_tm_tr_complete);
12773  
12774  	tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
12775  	    _scsih_tm_volume_tr_complete);
12776  
12777  	tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
12778  	    _scsih_sas_control_complete);
12779  
12780  	mpt3sas_init_debugfs();
12781  	return 0;
12782  }
12783  
12784  /**
12785   * scsih_exit - exit point for this driver (when it is a module).
12786   *
12787   * Return: 0 success, anything else error.
12788   */
12789  static void
scsih_exit(void)12790  scsih_exit(void)
12791  {
12792  
12793  	mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
12794  	mpt3sas_base_release_callback_handler(tm_cb_idx);
12795  	mpt3sas_base_release_callback_handler(base_cb_idx);
12796  	mpt3sas_base_release_callback_handler(port_enable_cb_idx);
12797  	mpt3sas_base_release_callback_handler(transport_cb_idx);
12798  	mpt3sas_base_release_callback_handler(scsih_cb_idx);
12799  	mpt3sas_base_release_callback_handler(config_cb_idx);
12800  	mpt3sas_base_release_callback_handler(ctl_cb_idx);
12801  
12802  	mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
12803  	mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
12804  	mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
12805  
12806  /* raid transport support */
12807  	if (hbas_to_enumerate != 1)
12808  		raid_class_release(mpt3sas_raid_template);
12809  	if (hbas_to_enumerate != 2)
12810  		raid_class_release(mpt2sas_raid_template);
12811  	sas_release_transport(mpt3sas_transport_template);
12812  	mpt3sas_exit_debugfs();
12813  }
12814  
12815  /**
12816   * _mpt3sas_init - main entry point for this driver.
12817   *
12818   * Return: 0 success, anything else error.
12819   */
12820  static int __init
_mpt3sas_init(void)12821  _mpt3sas_init(void)
12822  {
12823  	int error;
12824  
12825  	pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
12826  					MPT3SAS_DRIVER_VERSION);
12827  
12828  	mpt3sas_transport_template =
12829  	    sas_attach_transport(&mpt3sas_transport_functions);
12830  	if (!mpt3sas_transport_template)
12831  		return -ENODEV;
12832  
12833  	/* No need attach mpt3sas raid functions template
12834  	 * if hbas_to_enumarate value is one.
12835  	 */
12836  	if (hbas_to_enumerate != 1) {
12837  		mpt3sas_raid_template =
12838  				raid_class_attach(&mpt3sas_raid_functions);
12839  		if (!mpt3sas_raid_template) {
12840  			sas_release_transport(mpt3sas_transport_template);
12841  			return -ENODEV;
12842  		}
12843  	}
12844  
12845  	/* No need to attach mpt2sas raid functions template
12846  	 * if hbas_to_enumarate value is two
12847  	 */
12848  	if (hbas_to_enumerate != 2) {
12849  		mpt2sas_raid_template =
12850  				raid_class_attach(&mpt2sas_raid_functions);
12851  		if (!mpt2sas_raid_template) {
12852  			sas_release_transport(mpt3sas_transport_template);
12853  			return -ENODEV;
12854  		}
12855  	}
12856  
12857  	error = scsih_init();
12858  	if (error) {
12859  		scsih_exit();
12860  		return error;
12861  	}
12862  
12863  	mpt3sas_ctl_init(hbas_to_enumerate);
12864  
12865  	error = pci_register_driver(&mpt3sas_driver);
12866  	if (error) {
12867  		mpt3sas_ctl_exit(hbas_to_enumerate);
12868  		scsih_exit();
12869  	}
12870  
12871  	return error;
12872  }
12873  
12874  /**
12875   * _mpt3sas_exit - exit point for this driver (when it is a module).
12876   *
12877   */
12878  static void __exit
_mpt3sas_exit(void)12879  _mpt3sas_exit(void)
12880  {
12881  	pr_info("mpt3sas version %s unloading\n",
12882  				MPT3SAS_DRIVER_VERSION);
12883  
12884  	pci_unregister_driver(&mpt3sas_driver);
12885  
12886  	mpt3sas_ctl_exit(hbas_to_enumerate);
12887  
12888  	scsih_exit();
12889  }
12890  
12891  module_init(_mpt3sas_init);
12892  module_exit(_mpt3sas_exit);
12893