1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*
3   * ipr.c -- driver for IBM Power Linux RAID adapters
4   *
5   * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
6   *
7   * Copyright (C) 2003, 2004 IBM Corporation
8   */
9  
10  /*
11   * Notes:
12   *
13   * This driver is used to control the following SCSI adapters:
14   *
15   * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
16   *
17   * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
18   *              PCI-X Dual Channel Ultra 320 SCSI Adapter
19   *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
20   *              Embedded SCSI adapter on p615 and p655 systems
21   *
22   * Supported Hardware Features:
23   *	- Ultra 320 SCSI controller
24   *	- PCI-X host interface
25   *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
26   *	- Non-Volatile Write Cache
27   *	- Supports attachment of non-RAID disks, tape, and optical devices
28   *	- RAID Levels 0, 5, 10
29   *	- Hot spare
30   *	- Background Parity Checking
31   *	- Background Data Scrubbing
32   *	- Ability to increase the capacity of an existing RAID 5 disk array
33   *		by adding disks
34   *
35   * Driver Features:
36   *	- Tagged command queuing
37   *	- Adapter microcode download
38   *	- PCI hot plug
39   *	- SCSI device hot plug
40   *
41   */
42  
43  #include <linux/fs.h>
44  #include <linux/init.h>
45  #include <linux/types.h>
46  #include <linux/errno.h>
47  #include <linux/kernel.h>
48  #include <linux/slab.h>
49  #include <linux/vmalloc.h>
50  #include <linux/ioport.h>
51  #include <linux/delay.h>
52  #include <linux/pci.h>
53  #include <linux/wait.h>
54  #include <linux/spinlock.h>
55  #include <linux/sched.h>
56  #include <linux/interrupt.h>
57  #include <linux/blkdev.h>
58  #include <linux/firmware.h>
59  #include <linux/module.h>
60  #include <linux/moduleparam.h>
61  #include <linux/hdreg.h>
62  #include <linux/reboot.h>
63  #include <linux/stringify.h>
64  #include <asm/io.h>
65  #include <asm/irq.h>
66  #include <asm/processor.h>
67  #include <scsi/scsi.h>
68  #include <scsi/scsi_host.h>
69  #include <scsi/scsi_tcq.h>
70  #include <scsi/scsi_eh.h>
71  #include <scsi/scsi_cmnd.h>
72  #include "ipr.h"
73  
74  /*
75   *   Global Data
76   */
77  static LIST_HEAD(ipr_ioa_head);
78  static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
79  static unsigned int ipr_max_speed = 1;
80  static unsigned int ipr_fastfail = 0;
81  static unsigned int ipr_transop_timeout = 0;
82  static unsigned int ipr_debug = 0;
83  static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
84  static unsigned int ipr_dual_ioa_raid = 1;
85  static unsigned int ipr_number_of_msix = 16;
86  static unsigned int ipr_fast_reboot;
87  static DEFINE_SPINLOCK(ipr_driver_lock);
88  
89  /* This table describes the differences between DMA controller chips */
90  static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
91  	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
92  		.mailbox = 0x0042C,
93  		.max_cmds = 100,
94  		.cache_line_size = 0x20,
95  		.clear_isr = 1,
96  		.iopoll_weight = 0,
97  		{
98  			.set_interrupt_mask_reg = 0x0022C,
99  			.clr_interrupt_mask_reg = 0x00230,
100  			.clr_interrupt_mask_reg32 = 0x00230,
101  			.sense_interrupt_mask_reg = 0x0022C,
102  			.sense_interrupt_mask_reg32 = 0x0022C,
103  			.clr_interrupt_reg = 0x00228,
104  			.clr_interrupt_reg32 = 0x00228,
105  			.sense_interrupt_reg = 0x00224,
106  			.sense_interrupt_reg32 = 0x00224,
107  			.ioarrin_reg = 0x00404,
108  			.sense_uproc_interrupt_reg = 0x00214,
109  			.sense_uproc_interrupt_reg32 = 0x00214,
110  			.set_uproc_interrupt_reg = 0x00214,
111  			.set_uproc_interrupt_reg32 = 0x00214,
112  			.clr_uproc_interrupt_reg = 0x00218,
113  			.clr_uproc_interrupt_reg32 = 0x00218
114  		}
115  	},
116  	{ /* Snipe and Scamp */
117  		.mailbox = 0x0052C,
118  		.max_cmds = 100,
119  		.cache_line_size = 0x20,
120  		.clear_isr = 1,
121  		.iopoll_weight = 0,
122  		{
123  			.set_interrupt_mask_reg = 0x00288,
124  			.clr_interrupt_mask_reg = 0x0028C,
125  			.clr_interrupt_mask_reg32 = 0x0028C,
126  			.sense_interrupt_mask_reg = 0x00288,
127  			.sense_interrupt_mask_reg32 = 0x00288,
128  			.clr_interrupt_reg = 0x00284,
129  			.clr_interrupt_reg32 = 0x00284,
130  			.sense_interrupt_reg = 0x00280,
131  			.sense_interrupt_reg32 = 0x00280,
132  			.ioarrin_reg = 0x00504,
133  			.sense_uproc_interrupt_reg = 0x00290,
134  			.sense_uproc_interrupt_reg32 = 0x00290,
135  			.set_uproc_interrupt_reg = 0x00290,
136  			.set_uproc_interrupt_reg32 = 0x00290,
137  			.clr_uproc_interrupt_reg = 0x00294,
138  			.clr_uproc_interrupt_reg32 = 0x00294
139  		}
140  	},
141  	{ /* CRoC */
142  		.mailbox = 0x00044,
143  		.max_cmds = 1000,
144  		.cache_line_size = 0x20,
145  		.clear_isr = 0,
146  		.iopoll_weight = 64,
147  		{
148  			.set_interrupt_mask_reg = 0x00010,
149  			.clr_interrupt_mask_reg = 0x00018,
150  			.clr_interrupt_mask_reg32 = 0x0001C,
151  			.sense_interrupt_mask_reg = 0x00010,
152  			.sense_interrupt_mask_reg32 = 0x00014,
153  			.clr_interrupt_reg = 0x00008,
154  			.clr_interrupt_reg32 = 0x0000C,
155  			.sense_interrupt_reg = 0x00000,
156  			.sense_interrupt_reg32 = 0x00004,
157  			.ioarrin_reg = 0x00070,
158  			.sense_uproc_interrupt_reg = 0x00020,
159  			.sense_uproc_interrupt_reg32 = 0x00024,
160  			.set_uproc_interrupt_reg = 0x00020,
161  			.set_uproc_interrupt_reg32 = 0x00024,
162  			.clr_uproc_interrupt_reg = 0x00028,
163  			.clr_uproc_interrupt_reg32 = 0x0002C,
164  			.init_feedback_reg = 0x0005C,
165  			.dump_addr_reg = 0x00064,
166  			.dump_data_reg = 0x00068,
167  			.endian_swap_reg = 0x00084
168  		}
169  	},
170  };
171  
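/* Per-adapter PCI vendor/device IDs, each paired with its register layout in ipr_chip_cfg[] above */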
172  static const struct ipr_chip_t ipr_chip[] = {
173  	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
174  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
175  	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
176  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
177  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
178  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
179  	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
180  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
181  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
182  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
183  };
184  
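/* Bus speed limits indexed by the max_speed module parameter: 0 = 80 MB/s, 1 = U160, 2 = U320 */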
185  static int ipr_max_bus_speeds[] = {
186  	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
187  };
188  
189  MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
190  MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
191  module_param_named(max_speed, ipr_max_speed, uint, 0);
192  MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
193  module_param_named(log_level, ipr_log_level, uint, 0);
194  MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
195  module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
196  MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
197  module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
198  MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
199  module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
200  MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
201  module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
202  MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
203  module_param_named(max_devs, ipr_max_devs, int, 0);
204  MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
205  		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
206  module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
207  MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16).  (default:16)");
208  module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
209  MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
210  MODULE_LICENSE("GPL");
211  MODULE_VERSION(IPR_DRIVER_VERSION);
212  
213  /*  A constant array of IOASCs/URCs/Error Messages */
214  static const
215  struct ipr_error_table_t ipr_error_table[] = {
216  	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
217  	"8155: An unknown error was received"},
218  	{0x00330000, 0, 0,
219  	"Soft underlength error"},
220  	{0x005A0000, 0, 0,
221  	"Command to be cancelled not found"},
222  	{0x00808000, 0, 0,
223  	"Qualified success"},
224  	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
225  	"FFFE: Soft device bus error recovered by the IOA"},
226  	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
227  	"4101: Soft device bus fabric error"},
228  	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
229  	"FFFC: Logical block guard error recovered by the device"},
230  	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
231  	"FFFC: Logical block reference tag error recovered by the device"},
232  	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
233  	"4171: Recovered scatter list tag / sequence number error"},
234  	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
235  	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
236  	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
237  	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
238  	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
239  	"FFFD: Recovered logical block reference tag error detected by the IOA"},
240  	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
241  	"FFFD: Logical block guard error recovered by the IOA"},
242  	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
243  	"FFF9: Device sector reassign successful"},
244  	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
245  	"FFF7: Media error recovered by device rewrite procedures"},
246  	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
247  	"7001: IOA sector reassignment successful"},
248  	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
249  	"FFF9: Soft media error. Sector reassignment recommended"},
250  	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
251  	"FFF7: Media error recovered by IOA rewrite procedures"},
252  	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
253  	"FF3D: Soft PCI bus error recovered by the IOA"},
254  	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
255  	"FFF6: Device hardware error recovered by the IOA"},
256  	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
257  	"FFF6: Device hardware error recovered by the device"},
258  	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
259  	"FF3D: Soft IOA error recovered by the IOA"},
260  	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
261  	"FFFA: Undefined device response recovered by the IOA"},
262  	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
263  	"FFF6: Device bus error, message or command phase"},
264  	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
265  	"FFFE: Task Management Function failed"},
266  	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
267  	"FFF6: Failure prediction threshold exceeded"},
268  	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
269  	"8009: Impending cache battery pack failure"},
270  	{0x02040100, 0, 0,
271  	"Logical Unit in process of becoming ready"},
272  	{0x02040200, 0, 0,
273  	"Initializing command required"},
274  	{0x02040400, 0, 0,
275  	"34FF: Disk device format in progress"},
276  	{0x02040C00, 0, 0,
277  	"Logical unit not accessible, target port in unavailable state"},
278  	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
279  	"9070: IOA requested reset"},
280  	{0x023F0000, 0, 0,
281  	"Synchronization required"},
282  	{0x02408500, 0, 0,
283  	"IOA microcode download required"},
284  	{0x02408600, 0, 0,
285  	"Device bus connection is prohibited by host"},
286  	{0x024E0000, 0, 0,
287  	"Not ready, IOA shutdown"},
288  	{0x025A0000, 0, 0,
289  	"Not ready, IOA has been shutdown"},
290  	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
291  	"3020: Storage subsystem configuration error"},
292  	{0x03110B00, 0, 0,
293  	"FFF5: Medium error, data unreadable, recommend reassign"},
294  	{0x03110C00, 0, 0,
295  	"7000: Medium error, data unreadable, do not reassign"},
296  	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
297  	"FFF3: Disk media format bad"},
298  	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
299  	"3002: Addressed device failed to respond to selection"},
300  	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
301  	"3100: Device bus error"},
302  	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
303  	"3109: IOA timed out a device command"},
304  	{0x04088000, 0, 0,
305  	"3120: SCSI bus is not operational"},
306  	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
307  	"4100: Hard device bus fabric error"},
308  	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
309  	"310C: Logical block guard error detected by the device"},
310  	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
311  	"310C: Logical block reference tag error detected by the device"},
312  	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
313  	"4170: Scatter list tag / sequence number error"},
314  	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
315  	"8150: Logical block CRC error on IOA to Host transfer"},
316  	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
317  	"4170: Logical block sequence number error on IOA to Host transfer"},
318  	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
319  	"310D: Logical block reference tag error detected by the IOA"},
320  	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
321  	"310D: Logical block guard error detected by the IOA"},
322  	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
323  	"9000: IOA reserved area data check"},
324  	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
325  	"9001: IOA reserved area invalid data pattern"},
326  	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
327  	"9002: IOA reserved area LRC error"},
328  	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
329  	"Hardware Error, IOA metadata access error"},
330  	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
331  	"102E: Out of alternate sectors for disk storage"},
332  	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
333  	"FFF4: Data transfer underlength error"},
334  	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
335  	"FFF4: Data transfer overlength error"},
336  	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
337  	"3400: Logical unit failure"},
338  	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
339  	"FFF4: Device microcode is corrupt"},
340  	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
341  	"8150: PCI bus error"},
342  	{0x04430000, 1, 0,
343  	"Unsupported device bus message received"},
344  	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
345  	"FFF4: Disk device problem"},
346  	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
347  	"8150: Permanent IOA failure"},
348  	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
349  	"3010: Disk device returned wrong response to IOA"},
350  	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
351  	"8151: IOA microcode error"},
352  	{0x04448500, 0, 0,
353  	"Device bus status error"},
354  	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
355  	"8157: IOA error requiring IOA reset to recover"},
356  	{0x04448700, 0, 0,
357  	"ATA device status error"},
358  	{0x04490000, 0, 0,
359  	"Message reject received from the device"},
360  	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
361  	"8008: A permanent cache battery pack failure occurred"},
362  	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
363  	"9090: Disk unit has been modified after the last known status"},
364  	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
365  	"9081: IOA detected device error"},
366  	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
367  	"9082: IOA detected device error"},
368  	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
369  	"3110: Device bus error, message or command phase"},
370  	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
371  	"3110: SAS Command / Task Management Function failed"},
372  	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
373  	"9091: Incorrect hardware configuration change has been detected"},
374  	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
375  	"9073: Invalid multi-adapter configuration"},
376  	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
377  	"4010: Incorrect connection between cascaded expanders"},
378  	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
379  	"4020: Connections exceed IOA design limits"},
380  	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
381  	"4030: Incorrect multipath connection"},
382  	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
383  	"4110: Unsupported enclosure function"},
384  	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
385  	"4120: SAS cable VPD cannot be read"},
386  	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
387  	"FFF4: Command to logical unit failed"},
388  	{0x05240000, 1, 0,
389  	"Illegal request, invalid request type or request packet"},
390  	{0x05250000, 0, 0,
391  	"Illegal request, invalid resource handle"},
392  	{0x05258000, 0, 0,
393  	"Illegal request, commands not allowed to this device"},
394  	{0x05258100, 0, 0,
395  	"Illegal request, command not allowed to a secondary adapter"},
396  	{0x05258200, 0, 0,
397  	"Illegal request, command not allowed to a non-optimized resource"},
398  	{0x05260000, 0, 0,
399  	"Illegal request, invalid field in parameter list"},
400  	{0x05260100, 0, 0,
401  	"Illegal request, parameter not supported"},
402  	{0x05260200, 0, 0,
403  	"Illegal request, parameter value invalid"},
404  	{0x052C0000, 0, 0,
405  	"Illegal request, command sequence error"},
406  	{0x052C8000, 1, 0,
407  	"Illegal request, dual adapter support not enabled"},
408  	{0x052C8100, 1, 0,
409  	"Illegal request, another cable connector was physically disabled"},
410  	{0x054E8000, 1, 0,
411  	"Illegal request, inconsistent group id/group count"},
412  	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
413  	"9031: Array protection temporarily suspended, protection resuming"},
414  	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
415  	"9040: Array protection temporarily suspended, protection resuming"},
416  	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
417  	"4080: IOA exceeded maximum operating temperature"},
418  	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
419  	"4085: Service required"},
420  	{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
421  	"4086: SAS Adapter Hardware Configuration Error"},
422  	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
423  	"3140: Device bus not ready to ready transition"},
424  	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
425  	"FFFB: SCSI bus was reset"},
426  	{0x06290500, 0, 0,
427  	"FFFE: SCSI bus transition to single ended"},
428  	{0x06290600, 0, 0,
429  	"FFFE: SCSI bus transition to LVD"},
430  	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
431  	"FFFB: SCSI bus was reset by another initiator"},
432  	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
433  	"3029: A device replacement has occurred"},
434  	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
435  	"4102: Device bus fabric performance degradation"},
436  	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
437  	"9051: IOA cache data exists for a missing or failed device"},
438  	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
439  	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
440  	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
441  	"9025: Disk unit is not supported at its physical location"},
442  	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
443  	"3020: IOA detected a SCSI bus configuration error"},
444  	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
445  	"3150: SCSI bus configuration error"},
446  	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
447  	"9074: Asymmetric advanced function disk configuration"},
448  	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
449  	"4040: Incomplete multipath connection between IOA and enclosure"},
450  	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
451  	"4041: Incomplete multipath connection between enclosure and device"},
452  	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
453  	"9075: Incomplete multipath connection between IOA and remote IOA"},
454  	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
455  	"9076: Configuration error, missing remote IOA"},
456  	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
457  	"4050: Enclosure does not support a required multipath function"},
458  	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
459  	"4121: Configuration error, required cable is missing"},
460  	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
461  	"4122: Cable is not plugged into the correct location on remote IOA"},
462  	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
463  	"4123: Configuration error, invalid cable vital product data"},
464  	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
465  	"4124: Configuration error, both cable ends are plugged into the same IOA"},
466  	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
467  	"4070: Logically bad block written on device"},
468  	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
469  	"9041: Array protection temporarily suspended"},
470  	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
471  	"9042: Corrupt array parity detected on specified device"},
472  	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
473  	"9030: Array no longer protected due to missing or failed disk unit"},
474  	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
475  	"9071: Link operational transition"},
476  	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
477  	"9072: Link not operational transition"},
478  	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
479  	"9032: Array exposed but still protected"},
480  	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
481  	"70DD: Device forced failed by disrupt device command"},
482  	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
483  	"4061: Multipath redundancy level got better"},
484  	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
485  	"4060: Multipath redundancy level got worse"},
486  	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
487  	"9083: Device raw mode enabled"},
488  	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
489  	"9084: Device raw mode disabled"},
490  	{0x07270000, 0, 0,
491  	"Failure due to other device"},
492  	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
493  	"9008: IOA does not support functions expected by devices"},
494  	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
495  	"9010: Cache data associated with attached devices cannot be found"},
496  	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
497  	"9011: Cache data belongs to devices other than those attached"},
498  	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
499  	"9020: Array missing 2 or more devices with only 1 device present"},
500  	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
501  	"9021: Array missing 2 or more devices with 2 or more devices present"},
502  	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
503  	"9022: Exposed array is missing a required device"},
504  	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
505  	"9023: Array member(s) not at required physical locations"},
506  	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
507  	"9024: Array not functional due to present hardware configuration"},
508  	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
509  	"9026: Array not functional due to present hardware configuration"},
510  	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
511  	"9027: Array is missing a device and parity is out of sync"},
512  	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
513  	"9028: Maximum number of arrays already exist"},
514  	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
515  	"9050: Required cache data cannot be located for a disk unit"},
516  	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
517  	"9052: Cache data exists for a device that has been modified"},
518  	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
519  	"9054: IOA resources not available due to previous problems"},
520  	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
521  	"9092: Disk unit requires initialization before use"},
522  	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
523  	"9029: Incorrect hardware configuration change has been detected"},
524  	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
525  	"9060: One or more disk pairs are missing from an array"},
526  	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
527  	"9061: One or more disks are missing from an array"},
528  	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
529  	"9062: One or more disks are missing from an array"},
530  	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
531  	"9063: Maximum number of functional arrays has been exceeded"},
532  	{0x07279A00, 0, 0,
533  	"Data protect, other volume set problem"},
534  	{0x0B260000, 0, 0,
535  	"Aborted command, invalid descriptor"},
536  	{0x0B3F9000, 0, 0,
537  	"Target operating conditions have changed, dual adapter takeover"},
538  	{0x0B530200, 0, 0,
539  	"Aborted command, medium removal prevented"},
540  	{0x0B5A0000, 0, 0,
541  	"Command terminated by host"},
542  	{0x0B5B8000, 0, 0,
543  	"Aborted command, command terminated by host"}
544  };
545  
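/* Known SCSI enclosures: product ID, compare mask, and maximum bus speed (MB/s) */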
546  static const struct ipr_ses_table_entry ipr_ses_table[] = {
547  	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
548  	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
549  	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
550  	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
551  	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
552  	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
553  	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
554  	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
555  	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
556  	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
557  	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
558  	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
559  	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
560  };
561  
562  /*
563   *  Function Prototypes
564   */
565  static int ipr_reset_alert(struct ipr_cmnd *);
566  static void ipr_process_ccn(struct ipr_cmnd *);
567  static void ipr_process_error(struct ipr_cmnd *);
568  static void ipr_reset_ioa_job(struct ipr_cmnd *);
569  static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
570  				   enum ipr_shutdown_type);
571  
572  #ifdef CONFIG_SCSI_IPR_TRACE
573  /**
574   * ipr_trc_hook - Add a trace entry to the driver trace
575   * @ipr_cmd:	ipr command struct
576   * @type:		trace type
577   * @add_data:	additional data
578   *
579   * Return value:
580   * 	none
581   **/
582  static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
583  			 u8 type, u32 add_data)
584  {
585  	struct ipr_trace_entry *trace_entry;
586  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
587  	unsigned int trace_index;
588  
589  	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
590  	trace_entry = &ioa_cfg->trace[trace_index];
591  	trace_entry->time = jiffies;
592  	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
593  	trace_entry->type = type;
594  	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
595  	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
596  	trace_entry->u.add_data = add_data;
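	/* Write barrier: order the trace entry stores before any later writes */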
597  	wmb();
598  }
599  #else
600  #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
601  #endif
602  
603  /**
604   * ipr_lock_and_done - Acquire lock and complete command
605   * @ipr_cmd:	ipr command struct
606   *
607   * Return value:
608   *	none
609   **/
610  static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
611  {
612  	unsigned long lock_flags;
613  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
614  
615  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
616  	ipr_cmd->done(ipr_cmd);
617  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
618  }
619  
620  /**
621   * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
622   * @ipr_cmd:	ipr command struct
623   *
624   * Return value:
625   * 	none
626   **/
627  static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
628  {
629  	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
630  	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
631  	dma_addr_t dma_addr = ipr_cmd->dma_addr;
632  	int hrrq_id;
633  
634  	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
635  	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
636  	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
637  	ioarcb->data_transfer_length = 0;
638  	ioarcb->read_data_transfer_length = 0;
639  	ioarcb->ioadl_len = 0;
640  	ioarcb->read_ioadl_len = 0;
641  
642  	if (ipr_cmd->ioa_cfg->sis64) {
643  		ioarcb->u.sis64_addr_data.data_ioadl_addr =
644  			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
645  	} else {
646  		ioarcb->write_ioadl_addr =
647  			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
648  		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
649  	}
650  
651  	ioasa->hdr.ioasc = 0;
652  	ioasa->hdr.residual_data_len = 0;
653  	ipr_cmd->scsi_cmd = NULL;
654  	ipr_cmd->sense_buffer[0] = 0;
655  	ipr_cmd->dma_use_sg = 0;
656  }
657  
658  /**
659   * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
660   * @ipr_cmd:	ipr command struct
661   * @fast_done:	fast done function call-back
662   *
663   * Return value:
664   * 	none
665   **/
666  static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
667  			      void (*fast_done) (struct ipr_cmnd *))
668  {
669  	ipr_reinit_ipr_cmnd(ipr_cmd);
670  	ipr_cmd->u.scratch = 0;
671  	ipr_cmd->sibling = NULL;
672  	ipr_cmd->eh_comp = NULL;
673  	ipr_cmd->fast_done = fast_done;
674  	timer_setup(&ipr_cmd->timer, NULL, 0);
675  }
676  
677  /**
678   * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
679   * @hrrq:	hrr queue
680   *
681   * Return value:
682   * 	pointer to ipr command struct
683   **/
684  static
685  struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
686  {
687  	struct ipr_cmnd *ipr_cmd = NULL;
688  
689  	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
690  		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
691  			struct ipr_cmnd, queue);
692  		list_del(&ipr_cmd->queue);
693  	}
694  
695  
696  	return ipr_cmd;
697  }
698  
699  /**
700   * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
701   * @ioa_cfg:	ioa config struct
702   *
703   * Return value:
704   *	pointer to ipr command struct
705   **/
706  static
707  struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
708  {
709  	struct ipr_cmnd *ipr_cmd =
710  		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
711  	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
712  	return ipr_cmd;
713  }
714  
715  /**
716   * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
717   * @ioa_cfg:	ioa config struct
718   * @clr_ints:     interrupts to clear
719   *
720   * This function masks all interrupts on the adapter, then clears the
721   * interrupts specified in the mask
722   *
723   * Return value:
724   * 	none
725   **/
726  static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
727  					  u32 clr_ints)
728  {
729  	int i;
730  
731  	/* Stop new interrupts */
732  	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
733  		spin_lock(&ioa_cfg->hrrq[i]._lock);
734  		ioa_cfg->hrrq[i].allow_interrupts = 0;
735  		spin_unlock(&ioa_cfg->hrrq[i]._lock);
736  	}
737  
738  	/* Set interrupt mask to stop all new interrupts */
739  	if (ioa_cfg->sis64)
740  		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
741  	else
742  		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
743  
744  	/* Clear any pending interrupts */
745  	if (ioa_cfg->sis64)
746  		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
747  	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
748  	readl(ioa_cfg->regs.sense_interrupt_reg);
749  }
750  
751  /**
752   * ipr_save_pcix_cmd_reg - Save PCI-X command register
753   * @ioa_cfg:	ioa config struct
754   *
755   * Return value:
756   * 	0 on success / -EIO on failure
757   **/
758  static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
759  {
760  	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
761  	int rc;
762  
763  	if (pcix_cmd_reg == 0)
764  		return 0;
765  
766  	rc = pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
767  				  &ioa_cfg->saved_pcix_cmd_reg);
768  	if (rc != PCIBIOS_SUCCESSFUL) {
769  		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
770  		return -EIO;
771  	}
772  
773  	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
774  	return 0;
775  }
776  
777  /**
778   * ipr_set_pcix_cmd_reg - Setup PCI-X command register
779   * @ioa_cfg:	ioa config struct
780   *
781   * Return value:
782   * 	0 on success / -EIO on failure
783   **/
784  static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
785  {
786  	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
787  	int rc;
788  
789  	if (pcix_cmd_reg) {
790  		rc = pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
791  					   ioa_cfg->saved_pcix_cmd_reg);
792  		if (rc != PCIBIOS_SUCCESSFUL) {
793  			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
794  			return -EIO;
795  		}
796  	}
797  
798  	return 0;
799  }
800  
801  
802  /**
803   * __ipr_scsi_eh_done - mid-layer done function for aborted ops
804   * @ipr_cmd:	ipr command struct
805   *
806   * This function is invoked by the interrupt handler for
807   * ops generated by the SCSI mid-layer which are being aborted.
808   *
809   * Return value:
810   * 	none
811   **/
812  static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
813  {
814  	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
815  
816  	scsi_cmd->result |= (DID_ERROR << 16);
817  
818  	scsi_dma_unmap(ipr_cmd->scsi_cmd);
819  	scsi_done(scsi_cmd);
820  	if (ipr_cmd->eh_comp)
821  		complete(ipr_cmd->eh_comp);
822  	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
823  }
824  
825  /**
826   * ipr_scsi_eh_done - mid-layer done function for aborted ops
827   * @ipr_cmd:	ipr command struct
828   *
829   * This function is invoked by the interrupt handler for
830   * ops generated by the SCSI mid-layer which are being aborted.
831   *
832   * Return value:
833   * 	none
834   **/
835  static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
836  {
837  	unsigned long hrrq_flags;
838  	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
839  
840  	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
841  	__ipr_scsi_eh_done(ipr_cmd);
842  	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
843  }
844  
845  /**
846   * ipr_fail_all_ops - Fails all outstanding ops.
847   * @ioa_cfg:	ioa config struct
848   *
849   * This function fails all outstanding ops.
850   *
851   * Return value:
852   * 	none
853   **/
854  static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
855  {
856  	struct ipr_cmnd *ipr_cmd, *temp;
857  	struct ipr_hrr_queue *hrrq;
858  
859  	ENTER;
860  	for_each_hrrq(hrrq, ioa_cfg) {
861  		spin_lock(&hrrq->_lock);
862  		list_for_each_entry_safe(ipr_cmd,
863  					temp, &hrrq->hrrq_pending_q, queue) {
864  			list_del(&ipr_cmd->queue);
865  
866  			ipr_cmd->s.ioasa.hdr.ioasc =
867  				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
868  			ipr_cmd->s.ioasa.hdr.ilid =
869  				cpu_to_be32(IPR_DRIVER_ILID);
870  
871  			if (ipr_cmd->scsi_cmd)
872  				ipr_cmd->done = __ipr_scsi_eh_done;
873  
874  			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
875  				     IPR_IOASC_IOA_WAS_RESET);
876  			del_timer(&ipr_cmd->timer);
877  			ipr_cmd->done(ipr_cmd);
878  		}
879  		spin_unlock(&hrrq->_lock);
880  	}
881  	LEAVE;
882  }
883  
884  /**
885   * ipr_send_command -  Send driver initiated requests.
886   * @ipr_cmd:		ipr command struct
887   *
888   * This function sends a command to the adapter using the correct write call.
889   * In the case of sis64, calculate the ioarcb size required. Then or in the
890   * appropriate bits.
891   *
892   * Return value:
893   * 	none
894   **/
895  static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
896  {
897  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
898  	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
899  
900  	if (ioa_cfg->sis64) {
901  		/* The default size is 256 bytes */
902  		send_dma_addr |= 0x1;
903  
904  		/* If the number of ioadls * size of ioadl > 128 bytes,
905  		   then use a 512 byte ioarcb */
906  		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
907  			send_dma_addr |= 0x4;
908  		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
909  	} else
910  		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
911  }
912  
913  /**
914   * ipr_do_req -  Send driver initiated requests.
915   * @ipr_cmd:		ipr command struct
916   * @done:			done function
917   * @timeout_func:	timeout function
918   * @timeout:		timeout value
919   *
920   * This function sends the specified command to the adapter with the
921   * timeout given. The done function is invoked on command completion.
922   *
923   * Return value:
924   * 	none
925   **/
926  static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
927  		       void (*done) (struct ipr_cmnd *),
928  		       void (*timeout_func) (struct timer_list *), u32 timeout)
929  {
930  	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
931  
932  	ipr_cmd->done = done;
933  
934  	ipr_cmd->timer.expires = jiffies + timeout;
935  	ipr_cmd->timer.function = timeout_func;
936  
937  	add_timer(&ipr_cmd->timer);
938  
939  	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
940  
941  	ipr_send_command(ipr_cmd);
942  }
943  
944  /**
945   * ipr_internal_cmd_done - Op done function for an internally generated op.
946   * @ipr_cmd:	ipr command struct
947   *
948   * This function is the op done function for an internally generated,
949   * blocking op. It simply wakes the sleeping thread.
950   *
951   * Return value:
952   * 	none
953   **/
954  static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
955  {
956  	if (ipr_cmd->sibling)
957  		ipr_cmd->sibling = NULL;
958  	else
959  		complete(&ipr_cmd->completion);
960  }
961  
962  /**
963   * ipr_init_ioadl - initialize the ioadl for the correct SIS type
964   * @ipr_cmd:	ipr command struct
965   * @dma_addr:	dma address
966   * @len:	transfer length
967   * @flags:	ioadl flag value
968   *
969   * This function initializes an ioadl in the case where there is only a single
970   * descriptor.
971   *
972   * Return value:
973   * 	nothing
974   **/
975  static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
976  			   u32 len, int flags)
977  {
978  	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
979  	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
980  
981  	ipr_cmd->dma_use_sg = 1;
982  
983  	if (ipr_cmd->ioa_cfg->sis64) {
984  		ioadl64->flags = cpu_to_be32(flags);
985  		ioadl64->data_len = cpu_to_be32(len);
986  		ioadl64->address = cpu_to_be64(dma_addr);
987  
988  		ipr_cmd->ioarcb.ioadl_len =
989  		       	cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
990  		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
991  	} else {
992  		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
993  		ioadl->address = cpu_to_be32(dma_addr);
994  
995  		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
996  			ipr_cmd->ioarcb.read_ioadl_len =
997  				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
998  			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
999  		} else {
1000  			ipr_cmd->ioarcb.ioadl_len =
1001  			       	cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1002  			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1003  		}
1004  	}
1005  }
1006  
1007  /**
1008   * ipr_send_blocking_cmd - Send command and sleep on its completion.
1009   * @ipr_cmd:	ipr command struct
1010   * @timeout_func:	function to invoke if command times out
1011   * @timeout:	timeout
1012   *
1013   * Return value:
1014   * 	none
1015   **/
1016  static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1017  				  void (*timeout_func) (struct timer_list *),
1018  				  u32 timeout)
1019  {
1020  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1021  
1022  	init_completion(&ipr_cmd->completion);
1023  	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1024  
1025  	spin_unlock_irq(ioa_cfg->host->host_lock);
1026  	wait_for_completion(&ipr_cmd->completion);
1027  	spin_lock_irq(ioa_cfg->host->host_lock);
1028  }
1029  
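/**
 * ipr_get_hrrq_index - Select an HRR queue index for a new command
 * @ioa_cfg:	ioa config struct
 *
 * Round-robins across the available HRR queues. When more than one
 * queue exists, queue 0 (IPR_INIT_HRRQ) is skipped and left for
 * driver-initiated commands.
 *
 * Return value:
 * 	index of the HRR queue to use
 **/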
1030  static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1031  {
1032  	unsigned int hrrq;
1033  
1034  	if (ioa_cfg->hrrq_num == 1)
1035  		hrrq = 0;
1036  	else {
1037  		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1038  		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1039  	}
1040  	return hrrq;
1041  }
1042  
1043  /**
1044   * ipr_send_hcam - Send an HCAM to the adapter.
1045   * @ioa_cfg:	ioa config struct
1046   * @type:		HCAM type
1047   * @hostrcb:	hostrcb struct
1048   *
1049   * This function will send a Host Controlled Async command to the adapter.
1050   * If HCAMs are currently not allowed to be issued to the adapter, it will
1051   * place the hostrcb on the free queue.
1052   *
1053   * Return value:
1054   * 	none
1055   **/
1056  static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1057  			  struct ipr_hostrcb *hostrcb)
1058  {
1059  	struct ipr_cmnd *ipr_cmd;
1060  	struct ipr_ioarcb *ioarcb;
1061  
1062  	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
1063  		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
1064  		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
1065  		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1066  
1067  		ipr_cmd->u.hostrcb = hostrcb;
1068  		ioarcb = &ipr_cmd->ioarcb;
1069  
1070  		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1071  		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1072  		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1073  		ioarcb->cmd_pkt.cdb[1] = type;
1074  		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1075  		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1076  
1077  		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1078  			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
1079  
1080  		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1081  			ipr_cmd->done = ipr_process_ccn;
1082  		else
1083  			ipr_cmd->done = ipr_process_error;
1084  
1085  		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1086  
1087  		ipr_send_command(ipr_cmd);
1088  	} else {
1089  		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1090  	}
1091  }
1092  
1093  /**
1094   * ipr_init_res_entry - Initialize a resource entry struct.
1095   * @res:	resource entry struct
1096   * @cfgtew:	config table entry wrapper struct
1097   *
1098   * Return value:
1099   * 	none
1100   **/
1101  static void ipr_init_res_entry(struct ipr_resource_entry *res,
1102  			       struct ipr_config_table_entry_wrapper *cfgtew)
1103  {
1104  	int found = 0;
1105  	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1106  	struct ipr_resource_entry *gscsi_res = NULL;
1107  
1108  	res->needs_sync_complete = 0;
1109  	res->in_erp = 0;
1110  	res->add_to_ml = 0;
1111  	res->del_from_ml = 0;
1112  	res->resetting_device = 0;
1113  	res->reset_occurred = 0;
1114  	res->sdev = NULL;
1115  
1116  	if (ioa_cfg->sis64) {
1117  		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1118  		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1119  		res->qmodel = IPR_QUEUEING_MODEL64(res);
1120  		res->type = cfgtew->u.cfgte64->res_type;
1121  
1122  		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1123  			sizeof(res->res_path));
1124  
1125  		res->bus = 0;
1126  		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1127  			sizeof(res->dev_lun.scsi_lun));
1128  		res->lun = scsilun_to_int(&res->dev_lun);
1129  
1130  		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1131  			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1132  				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1133  					found = 1;
1134  					res->target = gscsi_res->target;
1135  					break;
1136  				}
1137  			}
1138  			if (!found) {
1139  				res->target = find_first_zero_bit(ioa_cfg->target_ids,
1140  								  ioa_cfg->max_devs_supported);
1141  				set_bit(res->target, ioa_cfg->target_ids);
1142  			}
1143  		} else if (res->type == IPR_RES_TYPE_IOAFP) {
1144  			res->bus = IPR_IOAFP_VIRTUAL_BUS;
1145  			res->target = 0;
1146  		} else if (res->type == IPR_RES_TYPE_ARRAY) {
1147  			res->bus = IPR_ARRAY_VIRTUAL_BUS;
1148  			res->target = find_first_zero_bit(ioa_cfg->array_ids,
1149  							  ioa_cfg->max_devs_supported);
1150  			set_bit(res->target, ioa_cfg->array_ids);
1151  		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1152  			res->bus = IPR_VSET_VIRTUAL_BUS;
1153  			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1154  							  ioa_cfg->max_devs_supported);
1155  			set_bit(res->target, ioa_cfg->vset_ids);
1156  		} else {
1157  			res->target = find_first_zero_bit(ioa_cfg->target_ids,
1158  							  ioa_cfg->max_devs_supported);
1159  			set_bit(res->target, ioa_cfg->target_ids);
1160  		}
1161  	} else {
1162  		res->qmodel = IPR_QUEUEING_MODEL(res);
1163  		res->flags = cfgtew->u.cfgte->flags;
1164  		if (res->flags & IPR_IS_IOA_RESOURCE)
1165  			res->type = IPR_RES_TYPE_IOAFP;
1166  		else
1167  			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1168  
1169  		res->bus = cfgtew->u.cfgte->res_addr.bus;
1170  		res->target = cfgtew->u.cfgte->res_addr.target;
1171  		res->lun = cfgtew->u.cfgte->res_addr.lun;
1172  		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
1173  	}
1174  }
1175  
1176  /**
1177   * ipr_is_same_device - Determine if two devices are the same.
1178   * @res:	resource entry struct
1179   * @cfgtew:	config table entry wrapper struct
1180   *
1181   * Return value:
1182   * 	1 if the devices are the same / 0 otherwise
1183   **/
1184  static int ipr_is_same_device(struct ipr_resource_entry *res,
1185  			      struct ipr_config_table_entry_wrapper *cfgtew)
1186  {
1187  	if (res->ioa_cfg->sis64) {
1188  		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1189  					sizeof(cfgtew->u.cfgte64->dev_id)) &&
1190  			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1191  					sizeof(cfgtew->u.cfgte64->lun))) {
1192  			return 1;
1193  		}
1194  	} else {
1195  		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1196  		    res->target == cfgtew->u.cfgte->res_addr.target &&
1197  		    res->lun == cfgtew->u.cfgte->res_addr.lun)
1198  			return 1;
1199  	}
1200  
1201  	return 0;
1202  }
1203  
1204  /**
1205   * __ipr_format_res_path - Format the resource path for printing.
1206   * @res_path:	resource path
1207   * @buffer:	buffer
1208   * @len:	length of buffer provided
1209   *
1210   * Return value:
1211   * 	pointer to buffer
1212   **/
1213  static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
1214  {
1215  	int i;
1216  	char *p = buffer;
1217  
1218  	*p = '\0';
1219  	p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
1220  	for (i = 1; i < IPR_RES_PATH_BYTES && res_path[i] != 0xff; i++)
1221  		p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);
1222  
1223  	return buffer;
1224  }
1225  
1226  /**
1227   * ipr_format_res_path - Format the resource path for printing.
1228   * @ioa_cfg:	ioa config struct
1229   * @res_path:	resource path
1230   * @buffer:	buffer
1231   * @len:	length of buffer provided
1232   *
1233   * Return value:
1234   *	pointer to buffer
1235   **/
1236  static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1237  				 u8 *res_path, char *buffer, int len)
1238  {
1239  	char *p = buffer;
1240  
1241  	*p = '\0';
1242  	p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1243  	__ipr_format_res_path(res_path, p, len - (p - buffer));
1244  	return buffer;
1245  }
1246  
1247  /**
1248   * ipr_update_res_entry - Update the resource entry.
1249   * @res:	resource entry struct
1250   * @cfgtew:	config table entry wrapper struct
1251   *
1252   * Return value:
1253   *      none
1254   **/
1255  static void ipr_update_res_entry(struct ipr_resource_entry *res,
1256  				 struct ipr_config_table_entry_wrapper *cfgtew)
1257  {
1258  	char buffer[IPR_MAX_RES_PATH_LENGTH];
1259  	int new_path = 0;
1260  
1261  	if (res->ioa_cfg->sis64) {
1262  		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1263  		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
1264  		res->type = cfgtew->u.cfgte64->res_type;
1265  
1266  		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1267  			sizeof(struct ipr_std_inq_data));
1268  
1269  		res->qmodel = IPR_QUEUEING_MODEL64(res);
1270  		res->res_handle = cfgtew->u.cfgte64->res_handle;
1271  		res->dev_id = cfgtew->u.cfgte64->dev_id;
1272  
1273  		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1274  			sizeof(res->dev_lun.scsi_lun));
1275  
1276  		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1277  					sizeof(res->res_path))) {
1278  			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1279  				sizeof(res->res_path));
1280  			new_path = 1;
1281  		}
1282  
1283  		if (res->sdev && new_path)
1284  			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1285  				    ipr_format_res_path(res->ioa_cfg,
1286  					res->res_path, buffer, sizeof(buffer)));
1287  	} else {
1288  		res->flags = cfgtew->u.cfgte->flags;
1289  		if (res->flags & IPR_IS_IOA_RESOURCE)
1290  			res->type = IPR_RES_TYPE_IOAFP;
1291  		else
1292  			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1293  
1294  		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1295  			sizeof(struct ipr_std_inq_data));
1296  
1297  		res->qmodel = IPR_QUEUEING_MODEL(res);
1298  		res->res_handle = cfgtew->u.cfgte->res_handle;
1299  	}
1300  }
1301  
1302  /**
1303   * ipr_clear_res_target - Clear the bit in the bit map representing the target
1304   * 			  for the resource.
1305   * @res:	resource entry struct
1306   *
1307   * Return value:
1308   *      none
1309   **/
1310  static void ipr_clear_res_target(struct ipr_resource_entry *res)
1311  {
1312  	struct ipr_resource_entry *gscsi_res = NULL;
1313  	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1314  
1315  	if (!ioa_cfg->sis64)
1316  		return;
1317  
1318  	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1319  		clear_bit(res->target, ioa_cfg->array_ids);
1320  	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1321  		clear_bit(res->target, ioa_cfg->vset_ids);
1322  	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1323  		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1324  			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1325  				return;
1326  		clear_bit(res->target, ioa_cfg->target_ids);
1327  
1328  	} else if (res->bus == 0)
1329  		clear_bit(res->target, ioa_cfg->target_ids);
1330  }
1331  
1332  /**
1333   * ipr_handle_config_change - Handle a config change from the adapter
1334   * @ioa_cfg:	ioa config struct
1335   * @hostrcb:	hostrcb
1336   *
1337   * Return value:
1338   * 	none
1339   **/
1340  static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1341  				     struct ipr_hostrcb *hostrcb)
1342  {
1343  	struct ipr_resource_entry *res = NULL;
1344  	struct ipr_config_table_entry_wrapper cfgtew;
1345  	__be32 cc_res_handle;
1346  
1347  	u32 is_ndn = 1;
1348  
1349  	if (ioa_cfg->sis64) {
1350  		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1351  		cc_res_handle = cfgtew.u.cfgte64->res_handle;
1352  	} else {
1353  		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1354  		cc_res_handle = cfgtew.u.cfgte->res_handle;
1355  	}
1356  
1357  	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1358  		if (res->res_handle == cc_res_handle) {
1359  			is_ndn = 0;
1360  			break;
1361  		}
1362  	}
1363  
1364  	if (is_ndn) {
1365  		if (list_empty(&ioa_cfg->free_res_q)) {
1366  			ipr_send_hcam(ioa_cfg,
1367  				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1368  				      hostrcb);
1369  			return;
1370  		}
1371  
1372  		res = list_entry(ioa_cfg->free_res_q.next,
1373  				 struct ipr_resource_entry, queue);
1374  
1375  		list_del(&res->queue);
1376  		ipr_init_res_entry(res, &cfgtew);
1377  		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1378  	}
1379  
1380  	ipr_update_res_entry(res, &cfgtew);
1381  
1382  	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1383  		if (res->sdev) {
1384  			res->del_from_ml = 1;
1385  			res->res_handle = IPR_INVALID_RES_HANDLE;
1386  			schedule_work(&ioa_cfg->work_q);
1387  		} else {
1388  			ipr_clear_res_target(res);
1389  			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1390  		}
1391  	} else if (!res->sdev || res->del_from_ml) {
1392  		res->add_to_ml = 1;
1393  		schedule_work(&ioa_cfg->work_q);
1394  	}
1395  
1396  	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1397  }
1398  
1399  /**
1400   * ipr_process_ccn - Op done function for a CCN.
1401   * @ipr_cmd:	ipr command struct
1402   *
1403   * This function is the op done function for a configuration
1404   * change notification host controlled async from the adapter.
1405   *
1406   * Return value:
1407   * 	none
1408   **/
1409  static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1410  {
1411  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1412  	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1413  	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1414  
1415  	list_del_init(&hostrcb->queue);
1416  	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
1417  
1418  	if (ioasc) {
1419  		if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1420  		    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
1421  			dev_err(&ioa_cfg->pdev->dev,
1422  				"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1423  
1424  		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1425  	} else {
1426  		ipr_handle_config_change(ioa_cfg, hostrcb);
1427  	}
1428  }
1429  
1430  /**
1431   * strip_whitespace - Strip and pad trailing whitespace.
1432   * @i:		size of buffer
1433   * @buf:	string to modify
1434   *
1435   * This function will strip all trailing whitespace and
1436   * NUL terminate the string.
1437   *
1438   **/
1439  static void strip_whitespace(int i, char *buf)
1440  {
1441  	if (i < 1)
1442  		return;
1443  	i--;
1444  	while (i && buf[i] == ' ')
1445  		i--;
1446  	buf[i+1] = '\0';
1447  }
1448  
1449  /**
1450   * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1451   * @prefix:		string to print at start of printk
1452   * @hostrcb:	hostrcb pointer
1453   * @vpd:		vendor/product id/sn struct
1454   *
1455   * Return value:
1456   * 	none
1457   **/
1458  static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1459  				struct ipr_vpd *vpd)
1460  {
1461  	char vendor_id[IPR_VENDOR_ID_LEN + 1];
1462  	char product_id[IPR_PROD_ID_LEN + 1];
1463  	char sn[IPR_SERIAL_NUM_LEN + 1];
1464  
1465  	memcpy(vendor_id, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1466  	strip_whitespace(IPR_VENDOR_ID_LEN, vendor_id);
1467  
1468  	memcpy(product_id, vpd->vpids.product_id, IPR_PROD_ID_LEN);
1469  	strip_whitespace(IPR_PROD_ID_LEN, product_id);
1470  
1471  	memcpy(sn, vpd->sn, IPR_SERIAL_NUM_LEN);
1472  	strip_whitespace(IPR_SERIAL_NUM_LEN, sn);
1473  
1474  	ipr_hcam_err(hostrcb, "%s VPID/SN: %s %s %s\n", prefix,
1475  		     vendor_id, product_id, sn);
1476  }
1477  
1478  /**
1479   * ipr_log_vpd - Log the passed VPD to the error log.
1480   * @vpd:		vendor/product id/sn struct
1481   *
1482   * Return value:
1483   * 	none
1484   **/
1485  static void ipr_log_vpd(struct ipr_vpd *vpd)
1486  {
1487  	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1488  		    + IPR_SERIAL_NUM_LEN];
1489  
1490  	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1491  	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1492  	       IPR_PROD_ID_LEN);
1493  	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1494  	ipr_err("Vendor/Product ID: %s\n", buffer);
1495  
1496  	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1497  	buffer[IPR_SERIAL_NUM_LEN] = '\0';
1498  	ipr_err("    Serial Number: %s\n", buffer);
1499  }
1500  
1501  /**
1502   * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1503   * @prefix:		string to print at start of printk
1504   * @hostrcb:	hostrcb pointer
1505   * @vpd:		vendor/product id/sn/wwn struct
1506   *
1507   * Return value:
1508   * 	none
1509   **/
1510  static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1511  				    struct ipr_ext_vpd *vpd)
1512  {
1513  	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1514  	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1515  		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1516  }
1517  
1518  /**
1519   * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1520   * @vpd:		vendor/product id/sn/wwn struct
1521   *
1522   * Return value:
1523   * 	none
1524   **/
1525  static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1526  {
1527  	ipr_log_vpd(&vpd->vpd);
1528  	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1529  		be32_to_cpu(vpd->wwid[1]));
1530  }
1531  
1532  /**
1533   * ipr_log_enhanced_cache_error - Log a cache error.
1534   * @ioa_cfg:	ioa config struct
1535   * @hostrcb:	hostrcb struct
1536   *
1537   * Return value:
1538   * 	none
1539   **/
1540  static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1541  					 struct ipr_hostrcb *hostrcb)
1542  {
1543  	struct ipr_hostrcb_type_12_error *error;
1544  
1545  	if (ioa_cfg->sis64)
1546  		error = &hostrcb->hcam.u.error64.u.type_12_error;
1547  	else
1548  		error = &hostrcb->hcam.u.error.u.type_12_error;
1549  
1550  	ipr_err("-----Current Configuration-----\n");
1551  	ipr_err("Cache Directory Card Information:\n");
1552  	ipr_log_ext_vpd(&error->ioa_vpd);
1553  	ipr_err("Adapter Card Information:\n");
1554  	ipr_log_ext_vpd(&error->cfc_vpd);
1555  
1556  	ipr_err("-----Expected Configuration-----\n");
1557  	ipr_err("Cache Directory Card Information:\n");
1558  	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1559  	ipr_err("Adapter Card Information:\n");
1560  	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1561  
1562  	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1563  		     be32_to_cpu(error->ioa_data[0]),
1564  		     be32_to_cpu(error->ioa_data[1]),
1565  		     be32_to_cpu(error->ioa_data[2]));
1566  }
1567  
1568  /**
1569   * ipr_log_cache_error - Log a cache error.
1570   * @ioa_cfg:	ioa config struct
1571   * @hostrcb:	hostrcb struct
1572   *
1573   * Return value:
1574   * 	none
1575   **/
1576  static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1577  				struct ipr_hostrcb *hostrcb)
1578  {
1579  	struct ipr_hostrcb_type_02_error *error =
1580  		&hostrcb->hcam.u.error.u.type_02_error;
1581  
1582  	ipr_err("-----Current Configuration-----\n");
1583  	ipr_err("Cache Directory Card Information:\n");
1584  	ipr_log_vpd(&error->ioa_vpd);
1585  	ipr_err("Adapter Card Information:\n");
1586  	ipr_log_vpd(&error->cfc_vpd);
1587  
1588  	ipr_err("-----Expected Configuration-----\n");
1589  	ipr_err("Cache Directory Card Information:\n");
1590  	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1591  	ipr_err("Adapter Card Information:\n");
1592  	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1593  
1594  	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1595  		     be32_to_cpu(error->ioa_data[0]),
1596  		     be32_to_cpu(error->ioa_data[1]),
1597  		     be32_to_cpu(error->ioa_data[2]));
1598  }
1599  
1600  /**
1601   * ipr_log_enhanced_config_error - Log a configuration error.
1602   * @ioa_cfg:	ioa config struct
1603   * @hostrcb:	hostrcb struct
1604   *
1605   * Return value:
1606   * 	none
1607   **/
1608  static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1609  					  struct ipr_hostrcb *hostrcb)
1610  {
1611  	int errors_logged, i;
1612  	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1613  	struct ipr_hostrcb_type_13_error *error;
1614  
1615  	error = &hostrcb->hcam.u.error.u.type_13_error;
1616  	errors_logged = be32_to_cpu(error->errors_logged);
1617  
1618  	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1619  		be32_to_cpu(error->errors_detected), errors_logged);
1620  
1621  	dev_entry = error->dev;
1622  
1623  	for (i = 0; i < errors_logged; i++, dev_entry++) {
1624  		ipr_err_separator;
1625  
1626  		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1627  		ipr_log_ext_vpd(&dev_entry->vpd);
1628  
1629  		ipr_err("-----New Device Information-----\n");
1630  		ipr_log_ext_vpd(&dev_entry->new_vpd);
1631  
1632  		ipr_err("Cache Directory Card Information:\n");
1633  		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1634  
1635  		ipr_err("Adapter Card Information:\n");
1636  		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1637  	}
1638  }
1639  
1640  /**
1641   * ipr_log_sis64_config_error - Log a sis64 configuration error.
1642   * @ioa_cfg:	ioa config struct
1643   * @hostrcb:	hostrcb struct
1644   *
1645   * Return value:
1646   * 	none
1647   **/
1648  static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1649  				       struct ipr_hostrcb *hostrcb)
1650  {
1651  	int errors_logged, i;
1652  	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1653  	struct ipr_hostrcb_type_23_error *error;
1654  	char buffer[IPR_MAX_RES_PATH_LENGTH];
1655  
1656  	error = &hostrcb->hcam.u.error64.u.type_23_error;
1657  	errors_logged = be32_to_cpu(error->errors_logged);
1658  
1659  	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1660  		be32_to_cpu(error->errors_detected), errors_logged);
1661  
1662  	dev_entry = error->dev;
1663  
1664  	for (i = 0; i < errors_logged; i++, dev_entry++) {
1665  		ipr_err_separator;
1666  
1667  		ipr_err("Device %d : %s", i + 1,
1668  			__ipr_format_res_path(dev_entry->res_path,
1669  					      buffer, sizeof(buffer)));
1670  		ipr_log_ext_vpd(&dev_entry->vpd);
1671  
1672  		ipr_err("-----New Device Information-----\n");
1673  		ipr_log_ext_vpd(&dev_entry->new_vpd);
1674  
1675  		ipr_err("Cache Directory Card Information:\n");
1676  		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1677  
1678  		ipr_err("Adapter Card Information:\n");
1679  		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1680  	}
1681  }
1682  
1683  /**
1684   * ipr_log_config_error - Log a configuration error.
1685   * @ioa_cfg:	ioa config struct
1686   * @hostrcb:	hostrcb struct
1687   *
1688   * Return value:
1689   * 	none
1690   **/
1691  static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1692  				 struct ipr_hostrcb *hostrcb)
1693  {
1694  	int errors_logged, i;
1695  	struct ipr_hostrcb_device_data_entry *dev_entry;
1696  	struct ipr_hostrcb_type_03_error *error;
1697  
1698  	error = &hostrcb->hcam.u.error.u.type_03_error;
1699  	errors_logged = be32_to_cpu(error->errors_logged);
1700  
1701  	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1702  		be32_to_cpu(error->errors_detected), errors_logged);
1703  
1704  	dev_entry = error->dev;
1705  
1706  	for (i = 0; i < errors_logged; i++, dev_entry++) {
1707  		ipr_err_separator;
1708  
1709  		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1710  		ipr_log_vpd(&dev_entry->vpd);
1711  
1712  		ipr_err("-----New Device Information-----\n");
1713  		ipr_log_vpd(&dev_entry->new_vpd);
1714  
1715  		ipr_err("Cache Directory Card Information:\n");
1716  		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1717  
1718  		ipr_err("Adapter Card Information:\n");
1719  		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1720  
1721  		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1722  			be32_to_cpu(dev_entry->ioa_data[0]),
1723  			be32_to_cpu(dev_entry->ioa_data[1]),
1724  			be32_to_cpu(dev_entry->ioa_data[2]),
1725  			be32_to_cpu(dev_entry->ioa_data[3]),
1726  			be32_to_cpu(dev_entry->ioa_data[4]));
1727  	}
1728  }
1729  
1730  /**
1731   * ipr_log_enhanced_array_error - Log an array configuration error.
1732   * @ioa_cfg:	ioa config struct
1733   * @hostrcb:	hostrcb struct
1734   *
1735   * Return value:
1736   * 	none
1737   **/
1738  static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1739  					 struct ipr_hostrcb *hostrcb)
1740  {
1741  	int i, num_entries;
1742  	struct ipr_hostrcb_type_14_error *error;
1743  	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1744  	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1745  
1746  	error = &hostrcb->hcam.u.error.u.type_14_error;
1747  
1748  	ipr_err_separator;
1749  
1750  	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1751  		error->protection_level,
1752  		ioa_cfg->host->host_no,
1753  		error->last_func_vset_res_addr.bus,
1754  		error->last_func_vset_res_addr.target,
1755  		error->last_func_vset_res_addr.lun);
1756  
1757  	ipr_err_separator;
1758  
1759  	array_entry = error->array_member;
1760  	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1761  			    ARRAY_SIZE(error->array_member));
1762  
1763  	for (i = 0; i < num_entries; i++, array_entry++) {
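		/* Skip members whose serial number is all ASCII '0's (nothing to report) */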
1764  		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1765  			continue;
1766  
1767  		if (be32_to_cpu(error->exposed_mode_adn) == i)
1768  			ipr_err("Exposed Array Member %d:\n", i);
1769  		else
1770  			ipr_err("Array Member %d:\n", i);
1771  
1772  		ipr_log_ext_vpd(&array_entry->vpd);
1773  		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1774  		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1775  				 "Expected Location");
1776  
1777  		ipr_err_separator;
1778  	}
1779  }
1780  
1781  /**
1782   * ipr_log_array_error - Log an array configuration error.
1783   * @ioa_cfg:	ioa config struct
1784   * @hostrcb:	hostrcb struct
1785   *
1786   * Return value:
1787   * 	none
1788   **/
1789  static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1790  				struct ipr_hostrcb *hostrcb)
1791  {
1792  	int i;
1793  	struct ipr_hostrcb_type_04_error *error;
1794  	struct ipr_hostrcb_array_data_entry *array_entry;
1795  	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1796  
1797  	error = &hostrcb->hcam.u.error.u.type_04_error;
1798  
1799  	ipr_err_separator;
1800  
1801  	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1802  		error->protection_level,
1803  		ioa_cfg->host->host_no,
1804  		error->last_func_vset_res_addr.bus,
1805  		error->last_func_vset_res_addr.target,
1806  		error->last_func_vset_res_addr.lun);
1807  
1808  	ipr_err_separator;
1809  
1810  	array_entry = error->array_member;
1811  
1812  	for (i = 0; i < 18; i++) {
1813  		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1814  			continue;
1815  
1816  		if (be32_to_cpu(error->exposed_mode_adn) == i)
1817  			ipr_err("Exposed Array Member %d:\n", i);
1818  		else
1819  			ipr_err("Array Member %d:\n", i);
1820  
1821  		ipr_log_vpd(&array_entry->vpd);
1822  
1823  		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1824  		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1825  				 "Expected Location");
1826  
1827  		ipr_err_separator;
1828  
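		/*
		 * Entries 0-9 live in array_member[]; entries 10-17 continue in
		 * array_member2[], so switch pointers after the tenth entry.
		 */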
1829  		if (i == 9)
1830  			array_entry = error->array_member2;
1831  		else
1832  			array_entry++;
1833  	}
1834  }
1835  
1836  /**
1837   * ipr_log_hex_data - Log additional hex IOA error data.
1838   * @ioa_cfg:	ioa config struct
1839   * @data:		IOA error data
1840   * @len:		data length
1841   *
1842   * Return value:
1843   * 	none
1844   **/
1845  static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
1846  {
1847  	int i;
1848  
1849  	if (len == 0)
1850  		return;
1851  
1852  	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1853  		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1854  
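	/* i indexes 32-bit words; each line prints the byte offset plus four words (16 bytes) */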
1855  	for (i = 0; i < len / 4; i += 4) {
1856  		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1857  			be32_to_cpu(data[i]),
1858  			be32_to_cpu(data[i+1]),
1859  			be32_to_cpu(data[i+2]),
1860  			be32_to_cpu(data[i+3]));
1861  	}
1862  }
1863  
1864  /**
1865   * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1866   * @ioa_cfg:	ioa config struct
1867   * @hostrcb:	hostrcb struct
1868   *
1869   * Return value:
1870   * 	none
1871   **/
1872  static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1873  					    struct ipr_hostrcb *hostrcb)
1874  {
1875  	struct ipr_hostrcb_type_17_error *error;
1876  
1877  	if (ioa_cfg->sis64)
1878  		error = &hostrcb->hcam.u.error64.u.type_17_error;
1879  	else
1880  		error = &hostrcb->hcam.u.error.u.type_17_error;
1881  
1882  	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1883  	strim(error->failure_reason);
1884  
1885  	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1886  		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1887  	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1888  	ipr_log_hex_data(ioa_cfg, error->data,
1889  			 be32_to_cpu(hostrcb->hcam.length) -
1890  			 (offsetof(struct ipr_hostrcb_error, u) +
1891  			  offsetof(struct ipr_hostrcb_type_17_error, data)));
1892  }
1893  
1894  /**
1895   * ipr_log_dual_ioa_error - Log a dual adapter error.
1896   * @ioa_cfg:	ioa config struct
1897   * @hostrcb:	hostrcb struct
1898   *
1899   * Return value:
1900   * 	none
1901   **/
1902  static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1903  				   struct ipr_hostrcb *hostrcb)
1904  {
1905  	struct ipr_hostrcb_type_07_error *error;
1906  
1907  	error = &hostrcb->hcam.u.error.u.type_07_error;
1908  	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1909  	strim(error->failure_reason);
1910  
1911  	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1912  		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1913  	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1914  	ipr_log_hex_data(ioa_cfg, error->data,
1915  			 be32_to_cpu(hostrcb->hcam.length) -
1916  			 (offsetof(struct ipr_hostrcb_error, u) +
1917  			  offsetof(struct ipr_hostrcb_type_07_error, data)));
1918  }
1919  
1920  static const struct {
1921  	u8 active;
1922  	char *desc;
1923  } path_active_desc[] = {
1924  	{ IPR_PATH_NO_INFO, "Path" },
1925  	{ IPR_PATH_ACTIVE, "Active path" },
1926  	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
1927  };
1928  
1929  static const struct {
1930  	u8 state;
1931  	char *desc;
1932  } path_state_desc[] = {
1933  	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1934  	{ IPR_PATH_HEALTHY, "is healthy" },
1935  	{ IPR_PATH_DEGRADED, "is degraded" },
1936  	{ IPR_PATH_FAILED, "is failed" }
1937  };
1938  
1939  /**
1940   * ipr_log_fabric_path - Log a fabric path error
1941   * @hostrcb:	hostrcb struct
1942   * @fabric:		fabric descriptor
1943   *
1944   * Return value:
1945   * 	none
1946   **/
1947  static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1948  				struct ipr_hostrcb_fabric_desc *fabric)
1949  {
1950  	int i, j;
1951  	u8 path_state = fabric->path_state;
1952  	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1953  	u8 state = path_state & IPR_PATH_STATE_MASK;
1954  
1955  	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1956  		if (path_active_desc[i].active != active)
1957  			continue;
1958  
1959  		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1960  			if (path_state_desc[j].state != state)
1961  				continue;
1962  
1963  			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1964  				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1965  					     path_active_desc[i].desc, path_state_desc[j].desc,
1966  					     fabric->ioa_port);
1967  			} else if (fabric->cascaded_expander == 0xff) {
1968  				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1969  					     path_active_desc[i].desc, path_state_desc[j].desc,
1970  					     fabric->ioa_port, fabric->phy);
1971  			} else if (fabric->phy == 0xff) {
1972  				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1973  					     path_active_desc[i].desc, path_state_desc[j].desc,
1974  					     fabric->ioa_port, fabric->cascaded_expander);
1975  			} else {
1976  				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1977  					     path_active_desc[i].desc, path_state_desc[j].desc,
1978  					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1979  			}
1980  			return;
1981  		}
1982  	}
1983  
1984  	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1985  		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1986  }
1987  
1988  /**
1989   * ipr_log64_fabric_path - Log a fabric path error
1990   * @hostrcb:	hostrcb struct
1991   * @fabric:		fabric descriptor
1992   *
1993   * Return value:
1994   * 	none
1995   **/
1996  static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1997  				  struct ipr_hostrcb64_fabric_desc *fabric)
1998  {
1999  	int i, j;
2000  	u8 path_state = fabric->path_state;
2001  	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2002  	u8 state = path_state & IPR_PATH_STATE_MASK;
2003  	char buffer[IPR_MAX_RES_PATH_LENGTH];
2004  
2005  	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2006  		if (path_active_desc[i].active != active)
2007  			continue;
2008  
2009  		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2010  			if (path_state_desc[j].state != state)
2011  				continue;
2012  
2013  			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2014  				     path_active_desc[i].desc, path_state_desc[j].desc,
2015  				     ipr_format_res_path(hostrcb->ioa_cfg,
2016  						fabric->res_path,
2017  						buffer, sizeof(buffer)));
2018  			return;
2019  		}
2020  	}
2021  
2022  	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
2023  		ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2024  				    buffer, sizeof(buffer)));
2025  }
2026  
2027  static const struct {
2028  	u8 type;
2029  	char *desc;
2030  } path_type_desc[] = {
2031  	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
2032  	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
2033  	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2034  	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2035  };
2036  
2037  static const struct {
2038  	u8 status;
2039  	char *desc;
2040  } path_status_desc[] = {
2041  	{ IPR_PATH_CFG_NO_PROB, "Functional" },
2042  	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
2043  	{ IPR_PATH_CFG_FAILED, "Failed" },
2044  	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
2045  	{ IPR_PATH_NOT_DETECTED, "Missing" },
2046  	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2047  };
2048  
2049  static const char *link_rate[] = {
2050  	"unknown",
2051  	"disabled",
2052  	"phy reset problem",
2053  	"spinup hold",
2054  	"port selector",
2055  	"unknown",
2056  	"unknown",
2057  	"unknown",
2058  	"1.5Gbps",
2059  	"3.0Gbps",
2060  	"unknown",
2061  	"unknown",
2062  	"unknown",
2063  	"unknown",
2064  	"unknown",
2065  	"unknown"
2066  };
2067  
2068  /**
2069   * ipr_log_path_elem - Log a fabric path element.
2070   * @hostrcb:	hostrcb struct
2071   * @cfg:		fabric path element struct
2072   *
2073   * Return value:
2074   * 	none
2075   **/
2076  static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2077  			      struct ipr_hostrcb_config_element *cfg)
2078  {
2079  	int i, j;
2080  	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2081  	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2082  
2083  	if (type == IPR_PATH_CFG_NOT_EXIST)
2084  		return;
2085  
2086  	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2087  		if (path_type_desc[i].type != type)
2088  			continue;
2089  
2090  		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2091  			if (path_status_desc[j].status != status)
2092  				continue;
2093  
2094  			if (type == IPR_PATH_CFG_IOA_PORT) {
2095  				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2096  					     path_status_desc[j].desc, path_type_desc[i].desc,
2097  					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2098  					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2099  			} else {
2100  				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2101  					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2102  						     path_status_desc[j].desc, path_type_desc[i].desc,
2103  						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2104  						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2105  				} else if (cfg->cascaded_expander == 0xff) {
2106  					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2107  						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2108  						     path_type_desc[i].desc, cfg->phy,
2109  						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2110  						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2111  				} else if (cfg->phy == 0xff) {
2112  					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2113  						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2114  						     path_type_desc[i].desc, cfg->cascaded_expander,
2115  						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2116  						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2117  				} else {
2118  					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2119  						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2120  						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2121  						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2122  						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2123  				}
2124  			}
2125  			return;
2126  		}
2127  	}
2128  
2129  	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2130  		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2131  		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2132  		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2133  }
2134  
2135  /**
2136   * ipr_log64_path_elem - Log a fabric path element.
2137   * @hostrcb:	hostrcb struct
2138   * @cfg:		fabric path element struct
2139   *
2140   * Return value:
2141   * 	none
2142   **/
2143  static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2144  				struct ipr_hostrcb64_config_element *cfg)
2145  {
2146  	int i, j;
2147  	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2148  	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2149  	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2150  	char buffer[IPR_MAX_RES_PATH_LENGTH];
2151  
2152  	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2153  		return;
2154  
2155  	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2156  		if (path_type_desc[i].type != type)
2157  			continue;
2158  
2159  		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2160  			if (path_status_desc[j].status != status)
2161  				continue;
2162  
2163  			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2164  				     path_status_desc[j].desc, path_type_desc[i].desc,
2165  				     ipr_format_res_path(hostrcb->ioa_cfg,
2166  					cfg->res_path, buffer, sizeof(buffer)),
2167  					link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2168  					be32_to_cpu(cfg->wwid[0]),
2169  					be32_to_cpu(cfg->wwid[1]));
2170  			return;
2171  		}
2172  	}
2173  	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2174  		     "WWN=%08X%08X\n", cfg->type_status,
2175  		     ipr_format_res_path(hostrcb->ioa_cfg,
2176  			cfg->res_path, buffer, sizeof(buffer)),
2177  			link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2178  			be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2179  }
2180  
2181  /**
2182   * ipr_log_fabric_error - Log a fabric error.
2183   * @ioa_cfg:	ioa config struct
2184   * @hostrcb:	hostrcb struct
2185   *
2186   * Return value:
2187   * 	none
2188   **/
2189  static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2190  				 struct ipr_hostrcb *hostrcb)
2191  {
2192  	struct ipr_hostrcb_type_20_error *error;
2193  	struct ipr_hostrcb_fabric_desc *fabric;
2194  	struct ipr_hostrcb_config_element *cfg;
2195  	int i, add_len;
2196  
2197  	error = &hostrcb->hcam.u.error.u.type_20_error;
2198  	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2199  	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2200  
2201  	add_len = be32_to_cpu(hostrcb->hcam.length) -
2202  		(offsetof(struct ipr_hostrcb_error, u) +
2203  		 offsetof(struct ipr_hostrcb_type_20_error, desc));
2204  
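	/*
	 * Walk the variable-length fabric descriptors; whatever is left after
	 * the last descriptor is dumped as raw hex below.
	 */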
2205  	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2206  		ipr_log_fabric_path(hostrcb, fabric);
2207  		for_each_fabric_cfg(fabric, cfg)
2208  			ipr_log_path_elem(hostrcb, cfg);
2209  
2210  		add_len -= be16_to_cpu(fabric->length);
2211  		fabric = (struct ipr_hostrcb_fabric_desc *)
2212  			((unsigned long)fabric + be16_to_cpu(fabric->length));
2213  	}
2214  
2215  	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2216  }
2217  
2218  /**
2219   * ipr_log_sis64_array_error - Log a sis64 array error.
2220   * @ioa_cfg:	ioa config struct
2221   * @hostrcb:	hostrcb struct
2222   *
2223   * Return value:
2224   * 	none
2225   **/
2226  static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2227  				      struct ipr_hostrcb *hostrcb)
2228  {
2229  	int i, num_entries;
2230  	struct ipr_hostrcb_type_24_error *error;
2231  	struct ipr_hostrcb64_array_data_entry *array_entry;
2232  	char buffer[IPR_MAX_RES_PATH_LENGTH];
2233  	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2234  
2235  	error = &hostrcb->hcam.u.error64.u.type_24_error;
2236  
2237  	ipr_err_separator;
2238  
2239  	ipr_err("RAID %s Array Configuration: %s\n",
2240  		error->protection_level,
2241  		ipr_format_res_path(ioa_cfg, error->last_res_path,
2242  			buffer, sizeof(buffer)));
2243  
2244  	ipr_err_separator;
2245  
2246  	array_entry = error->array_member;
2247  	num_entries = min_t(u32, error->num_entries,
2248  			    ARRAY_SIZE(error->array_member));
2249  
2250  	for (i = 0; i < num_entries; i++, array_entry++) {
2251  
2252  		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2253  			continue;
2254  
2255  		if (error->exposed_mode_adn == i)
2256  			ipr_err("Exposed Array Member %d:\n", i);
2257  		else
2258  			ipr_err("Array Member %d:\n", i);
2259  
2261  		ipr_log_ext_vpd(&array_entry->vpd);
2262  		ipr_err("Current Location: %s\n",
2263  			 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2264  				buffer, sizeof(buffer)));
2265  		ipr_err("Expected Location: %s\n",
2266  			 ipr_format_res_path(ioa_cfg,
2267  				array_entry->expected_res_path,
2268  				buffer, sizeof(buffer)));
2269  
2270  		ipr_err_separator;
2271  	}
2272  }
2273  
2274  /**
2275   * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2276   * @ioa_cfg:	ioa config struct
2277   * @hostrcb:	hostrcb struct
2278   *
2279   * Return value:
2280   * 	none
2281   **/
2282  static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2283  				       struct ipr_hostrcb *hostrcb)
2284  {
2285  	struct ipr_hostrcb_type_30_error *error;
2286  	struct ipr_hostrcb64_fabric_desc *fabric;
2287  	struct ipr_hostrcb64_config_element *cfg;
2288  	int i, add_len;
2289  
2290  	error = &hostrcb->hcam.u.error64.u.type_30_error;
2291  
2292  	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2293  	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2294  
2295  	add_len = be32_to_cpu(hostrcb->hcam.length) -
2296  		(offsetof(struct ipr_hostrcb64_error, u) +
2297  		 offsetof(struct ipr_hostrcb_type_30_error, desc));
2298  
2299  	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2300  		ipr_log64_fabric_path(hostrcb, fabric);
2301  		for_each_fabric_cfg(fabric, cfg)
2302  			ipr_log64_path_elem(hostrcb, cfg);
2303  
2304  		add_len -= be16_to_cpu(fabric->length);
2305  		fabric = (struct ipr_hostrcb64_fabric_desc *)
2306  			((unsigned long)fabric + be16_to_cpu(fabric->length));
2307  	}
2308  
2309  	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
2310  }
2311  
2312  /**
2313   * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2314   * @ioa_cfg:    ioa config struct
2315   * @hostrcb:    hostrcb struct
2316   *
2317   * Return value:
2318   *      none
2319   **/
2320  static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
2321  				       struct ipr_hostrcb *hostrcb)
2322  {
2323  	struct ipr_hostrcb_type_41_error *error;
2324  
2325  	error = &hostrcb->hcam.u.error64.u.type_41_error;
2326  
2327  	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2328  	ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
2329  	ipr_log_hex_data(ioa_cfg, error->data,
2330  			 be32_to_cpu(hostrcb->hcam.length) -
2331  			 (offsetof(struct ipr_hostrcb_error, u) +
2332  			  offsetof(struct ipr_hostrcb_type_41_error, data)));
2333  }

2334  /**
2335   * ipr_log_generic_error - Log an adapter error.
2336   * @ioa_cfg:	ioa config struct
2337   * @hostrcb:	hostrcb struct
2338   *
2339   * Return value:
2340   * 	none
2341   **/
2342  static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2343  				  struct ipr_hostrcb *hostrcb)
2344  {
2345  	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2346  			 be32_to_cpu(hostrcb->hcam.length));
2347  }
2348  
2349  /**
2350   * ipr_log_sis64_device_error - Log a sis64 device error.
2351   * @ioa_cfg:	ioa config struct
2352   * @hostrcb:	hostrcb struct
2353   *
2354   * Return value:
2355   * 	none
2356   **/
2357  static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2358  					 struct ipr_hostrcb *hostrcb)
2359  {
2360  	struct ipr_hostrcb_type_21_error *error;
2361  	char buffer[IPR_MAX_RES_PATH_LENGTH];
2362  
2363  	error = &hostrcb->hcam.u.error64.u.type_21_error;
2364  
2365  	ipr_err("-----Failing Device Information-----\n");
2366  	ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2367  		be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2368  		 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2369  	ipr_err("Device Resource Path: %s\n",
2370  		__ipr_format_res_path(error->res_path,
2371  				      buffer, sizeof(buffer)));
2372  	error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2373  	error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2374  	ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2375  	ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
2376  	ipr_err("SCSI Sense Data:\n");
2377  	ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2378  	ipr_err("SCSI Command Descriptor Block:\n");
2379  	ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2380  
2381  	ipr_err("Additional IOA Data:\n");
2382  	ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2383  }
2384  
2385  /**
2386   * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2387   * @ioasc:	IOASC
2388   *
2389   * This function will return the index into the ipr_error_table
2390   * for the specified IOASC. If the IOASC is not in the table,
2391   * 0 will be returned, which points to the entry used for unknown errors.
2392   *
2393   * Return value:
2394   * 	index into the ipr_error_table
2395   **/
2396  static u32 ipr_get_error(u32 ioasc)
2397  {
2398  	int i;
2399  
2400  	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2401  		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2402  			return i;
2403  
2404  	return 0;
2405  }
2406  
2407  /**
2408   * ipr_handle_log_data - Log an adapter error.
2409   * @ioa_cfg:	ioa config struct
2410   * @hostrcb:	hostrcb struct
2411   *
2412   * This function logs an adapter error to the system.
2413   *
2414   * Return value:
2415   * 	none
2416   **/
2417  static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2418  				struct ipr_hostrcb *hostrcb)
2419  {
2420  	u32 ioasc;
2421  	int error_index;
2422  	struct ipr_hostrcb_type_21_error *error;
2423  
2424  	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2425  		return;
2426  
2427  	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2428  		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2429  
2430  	if (ioa_cfg->sis64)
2431  		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2432  	else
2433  		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2434  
2435  	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2436  	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2437  		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
2438  		scsi_report_bus_reset(ioa_cfg->host,
2439  				      hostrcb->hcam.u.error.fd_res_addr.bus);
2440  	}
2441  
2442  	error_index = ipr_get_error(ioasc);
2443  
2444  	if (!ipr_error_table[error_index].log_hcam)
2445  		return;
2446  
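	/*
	 * At the default log level, skip logging type 21 errors whose sense
	 * data indicates ILLEGAL_REQUEST for a failed hardware command.
	 */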
2447  	if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2448  	    hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2449  		error = &hostrcb->hcam.u.error64.u.type_21_error;
2450  
2451  		if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2452  			ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2453  				return;
2454  	}
2455  
2456  	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2457  
2458  	/* Set indication we have logged an error */
2459  	ioa_cfg->errors_logged++;
2460  
2461  	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2462  		return;
2463  	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2464  		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2465  
2466  	switch (hostrcb->hcam.overlay_id) {
2467  	case IPR_HOST_RCB_OVERLAY_ID_2:
2468  		ipr_log_cache_error(ioa_cfg, hostrcb);
2469  		break;
2470  	case IPR_HOST_RCB_OVERLAY_ID_3:
2471  		ipr_log_config_error(ioa_cfg, hostrcb);
2472  		break;
2473  	case IPR_HOST_RCB_OVERLAY_ID_4:
2474  	case IPR_HOST_RCB_OVERLAY_ID_6:
2475  		ipr_log_array_error(ioa_cfg, hostrcb);
2476  		break;
2477  	case IPR_HOST_RCB_OVERLAY_ID_7:
2478  		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2479  		break;
2480  	case IPR_HOST_RCB_OVERLAY_ID_12:
2481  		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2482  		break;
2483  	case IPR_HOST_RCB_OVERLAY_ID_13:
2484  		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2485  		break;
2486  	case IPR_HOST_RCB_OVERLAY_ID_14:
2487  	case IPR_HOST_RCB_OVERLAY_ID_16:
2488  		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2489  		break;
2490  	case IPR_HOST_RCB_OVERLAY_ID_17:
2491  		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2492  		break;
2493  	case IPR_HOST_RCB_OVERLAY_ID_20:
2494  		ipr_log_fabric_error(ioa_cfg, hostrcb);
2495  		break;
2496  	case IPR_HOST_RCB_OVERLAY_ID_21:
2497  		ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2498  		break;
2499  	case IPR_HOST_RCB_OVERLAY_ID_23:
2500  		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2501  		break;
2502  	case IPR_HOST_RCB_OVERLAY_ID_24:
2503  	case IPR_HOST_RCB_OVERLAY_ID_26:
2504  		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2505  		break;
2506  	case IPR_HOST_RCB_OVERLAY_ID_30:
2507  		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2508  		break;
2509  	case IPR_HOST_RCB_OVERLAY_ID_41:
2510  		ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
2511  		break;
2512  	case IPR_HOST_RCB_OVERLAY_ID_1:
2513  	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2514  	default:
2515  		ipr_log_generic_error(ioa_cfg, hostrcb);
2516  		break;
2517  	}
2518  }
2519  
2520  static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2521  {
2522  	struct ipr_hostrcb *hostrcb;
2523  
2524  	hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2525  					struct ipr_hostrcb, queue);
2526  
2527  	if (unlikely(!hostrcb)) {
2528  		dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
2529  		hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2530  						struct ipr_hostrcb, queue);
2531  	}
2532  
2533  	list_del_init(&hostrcb->queue);
2534  	return hostrcb;
2535  }
2536  
2537  /**
2538   * ipr_process_error - Op done function for an adapter error log.
2539   * @ipr_cmd:	ipr command struct
2540   *
2541   * This function is the op done function for an error log host
2542   * controlled async from the adapter. It will log the error and
2543   * send the HCAM back to the adapter.
2544   *
2545   * Return value:
2546   * 	none
2547   **/
2548  static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2549  {
2550  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2551  	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2552  	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2553  	u32 fd_ioasc;
2554  
2555  	if (ioa_cfg->sis64)
2556  		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2557  	else
2558  		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2559  
2560  	list_del_init(&hostrcb->queue);
2561  	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
2562  
2563  	if (!ioasc) {
2564  		ipr_handle_log_data(ioa_cfg, hostrcb);
2565  		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2566  			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2567  	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2568  		   ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
2569  		dev_err(&ioa_cfg->pdev->dev,
2570  			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
2571  	}
2572  
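	/*
	 * Queue the processed hostrcb for the worker thread to report, then
	 * grab a fresh buffer and send it back to the adapter to wait for the
	 * next error log HCAM.
	 */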
2573  	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
2574  	schedule_work(&ioa_cfg->work_q);
2575  	hostrcb = ipr_get_free_hostrcb(ioa_cfg);
2576  
2577  	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2578  }
2579  
2580  /**
2581   * ipr_timeout -  An internally generated op has timed out.
2582   * @t: Timer context used to fetch ipr command struct
2583   *
2584   * This function blocks host requests and initiates an
2585   * adapter reset.
2586   *
2587   * Return value:
2588   * 	none
2589   **/
2590  static void ipr_timeout(struct timer_list *t)
2591  {
2592  	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2593  	unsigned long lock_flags = 0;
2594  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2595  
2596  	ENTER;
2597  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2598  
2599  	ioa_cfg->errors_logged++;
2600  	dev_err(&ioa_cfg->pdev->dev,
2601  		"Adapter being reset due to command timeout.\n");
2602  
2603  	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2604  		ioa_cfg->sdt_state = GET_DUMP;
2605  
2606  	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2607  		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2608  
2609  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2610  	LEAVE;
2611  }
2612  
2613  /**
2614   * ipr_oper_timeout -  Adapter timed out transitioning to operational
2615   * @t: Timer context used to fetch ipr command struct
2616   *
2617   * This function blocks host requests and initiates an
2618   * adapter reset.
2619   *
2620   * Return value:
2621   * 	none
2622   **/
2623  static void ipr_oper_timeout(struct timer_list *t)
2624  {
2625  	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
2626  	unsigned long lock_flags = 0;
2627  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2628  
2629  	ENTER;
2630  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2631  
2632  	ioa_cfg->errors_logged++;
2633  	dev_err(&ioa_cfg->pdev->dev,
2634  		"Adapter timed out transitioning to operational.\n");
2635  
2636  	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2637  		ioa_cfg->sdt_state = GET_DUMP;
2638  
2639  	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2640  		if (ipr_fastfail)
2641  			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2642  		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2643  	}
2644  
2645  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2646  	LEAVE;
2647  }
2648  
2649  /**
2650   * ipr_find_ses_entry - Find matching SES in SES table
2651   * @res:	resource entry struct of SES
2652   *
2653   * Return value:
2654   * 	pointer to SES table entry / NULL on failure
2655   **/
2656  static const struct ipr_ses_table_entry *
2657  ipr_find_ses_entry(struct ipr_resource_entry *res)
2658  {
2659  	int i, j, matches;
2660  	struct ipr_std_inq_vpids *vpids;
2661  	const struct ipr_ses_table_entry *ste = ipr_ses_table;
2662  
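	/*
	 * An entry matches when every product ID byte flagged with 'X' in
	 * compare_product_id_byte equals the device's product ID byte;
	 * unflagged bytes are don't-cares.
	 */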
2663  	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2664  		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2665  			if (ste->compare_product_id_byte[j] == 'X') {
2666  				vpids = &res->std_inq_data.vpids;
2667  				if (vpids->product_id[j] == ste->product_id[j])
2668  					matches++;
2669  				else
2670  					break;
2671  			} else
2672  				matches++;
2673  		}
2674  
2675  		if (matches == IPR_PROD_ID_LEN)
2676  			return ste;
2677  	}
2678  
2679  	return NULL;
2680  }
2681  
2682  /**
2683   * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2684   * @ioa_cfg:	ioa config struct
2685   * @bus:		SCSI bus
2686   * @bus_width:	bus width
2687   *
2688   * Return value:
2689   *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2690   *	For a 2-byte wide SCSI bus, the maximum transfer speed is
2691   *	twice the maximum transfer rate (e.g. for a wide enabled bus,
2692   *	max 160MHz = max 320MB/sec).
2693   **/
2694  static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2695  {
2696  	struct ipr_resource_entry *res;
2697  	const struct ipr_ses_table_entry *ste;
2698  	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2699  
2700  	/* Loop through each config table entry in the config table buffer */
2701  	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2702  		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2703  			continue;
2704  
2705  		if (bus != res->bus)
2706  			continue;
2707  
2708  		if (!(ste = ipr_find_ses_entry(res)))
2709  			continue;
2710  
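		/*
		 * Convert the SES limit to bus clock units of 100KHz; the limit
		 * appears to be in MB/sec, so divide by the bus width in bytes
		 * and scale by 10.
		 */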
2711  		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2712  	}
2713  
2714  	return max_xfer_rate;
2715  }
2716  
2717  /**
2718   * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2719   * @ioa_cfg:		ioa config struct
2720   * @max_delay:		max delay in micro-seconds to wait
2721   *
2722   * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2723   *
2724   * Return value:
2725   * 	0 on success / other on failure
2726   **/
2727  static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2728  {
2729  	volatile u32 pcii_reg;
2730  	int delay = 1;
2731  
2732  	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
2733  	while (delay < max_delay) {
2734  		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2735  
2736  		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2737  			return 0;
2738  
2739  		/* udelay cannot be used if delay is more than a few milliseconds */
2740  		if ((delay / 1000) > MAX_UDELAY_MS)
2741  			mdelay(delay / 1000);
2742  		else
2743  			udelay(delay);
2744  
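		/* Double the delay each pass (exponential backoff) until max_delay is reached */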
2745  		delay += delay;
2746  	}
2747  	return -EIO;
2748  }
2749  
2750  /**
2751   * ipr_get_sis64_dump_data_section - Dump IOA memory
2752   * @ioa_cfg:			ioa config struct
2753   * @start_addr:			adapter address to dump
2754   * @dest:			destination kernel buffer
2755   * @length_in_words:		length to dump in 4 byte words
2756   *
2757   * Return value:
2758   * 	0 on success
2759   **/
2760  static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2761  					   u32 start_addr,
2762  					   __be32 *dest, u32 length_in_words)
2763  {
2764  	int i;
2765  
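	/* Latch each adapter address in the dump address register, then read the word back */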
2766  	for (i = 0; i < length_in_words; i++) {
2767  		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2768  		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2769  		dest++;
2770  	}
2771  
2772  	return 0;
2773  }
2774  
2775  /**
2776   * ipr_get_ldump_data_section - Dump IOA memory
2777   * @ioa_cfg:			ioa config struct
2778   * @start_addr:			adapter address to dump
2779   * @dest:				destination kernel buffer
2780   * @length_in_words:	length to dump in 4 byte words
2781   *
2782   * Return value:
2783   * 	0 on success / -EIO on failure
2784   **/
2785  static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2786  				      u32 start_addr,
2787  				      __be32 *dest, u32 length_in_words)
2788  {
2789  	volatile u32 temp_pcii_reg;
2790  	int i, delay = 0;
2791  
2792  	if (ioa_cfg->sis64)
2793  		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2794  						       dest, length_in_words);
2795  
2796  	/* Write IOA interrupt reg starting LDUMP state  */
2797  	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2798  	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2799  
2800  	/* Wait for IO debug acknowledge */
2801  	if (ipr_wait_iodbg_ack(ioa_cfg,
2802  			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2803  		dev_err(&ioa_cfg->pdev->dev,
2804  			"IOA dump long data transfer timeout\n");
2805  		return -EIO;
2806  	}
2807  
2808  	/* Signal LDUMP interlocked - clear IO debug ack */
2809  	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2810  	       ioa_cfg->regs.clr_interrupt_reg);
2811  
2812  	/* Write Mailbox with starting address */
2813  	writel(start_addr, ioa_cfg->ioa_mailbox);
2814  
2815  	/* Signal address valid - clear IOA Reset alert */
2816  	writel(IPR_UPROCI_RESET_ALERT,
2817  	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2818  
2819  	for (i = 0; i < length_in_words; i++) {
2820  		/* Wait for IO debug acknowledge */
2821  		if (ipr_wait_iodbg_ack(ioa_cfg,
2822  				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2823  			dev_err(&ioa_cfg->pdev->dev,
2824  				"IOA dump short data transfer timeout\n");
2825  			return -EIO;
2826  		}
2827  
2828  		/* Read data from mailbox and increment destination pointer */
2829  		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2830  		dest++;
2831  
2832  		/* For all but the last word of data, signal data received */
2833  		if (i < (length_in_words - 1)) {
2834  			/* Signal dump data received - Clear IO debug Ack */
2835  			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2836  			       ioa_cfg->regs.clr_interrupt_reg);
2837  		}
2838  	}
2839  
2840  	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
2841  	writel(IPR_UPROCI_RESET_ALERT,
2842  	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2843  
2844  	writel(IPR_UPROCI_IO_DEBUG_ALERT,
2845  	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2846  
2847  	/* Signal dump data received - Clear IO debug Ack */
2848  	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2849  	       ioa_cfg->regs.clr_interrupt_reg);
2850  
2851  	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2852  	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2853  		temp_pcii_reg =
2854  		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2855  
2856  		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2857  			return 0;
2858  
2859  		udelay(10);
2860  		delay += 10;
2861  	}
2862  
2863  	return 0;
2864  }
2865  
2866  #ifdef CONFIG_SCSI_IPR_DUMP
2867  /**
2868   * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2869   * @ioa_cfg:		ioa config struct
2870   * @pci_address:	adapter address
2871   * @length:			length of data to copy
2872   *
2873   * Copy data from PCI adapter to kernel buffer.
2874   * Note: length MUST be a 4 byte multiple
2875   * Return value:
2876   * 	0 on success / other on failure
2877   **/
2878  static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2879  			unsigned long pci_address, u32 length)
2880  {
2881  	int bytes_copied = 0;
2882  	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
2883  	__be32 *page;
2884  	unsigned long lock_flags = 0;
2885  	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2886  
2887  	if (ioa_cfg->sis64)
2888  		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2889  	else
2890  		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2891  
2892  	while (bytes_copied < length &&
2893  	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
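		/*
		 * Start a new page when the current one is full (or on the first
		 * pass); each page is recorded in ioa_dump->ioa_data[].
		 */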
2894  		if (ioa_dump->page_offset >= PAGE_SIZE ||
2895  		    ioa_dump->page_offset == 0) {
2896  			page = (__be32 *)__get_free_page(GFP_ATOMIC);
2897  
2898  			if (!page) {
2899  				ipr_trace;
2900  				return bytes_copied;
2901  			}
2902  
2903  			ioa_dump->page_offset = 0;
2904  			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2905  			ioa_dump->next_page_index++;
2906  		} else
2907  			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2908  
2909  		rem_len = length - bytes_copied;
2910  		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2911  		cur_len = min(rem_len, rem_page_len);
2912  
2913  		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2914  		if (ioa_cfg->sdt_state == ABORT_DUMP) {
2915  			rc = -EIO;
2916  		} else {
2917  			rc = ipr_get_ldump_data_section(ioa_cfg,
2918  							pci_address + bytes_copied,
2919  							&page[ioa_dump->page_offset / 4],
2920  							(cur_len / sizeof(u32)));
2921  		}
2922  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2923  
2924  		if (!rc) {
2925  			ioa_dump->page_offset += cur_len;
2926  			bytes_copied += cur_len;
2927  		} else {
2928  			ipr_trace;
2929  			break;
2930  		}
2931  		schedule();
2932  	}
2933  
2934  	return bytes_copied;
2935  }
2936  
2937  /**
2938   * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2939   * @hdr:	dump entry header struct
2940   *
2941   * Return value:
2942   * 	nothing
2943   **/
2944  static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2945  {
2946  	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2947  	hdr->num_elems = 1;
2948  	hdr->offset = sizeof(*hdr);
2949  	hdr->status = IPR_DUMP_STATUS_SUCCESS;
2950  }
2951  
2952  /**
2953   * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2954   * @ioa_cfg:	ioa config struct
2955   * @driver_dump:	driver dump struct
2956   *
2957   * Return value:
2958   * 	nothing
2959   **/
2960  static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2961  				   struct ipr_driver_dump *driver_dump)
2962  {
2963  	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2964  
2965  	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2966  	driver_dump->ioa_type_entry.hdr.len =
2967  		sizeof(struct ipr_dump_ioa_type_entry) -
2968  		sizeof(struct ipr_dump_entry_header);
2969  	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2970  	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2971  	driver_dump->ioa_type_entry.type = ioa_cfg->type;
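	/* Pack the microcode level into one word: major | card type | minor[0] | minor[1] */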
2972  	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2973  		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2974  		ucode_vpd->minor_release[1];
2975  	driver_dump->hdr.num_entries++;
2976  }
2977  
2978  /**
2979   * ipr_dump_version_data - Fill in the driver version in the dump.
2980   * @ioa_cfg:	ioa config struct
2981   * @driver_dump:	driver dump struct
2982   *
2983   * Return value:
2984   * 	nothing
2985   **/
2986  static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2987  				  struct ipr_driver_dump *driver_dump)
2988  {
2989  	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2990  	driver_dump->version_entry.hdr.len =
2991  		sizeof(struct ipr_dump_version_entry) -
2992  		sizeof(struct ipr_dump_entry_header);
2993  	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2994  	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2995  	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2996  	driver_dump->hdr.num_entries++;
2997  }
2998  
2999  /**
3000   * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3001   * @ioa_cfg:	ioa config struct
3002   * @driver_dump:	driver dump struct
3003   *
3004   * Return value:
3005   * 	nothing
3006   **/
3007  static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3008  				   struct ipr_driver_dump *driver_dump)
3009  {
3010  	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3011  	driver_dump->trace_entry.hdr.len =
3012  		sizeof(struct ipr_dump_trace_entry) -
3013  		sizeof(struct ipr_dump_entry_header);
3014  	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3015  	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3016  	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3017  	driver_dump->hdr.num_entries++;
3018  }
3019  
3020  /**
3021   * ipr_dump_location_data - Fill in the IOA location in the dump.
3022   * @ioa_cfg:	ioa config struct
3023   * @driver_dump:	driver dump struct
3024   *
3025   * Return value:
3026   * 	nothing
3027   **/
3028  static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3029  				   struct ipr_driver_dump *driver_dump)
3030  {
3031  	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3032  	driver_dump->location_entry.hdr.len =
3033  		sizeof(struct ipr_dump_location_entry) -
3034  		sizeof(struct ipr_dump_entry_header);
3035  	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3036  	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
3037  	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
3038  	driver_dump->hdr.num_entries++;
3039  }
3040  
3041  /**
3042   * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3043   * @ioa_cfg:	ioa config struct
3044   * @dump:		dump struct
3045   *
3046   * Return value:
3047   * 	nothing
3048   **/
3049  static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3050  {
3051  	unsigned long start_addr, sdt_word;
3052  	unsigned long lock_flags = 0;
3053  	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3054  	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
3055  	u32 num_entries, max_num_entries, start_off, end_off;
3056  	u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
3057  	struct ipr_sdt *sdt;
3058  	int valid = 1;
3059  	int i;
3060  
3061  	ENTER;
3062  
3063  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3064  
3065  	if (ioa_cfg->sdt_state != READ_DUMP) {
3066  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3067  		return;
3068  	}
3069  
3070  	if (ioa_cfg->sis64) {
3071  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3072  		ssleep(IPR_DUMP_DELAY_SECONDS);
3073  		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3074  	}
3075  
3076  	start_addr = readl(ioa_cfg->ioa_mailbox);
3077  
3078  	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
3079  		dev_err(&ioa_cfg->pdev->dev,
3080  			"Invalid dump table format: %lx\n", start_addr);
3081  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3082  		return;
3083  	}
3084  
3085  	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3086  
3087  	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3088  
3089  	/* Initialize the overall dump header */
3090  	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3091  	driver_dump->hdr.num_entries = 1;
3092  	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3093  	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3094  	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3095  	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3096  
3097  	ipr_dump_version_data(ioa_cfg, driver_dump);
3098  	ipr_dump_location_data(ioa_cfg, driver_dump);
3099  	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3100  	ipr_dump_trace_data(ioa_cfg, driver_dump);
3101  
3102  	/* Update dump_header */
3103  	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3104  
3105  	/* IOA Dump entry */
3106  	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
3107  	ioa_dump->hdr.len = 0;
3108  	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3109  	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3110  
3111  	/* First entries in sdt are actually a list of dump addresses and
3112  	 * lengths to gather the real dump data.  sdt represents the pointer
3113  	 * to the ioa generated dump table.  Dump data will be extracted based
3114  	 * on entries in this table */
3115  	sdt = &ioa_dump->sdt;
3116  
3117  	if (ioa_cfg->sis64) {
3118  		max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3119  		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3120  	} else {
3121  		max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3122  		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3123  	}
3124  
3125  	bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3126  			(max_num_entries * sizeof(struct ipr_sdt_entry));
3127  	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
3128  					bytes_to_copy / sizeof(__be32));
3129  
3130  	/* Smart Dump table is ready to use and the first entry is valid */
3131  	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3132  	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
3133  		dev_err(&ioa_cfg->pdev->dev,
3134  			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
3135  			rc, be32_to_cpu(sdt->hdr.state));
3136  		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3137  		ioa_cfg->sdt_state = DUMP_OBTAINED;
3138  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3139  		return;
3140  	}
3141  
3142  	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3143  
3144  	if (num_entries > max_num_entries)
3145  		num_entries = max_num_entries;
3146  
3147  	/* Update dump length to the actual data to be copied */
3148  	dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3149  	if (ioa_cfg->sis64)
3150  		dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3151  	else
3152  		dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
3153  
3154  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3155  
3156  	for (i = 0; i < num_entries; i++) {
3157  		if (ioa_dump->hdr.len > max_dump_size) {
3158  			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3159  			break;
3160  		}
3161  
3162  		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
3163  			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3164  			if (ioa_cfg->sis64)
3165  				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3166  			else {
3167  				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3168  				end_off = be32_to_cpu(sdt->entry[i].end_token);
3169  
3170  				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3171  					bytes_to_copy = end_off - start_off;
3172  				else
3173  					valid = 0;
3174  			}
3175  			if (valid) {
3176  				if (bytes_to_copy > max_dump_size) {
3177  					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3178  					continue;
3179  				}
3180  
3181  				/* Copy data from adapter to driver buffers */
3182  				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3183  							    bytes_to_copy);
3184  
3185  				ioa_dump->hdr.len += bytes_copied;
3186  
3187  				if (bytes_copied != bytes_to_copy) {
3188  					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3189  					break;
3190  				}
3191  			}
3192  		}
3193  	}
3194  
3195  	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3196  
3197  	/* Update dump_header */
3198  	driver_dump->hdr.len += ioa_dump->hdr.len;
3199  	wmb();
3200  	ioa_cfg->sdt_state = DUMP_OBTAINED;
3201  	LEAVE;
3202  }
3203  
3204  #else
3205  #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3206  #endif
3207  
3208  /**
3209   * ipr_release_dump - Free adapter dump memory
3210   * @kref:	kref struct
3211   *
3212   * Return value:
3213   *	nothing
3214   **/
3215  static void ipr_release_dump(struct kref *kref)
3216  {
3217  	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3218  	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3219  	unsigned long lock_flags = 0;
3220  	int i;
3221  
3222  	ENTER;
3223  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3224  	ioa_cfg->dump = NULL;
3225  	ioa_cfg->sdt_state = INACTIVE;
3226  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3227  
3228  	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3229  		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3230  
3231  	vfree(dump->ioa_dump.ioa_data);
3232  	kfree(dump);
3233  	LEAVE;
3234  }
3235  
3236  static void ipr_add_remove_thread(struct work_struct *work)
3237  {
3238  	unsigned long lock_flags;
3239  	struct ipr_resource_entry *res;
3240  	struct scsi_device *sdev;
3241  	struct ipr_ioa_cfg *ioa_cfg =
3242  		container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
3243  	u8 bus, target, lun;
3244  	int did_work;
3245  
3246  	ENTER;
3247  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3248  
3249  restart:
3250  	do {
3251  		did_work = 0;
3252  		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3253  			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3254  			return;
3255  		}
3256  
3257  		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3258  			if (res->del_from_ml && res->sdev) {
3259  				did_work = 1;
3260  				sdev = res->sdev;
3261  				if (!scsi_device_get(sdev)) {
3262  					if (!res->add_to_ml)
3263  						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3264  					else
3265  						res->del_from_ml = 0;
3266  					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3267  					scsi_remove_device(sdev);
3268  					scsi_device_put(sdev);
3269  					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3270  				}
3271  				break;
3272  			}
3273  		}
3274  	} while (did_work);
3275  
3276  	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3277  		if (res->add_to_ml) {
3278  			bus = res->bus;
3279  			target = res->target;
3280  			lun = res->lun;
3281  			res->add_to_ml = 0;
3282  			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3283  			scsi_add_device(ioa_cfg->host, bus, target, lun);
3284  			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3285  			goto restart;
3286  		}
3287  	}
3288  
3289  	ioa_cfg->scan_done = 1;
3290  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3291  	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3292  	LEAVE;
3293  }
3294  
3295  /**
3296   * ipr_worker_thread - Worker thread
3297   * @work:		ioa config struct
3298   *
3299   * Called at task level from a work thread. This function takes care
3300   * of adding and removing devices from the mid-layer as configuration
3301   * changes are detected by the adapter.
3302   *
3303   * Return value:
3304   * 	nothing
3305   **/
3306  static void ipr_worker_thread(struct work_struct *work)
3307  {
3308  	unsigned long lock_flags;
3309  	struct ipr_dump *dump;
3310  	struct ipr_ioa_cfg *ioa_cfg =
3311  		container_of(work, struct ipr_ioa_cfg, work_q);
3312  
3313  	ENTER;
3314  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3315  
3316  	if (ioa_cfg->sdt_state == READ_DUMP) {
3317  		dump = ioa_cfg->dump;
3318  		if (!dump) {
3319  			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3320  			return;
3321  		}
3322  		kref_get(&dump->kref);
3323  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3324  		ipr_get_ioa_dump(ioa_cfg, dump);
3325  		kref_put(&dump->kref, ipr_release_dump);
3326  
3327  		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3328  		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3329  			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3330  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3331  		return;
3332  	}
3333  
3334  	if (ioa_cfg->scsi_unblock) {
3335  		ioa_cfg->scsi_unblock = 0;
3336  		ioa_cfg->scsi_blocked = 0;
3337  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3338  		scsi_unblock_requests(ioa_cfg->host);
3339  		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3340  		if (ioa_cfg->scsi_blocked)
3341  			scsi_block_requests(ioa_cfg->host);
3342  	}
3343  
3344  	if (!ioa_cfg->scan_enabled) {
3345  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3346  		return;
3347  	}
3348  
3349  	schedule_work(&ioa_cfg->scsi_add_work_q);
3350  
3351  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3352  	LEAVE;
3353  }
3354  
3355  #ifdef CONFIG_SCSI_IPR_TRACE
3356  /**
3357   * ipr_read_trace - Dump the adapter trace
3358   * @filp:		open sysfs file
3359   * @kobj:		kobject struct
3360   * @bin_attr:		bin_attribute struct
3361   * @buf:		buffer
3362   * @off:		offset
3363   * @count:		buffer size
3364   *
3365   * Return value:
3366   *	number of bytes printed to buffer
3367   **/
3368  static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3369  			      struct bin_attribute *bin_attr,
3370  			      char *buf, loff_t off, size_t count)
3371  {
3372  	struct device *dev = kobj_to_dev(kobj);
3373  	struct Scsi_Host *shost = class_to_shost(dev);
3374  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3375  	unsigned long lock_flags = 0;
3376  	ssize_t ret;
3377  
3378  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3379  	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3380  				IPR_TRACE_SIZE);
3381  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3382  
3383  	return ret;
3384  }
3385  
3386  static struct bin_attribute ipr_trace_attr = {
3387  	.attr =	{
3388  		.name = "trace",
3389  		.mode = S_IRUGO,
3390  	},
3391  	.size = 0,
3392  	.read = ipr_read_trace,
3393  };
3394  #endif
3395  
3396  /**
3397   * ipr_show_fw_version - Show the firmware version
3398   * @dev:	class device struct
3399   * @attr:	device attribute (unused)
3400   * @buf:	buffer
3401   *
3402   * Return value:
3403   *	number of bytes printed to buffer
3404   **/
3405  static ssize_t ipr_show_fw_version(struct device *dev,
3406  				   struct device_attribute *attr, char *buf)
3407  {
3408  	struct Scsi_Host *shost = class_to_shost(dev);
3409  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3410  	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3411  	unsigned long lock_flags = 0;
3412  	int len;
3413  
3414  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3415  	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3416  		       ucode_vpd->major_release, ucode_vpd->card_type,
3417  		       ucode_vpd->minor_release[0],
3418  		       ucode_vpd->minor_release[1]);
3419  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3420  	return len;
3421  }
3422  
3423  static struct device_attribute ipr_fw_version_attr = {
3424  	.attr = {
3425  		.name =		"fw_version",
3426  		.mode =		S_IRUGO,
3427  	},
3428  	.show = ipr_show_fw_version,
3429  };
3430  
3431  /**
3432   * ipr_show_log_level - Show the adapter's error logging level
3433   * @dev:	class device struct
3434   * @attr:	device attribute (unused)
3435   * @buf:	buffer
3436   *
3437   * Return value:
3438   * 	number of bytes printed to buffer
3439   **/
3440  static ssize_t ipr_show_log_level(struct device *dev,
3441  				   struct device_attribute *attr, char *buf)
3442  {
3443  	struct Scsi_Host *shost = class_to_shost(dev);
3444  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3445  	unsigned long lock_flags = 0;
3446  	int len;
3447  
3448  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3449  	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3450  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3451  	return len;
3452  }
3453  
3454  /**
3455   * ipr_store_log_level - Change the adapter's error logging level
3456   * @dev:	class device struct
3457   * @attr:	device attribute (unused)
3458   * @buf:	buffer
3459   * @count:	buffer size
3460   *
3461   * Return value:
3462   * 	number of bytes consumed from buffer on success
3463   **/
3464  static ssize_t ipr_store_log_level(struct device *dev,
3465  				   struct device_attribute *attr,
3466  				   const char *buf, size_t count)
3467  {
3468  	struct Scsi_Host *shost = class_to_shost(dev);
3469  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3470  	unsigned long lock_flags = 0;
3471  
3472  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3473  	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3474  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3475  	return strlen(buf);
3476  }
3477  
3478  static struct device_attribute ipr_log_level_attr = {
3479  	.attr = {
3480  		.name =		"log_level",
3481  		.mode =		S_IRUGO | S_IWUSR,
3482  	},
3483  	.show = ipr_show_log_level,
3484  	.store = ipr_store_log_level
3485  };
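
/*
 * Usage sketch (illustrative; the exact sysfs path depends on the host
 * number assigned at probe time): log_level is a per-host attribute, so
 * a write of the decimal string "4" is parsed by simple_strtoul() above,
 * e.g.
 *
 *	ioa_cfg->log_level = simple_strtoul("4\n", NULL, 10);	yields 4
 *
 * and a subsequent read returns the current value followed by a newline.
 */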
3486  
3487  /**
3488   * ipr_store_diagnostics - IOA Diagnostics interface
3489   * @dev:	device struct
3490   * @attr:	device attribute (unused)
3491   * @buf:	buffer
3492   * @count:	buffer size
3493   *
3494   * This function will reset the adapter and wait a reasonable
3495   * amount of time for any errors that the adapter might log.
3496   *
3497   * Return value:
3498   * 	count on success / other on failure
3499   **/
3500  static ssize_t ipr_store_diagnostics(struct device *dev,
3501  				     struct device_attribute *attr,
3502  				     const char *buf, size_t count)
3503  {
3504  	struct Scsi_Host *shost = class_to_shost(dev);
3505  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3506  	unsigned long lock_flags = 0;
3507  	int rc = count;
3508  
3509  	if (!capable(CAP_SYS_ADMIN))
3510  		return -EACCES;
3511  
3512  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3513  	while (ioa_cfg->in_reset_reload) {
3514  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3515  		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3516  		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3517  	}
3518  
3519  	ioa_cfg->errors_logged = 0;
3520  	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3521  
3522  	if (ioa_cfg->in_reset_reload) {
3523  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3524  		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3525  
3526  		/* Wait for a second for any errors to be logged */
3527  		msleep(1000);
3528  	} else {
3529  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3530  		return -EIO;
3531  	}
3532  
3533  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3534  	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3535  		rc = -EIO;
3536  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3537  
3538  	return rc;
3539  }
3540  
3541  static struct device_attribute ipr_diagnostics_attr = {
3542  	.attr = {
3543  		.name =		"run_diagnostics",
3544  		.mode =		S_IWUSR,
3545  	},
3546  	.store = ipr_store_diagnostics
3547  };
3548  
3549  /**
3550   * ipr_show_adapter_state - Show the adapter's state
3551   * @dev:	device struct
3552   * @attr:	device attribute (unused)
3553   * @buf:	buffer
3554   *
3555   * Return value:
3556   * 	number of bytes printed to buffer
3557   **/
3558  static ssize_t ipr_show_adapter_state(struct device *dev,
3559  				      struct device_attribute *attr, char *buf)
3560  {
3561  	struct Scsi_Host *shost = class_to_shost(dev);
3562  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3563  	unsigned long lock_flags = 0;
3564  	int len;
3565  
3566  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3567  	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
3568  		len = snprintf(buf, PAGE_SIZE, "offline\n");
3569  	else
3570  		len = snprintf(buf, PAGE_SIZE, "online\n");
3571  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3572  	return len;
3573  }
3574  
3575  /**
3576   * ipr_store_adapter_state - Change adapter state
3577   * @dev:	device struct
3578   * @attr:	device attribute (unused)
3579   * @buf:	buffer
3580   * @count:	buffer size
3581   *
3582   * This function will change the adapter's state.
3583   *
3584   * Return value:
3585   * 	count on success / other on failure
3586   **/
3587  static ssize_t ipr_store_adapter_state(struct device *dev,
3588  				       struct device_attribute *attr,
3589  				       const char *buf, size_t count)
3590  {
3591  	struct Scsi_Host *shost = class_to_shost(dev);
3592  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3593  	unsigned long lock_flags;
3594  	int result = count, i;
3595  
3596  	if (!capable(CAP_SYS_ADMIN))
3597  		return -EACCES;
3598  
3599  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3600  	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3601  	    !strncmp(buf, "online", 6)) {
3602  		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3603  			spin_lock(&ioa_cfg->hrrq[i]._lock);
3604  			ioa_cfg->hrrq[i].ioa_is_dead = 0;
3605  			spin_unlock(&ioa_cfg->hrrq[i]._lock);
3606  		}
3607  		wmb();
3608  		ioa_cfg->reset_retries = 0;
3609  		ioa_cfg->in_ioa_bringdown = 0;
3610  		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3611  	}
3612  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3613  	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3614  
3615  	return result;
3616  }
3617  
3618  static struct device_attribute ipr_ioa_state_attr = {
3619  	.attr = {
3620  		.name =		"online_state",
3621  		.mode =		S_IRUGO | S_IWUSR,
3622  	},
3623  	.show = ipr_show_adapter_state,
3624  	.store = ipr_store_adapter_state
3625  };
3626  
3627  /**
3628   * ipr_store_reset_adapter - Reset the adapter
3629   * @dev:	device struct
3630   * @attr:	device attribute (unused)
3631   * @buf:	buffer
3632   * @count:	buffer size
3633   *
3634   * This function will reset the adapter.
3635   *
3636   * Return value:
3637   * 	count on success / other on failure
3638   **/
3639  static ssize_t ipr_store_reset_adapter(struct device *dev,
3640  				       struct device_attribute *attr,
3641  				       const char *buf, size_t count)
3642  {
3643  	struct Scsi_Host *shost = class_to_shost(dev);
3644  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3645  	unsigned long lock_flags;
3646  	int result = count;
3647  
3648  	if (!capable(CAP_SYS_ADMIN))
3649  		return -EACCES;
3650  
3651  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3652  	if (!ioa_cfg->in_reset_reload)
3653  		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3654  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3655  	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3656  
3657  	return result;
3658  }
3659  
3660  static struct device_attribute ipr_ioa_reset_attr = {
3661  	.attr = {
3662  		.name =		"reset_host",
3663  		.mode =		S_IWUSR,
3664  	},
3665  	.store = ipr_store_reset_adapter
3666  };
3667  
3668  static int ipr_iopoll(struct irq_poll *iop, int budget);
3669  /**
3670   * ipr_show_iopoll_weight - Show ipr polling mode
3671   * @dev:	class device struct
3672   * @attr:	device attribute (unused)
3673   * @buf:	buffer
3674   *
3675   * Return value:
3676   *	number of bytes printed to buffer
3677   **/
3678  static ssize_t ipr_show_iopoll_weight(struct device *dev,
3679  				   struct device_attribute *attr, char *buf)
3680  {
3681  	struct Scsi_Host *shost = class_to_shost(dev);
3682  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3683  	unsigned long lock_flags = 0;
3684  	int len;
3685  
3686  	spin_lock_irqsave(shost->host_lock, lock_flags);
3687  	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3688  	spin_unlock_irqrestore(shost->host_lock, lock_flags);
3689  
3690  	return len;
3691  }
3692  
3693  /**
3694   * ipr_store_iopoll_weight - Change the adapter's polling mode
3695   * @dev:	class device struct
3696   * @attr:	device attribute (unused)
3697   * @buf:	buffer
3698   * @count:	buffer size
3699   *
3700   * Return value:
3701   *	number of bytes consumed from buffer on success / -EINVAL on failure
3702   **/
3703  static ssize_t ipr_store_iopoll_weight(struct device *dev,
3704  					struct device_attribute *attr,
3705  					const char *buf, size_t count)
3706  {
3707  	struct Scsi_Host *shost = class_to_shost(dev);
3708  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3709  	unsigned long user_iopoll_weight;
3710  	unsigned long lock_flags = 0;
3711  	int i;
3712  
3713  	if (!ioa_cfg->sis64) {
3714  		dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
3715  		return -EINVAL;
3716  	}
3717  	if (kstrtoul(buf, 10, &user_iopoll_weight))
3718  		return -EINVAL;
3719  
3720  	if (user_iopoll_weight > 256) {
3721  		dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
3722  		return -EINVAL;
3723  	}
3724  
3725  	if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
3726  		dev_info(&ioa_cfg->pdev->dev, "Requested irq_poll weight is the same as the current weight\n");
3727  		return strlen(buf);
3728  	}
3729  
3730  	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3731  		for (i = 1; i < ioa_cfg->hrrq_num; i++)
3732  			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
3733  	}
3734  
3735  	spin_lock_irqsave(shost->host_lock, lock_flags);
3736  	ioa_cfg->iopoll_weight = user_iopoll_weight;
3737  	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
3738  		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
3739  			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
3740  					ioa_cfg->iopoll_weight, ipr_iopoll);
3741  		}
3742  	}
3743  	spin_unlock_irqrestore(shost->host_lock, lock_flags);
3744  
3745  	return strlen(buf);
3746  }
3747  
3748  static struct device_attribute ipr_iopoll_weight_attr = {
3749  	.attr = {
3750  		.name =		"iopoll_weight",
3751  		.mode =		S_IRUGO | S_IWUSR,
3752  	},
3753  	.show = ipr_show_iopoll_weight,
3754  	.store = ipr_store_iopoll_weight
3755  };
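
/*
 * Behavior sketch based on the store routine above: only sis64 adapters
 * with more than one MSI-X vector use irq_poll.  Writing a value in the
 * range 1..256 re-initializes irq_poll on HRRQs 1..hrrq_num-1 with that
 * weight, while writing 0 leaves polling disabled (the init loop is
 * skipped because iopoll_weight is zero).
 */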
3756  
3757  /**
3758   * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3759   * @buf_len:		buffer length
3760   *
3761   * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3762   * list to use for microcode download
3763   *
3764   * Return value:
3765   * 	pointer to sglist / NULL on failure
3766   **/
3767  static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3768  {
3769  	int sg_size, order;
3770  	struct ipr_sglist *sglist;
3771  
3772  	/* Get the minimum size per scatter/gather element */
3773  	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3774  
3775  	/* Get the actual size per element */
3776  	order = get_order(sg_size);
3777  
3778  	/* Allocate a scatter/gather list for the DMA */
3779  	sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
3780  	if (sglist == NULL) {
3781  		ipr_trace;
3782  		return NULL;
3783  	}
3784  	sglist->order = order;
3785  	sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
3786  					      &sglist->num_sg);
3787  	if (!sglist->scatterlist) {
3788  		kfree(sglist);
3789  		return NULL;
3790  	}
3791  
3792  	return sglist;
3793  }
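
/*
 * Sizing sketch (numbers are illustrative): for a 1 MiB microcode image
 * and assuming IPR_MAX_SGLIST is 64, the minimum element size is
 * 1048576 / 63 = 16644 bytes, so get_order() rounds up to order 3
 * (32 KiB chunks on 4 KiB pages) and sgl_alloc_order() builds the
 * scatterlist from order-3 page allocations.
 */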
3794  
3795  /**
3796   * ipr_free_ucode_buffer - Frees a microcode download buffer
3797   * @sglist:		scatter/gather list pointer
3798   *
3799   * Free a DMA'able ucode download buffer previously allocated with
3800   * ipr_alloc_ucode_buffer
3801   *
3802   * Return value:
3803   * 	nothing
3804   **/
3805  static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3806  {
3807  	sgl_free_order(sglist->scatterlist, sglist->order);
3808  	kfree(sglist);
3809  }
3810  
3811  /**
3812   * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3813   * @sglist:		scatter/gather list pointer
3814   * @buffer:		buffer pointer
3815   * @len:		buffer length
3816   *
3817   * Copy a microcode image from a user buffer into a buffer allocated by
3818   * ipr_alloc_ucode_buffer
3819   *
3820   * Return value:
3821   * 	0 on success / other on failure
3822   **/
3823  static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3824  				 u8 *buffer, u32 len)
3825  {
3826  	int bsize_elem, i, result = 0;
3827  	struct scatterlist *sg;
3828  
3829  	/* Determine the actual number of bytes per element */
3830  	bsize_elem = PAGE_SIZE * (1 << sglist->order);
3831  
3832  	sg = sglist->scatterlist;
3833  
3834  	for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg),
3835  			buffer += bsize_elem) {
3836  		struct page *page = sg_page(sg);
3837  
3838  		memcpy_to_page(page, 0, buffer, bsize_elem);
3839  
3840  		sg->length = bsize_elem;
3841  
3842  		if (result != 0) {
3843  			ipr_trace;
3844  			return result;
3845  		}
3846  	}
3847  
3848  	if (len % bsize_elem) {
3849  		struct page *page = sg_page(sg);
3850  
3851  		memcpy_to_page(page, 0, buffer, len % bsize_elem);
3852  
3853  		sg->length = len % bsize_elem;
3854  	}
3855  
3856  	sglist->buffer_len = len;
3857  	return result;
3858  }
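
/*
 * Worked example (illustrative): with 4 KiB pages and sglist->order == 3,
 * bsize_elem is PAGE_SIZE * (1 << 3) = 32768 bytes.  A 100000-byte image
 * therefore fills three full 32768-byte elements (copied in the first
 * loop) and the 1696-byte remainder lands in the fourth element, whose
 * sg->length is trimmed to 100000 % 32768 = 1696.
 */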
3859  
3860  /**
3861   * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3862   * @ipr_cmd:		ipr command struct
3863   * @sglist:		scatter/gather list
3864   *
3865   * Builds a microcode download IOA data list (IOADL).
3866   *
3867   **/
3868  static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3869  				    struct ipr_sglist *sglist)
3870  {
3871  	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3872  	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3873  	struct scatterlist *scatterlist = sglist->scatterlist;
3874  	struct scatterlist *sg;
3875  	int i;
3876  
3877  	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3878  	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3879  	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3880  
3881  	ioarcb->ioadl_len =
3882  		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3883  	for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
3884  		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3885  		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
3886  		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
3887  	}
3888  
3889  	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3890  }
3891  
3892  /**
3893   * ipr_build_ucode_ioadl - Build a microcode download IOADL
3894   * @ipr_cmd:	ipr command struct
3895   * @sglist:		scatter/gather list
3896   *
3897   * Builds a microcode download IOA data list (IOADL).
3898   *
3899   **/
3900  static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3901  				  struct ipr_sglist *sglist)
3902  {
3903  	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3904  	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3905  	struct scatterlist *scatterlist = sglist->scatterlist;
3906  	struct scatterlist *sg;
3907  	int i;
3908  
3909  	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3910  	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3911  	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3912  
3913  	ioarcb->ioadl_len =
3914  		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3915  
3916  	for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
3917  		ioadl[i].flags_and_data_len =
3918  			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg));
3919  		ioadl[i].address =
3920  			cpu_to_be32(sg_dma_address(sg));
3921  	}
3922  
3923  	ioadl[i-1].flags_and_data_len |=
3924  		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3925  }
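
/*
 * Format note on the two IOADL layouts above: the 64-bit descriptors keep
 * flags, length and address in separate fields, while the legacy 32-bit
 * descriptors OR the flags into the same big-endian word as the length,
 * e.g. a 4 KiB write element is stored as
 * cpu_to_be32(IPR_IOADL_FLAGS_WRITE | 0x1000).  In both cases only the
 * final descriptor carries IPR_IOADL_FLAGS_LAST.
 */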
3926  
3927  /**
3928   * ipr_update_ioa_ucode - Update IOA's microcode
3929   * @ioa_cfg:	ioa config struct
3930   * @sglist:		scatter/gather list
3931   *
3932   * Initiate an adapter reset to update the IOA's microcode
3933   *
3934   * Return value:
3935   * 	0 on success / -EIO on failure
3936   **/
3937  static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3938  				struct ipr_sglist *sglist)
3939  {
3940  	unsigned long lock_flags;
3941  
3942  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3943  	while (ioa_cfg->in_reset_reload) {
3944  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3945  		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3946  		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3947  	}
3948  
3949  	if (ioa_cfg->ucode_sglist) {
3950  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3951  		dev_err(&ioa_cfg->pdev->dev,
3952  			"Microcode download already in progress\n");
3953  		return -EIO;
3954  	}
3955  
3956  	sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
3957  					sglist->scatterlist, sglist->num_sg,
3958  					DMA_TO_DEVICE);
3959  
3960  	if (!sglist->num_dma_sg) {
3961  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3962  		dev_err(&ioa_cfg->pdev->dev,
3963  			"Failed to map microcode download buffer!\n");
3964  		return -EIO;
3965  	}
3966  
3967  	ioa_cfg->ucode_sglist = sglist;
3968  	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3969  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3970  	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3971  
3972  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3973  	ioa_cfg->ucode_sglist = NULL;
3974  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3975  	return 0;
3976  }
3977  
3978  /**
3979   * ipr_store_update_fw - Update the firmware on the adapter
3980   * @dev:	device struct
3981   * @attr:	device attribute (unused)
3982   * @buf:	buffer
3983   * @count:	buffer size
3984   *
3985   * This function will update the firmware on the adapter.
3986   *
3987   * Return value:
3988   * 	count on success / other on failure
3989   **/
3990  static ssize_t ipr_store_update_fw(struct device *dev,
3991  				   struct device_attribute *attr,
3992  				   const char *buf, size_t count)
3993  {
3994  	struct Scsi_Host *shost = class_to_shost(dev);
3995  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3996  	struct ipr_ucode_image_header *image_hdr;
3997  	const struct firmware *fw_entry;
3998  	struct ipr_sglist *sglist;
3999  	char fname[100];
4000  	char *src;
4001  	char *endline;
4002  	int result, dnld_size;
4003  
4004  	if (!capable(CAP_SYS_ADMIN))
4005  		return -EACCES;
4006  
4007  	snprintf(fname, sizeof(fname), "%s", buf);
4008  
4009  	endline = strchr(fname, '\n');
4010  	if (endline)
4011  		*endline = '\0';
4012  
4013  	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4014  		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4015  		return -EIO;
4016  	}
4017  
4018  	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4019  
4020  	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4021  	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4022  	sglist = ipr_alloc_ucode_buffer(dnld_size);
4023  
4024  	if (!sglist) {
4025  		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4026  		release_firmware(fw_entry);
4027  		return -ENOMEM;
4028  	}
4029  
4030  	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4031  
4032  	if (result) {
4033  		dev_err(&ioa_cfg->pdev->dev,
4034  			"Microcode buffer copy to DMA buffer failed\n");
4035  		goto out;
4036  	}
4037  
4038  	ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
4039  
4040  	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
4041  
4042  	if (!result)
4043  		result = count;
4044  out:
4045  	ipr_free_ucode_buffer(sglist);
4046  	release_firmware(fw_entry);
4047  	return result;
4048  }
4049  
4050  static struct device_attribute ipr_update_fw_attr = {
4051  	.attr = {
4052  		.name =		"update_fw",
4053  		.mode =		S_IWUSR,
4054  	},
4055  	.store = ipr_store_update_fw
4056  };
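
/*
 * Usage sketch for the update_fw attribute above (numbers are
 * illustrative): the string written to the attribute is taken as a
 * firmware file name and handed to request_firmware(), so the image must
 * be reachable through the normal firmware search path.  The
 * ipr_ucode_image_header is skipped, e.g. for a hypothetical
 * header_length of 68 and an image of 1048576 bytes the downloaded
 * payload is 1048576 - 68 = 1048508 bytes, and the transfer is carried
 * out by a normal-shutdown adapter reset in ipr_update_ioa_ucode().
 */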
4057  
4058  /**
4059   * ipr_show_fw_type - Show the adapter's firmware type.
4060   * @dev:	class device struct
4061   * @attr:	device attribute (unused)
4062   * @buf:	buffer
4063   *
4064   * Return value:
4065   *	number of bytes printed to buffer
4066   **/
4067  static ssize_t ipr_show_fw_type(struct device *dev,
4068  				struct device_attribute *attr, char *buf)
4069  {
4070  	struct Scsi_Host *shost = class_to_shost(dev);
4071  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4072  	unsigned long lock_flags = 0;
4073  	int len;
4074  
4075  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4076  	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4077  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4078  	return len;
4079  }
4080  
4081  static struct device_attribute ipr_ioa_fw_type_attr = {
4082  	.attr = {
4083  		.name =		"fw_type",
4084  		.mode =		S_IRUGO,
4085  	},
4086  	.show = ipr_show_fw_type
4087  };
4088  
4089  static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4090  				struct bin_attribute *bin_attr, char *buf,
4091  				loff_t off, size_t count)
4092  {
4093  	struct device *cdev = kobj_to_dev(kobj);
4094  	struct Scsi_Host *shost = class_to_shost(cdev);
4095  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4096  	struct ipr_hostrcb *hostrcb;
4097  	unsigned long lock_flags = 0;
4098  	int ret;
4099  
4100  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4101  	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4102  					struct ipr_hostrcb, queue);
4103  	if (!hostrcb) {
4104  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4105  		return 0;
4106  	}
4107  	ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4108  				sizeof(hostrcb->hcam));
4109  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4110  	return ret;
4111  }
4112  
4113  static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4114  				struct bin_attribute *bin_attr, char *buf,
4115  				loff_t off, size_t count)
4116  {
4117  	struct device *cdev = kobj_to_dev(kobj);
4118  	struct Scsi_Host *shost = class_to_shost(cdev);
4119  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4120  	struct ipr_hostrcb *hostrcb;
4121  	unsigned long lock_flags = 0;
4122  
4123  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4124  	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4125  					struct ipr_hostrcb, queue);
4126  	if (!hostrcb) {
4127  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4128  		return count;
4129  	}
4130  
4131  	/* Reclaim hostrcb before exit */
4132  	list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4133  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4134  	return count;
4135  }
4136  
4137  static struct bin_attribute ipr_ioa_async_err_log = {
4138  	.attr = {
4139  		.name =		"async_err_log",
4140  		.mode =		S_IRUGO | S_IWUSR,
4141  	},
4142  	.size = 0,
4143  	.read = ipr_read_async_err_log,
4144  	.write = ipr_next_async_err_log
4145  };
4146  
4147  static struct attribute *ipr_ioa_attrs[] = {
4148  	&ipr_fw_version_attr.attr,
4149  	&ipr_log_level_attr.attr,
4150  	&ipr_diagnostics_attr.attr,
4151  	&ipr_ioa_state_attr.attr,
4152  	&ipr_ioa_reset_attr.attr,
4153  	&ipr_update_fw_attr.attr,
4154  	&ipr_ioa_fw_type_attr.attr,
4155  	&ipr_iopoll_weight_attr.attr,
4156  	NULL,
4157  };
4158  
4159  ATTRIBUTE_GROUPS(ipr_ioa);
4160  
4161  #ifdef CONFIG_SCSI_IPR_DUMP
4162  /**
4163   * ipr_read_dump - Dump the adapter
4164   * @filp:		open sysfs file
4165   * @kobj:		kobject struct
4166   * @bin_attr:		bin_attribute struct
4167   * @buf:		buffer
4168   * @off:		offset
4169   * @count:		buffer size
4170   *
4171   * Return value:
4172   *	number of bytes printed to buffer
4173   **/
4174  static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
4175  			     struct bin_attribute *bin_attr,
4176  			     char *buf, loff_t off, size_t count)
4177  {
4178  	struct device *cdev = kobj_to_dev(kobj);
4179  	struct Scsi_Host *shost = class_to_shost(cdev);
4180  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4181  	struct ipr_dump *dump;
4182  	unsigned long lock_flags = 0;
4183  	char *src;
4184  	int len, sdt_end;
4185  	size_t rc = count;
4186  
4187  	if (!capable(CAP_SYS_ADMIN))
4188  		return -EACCES;
4189  
4190  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4191  	dump = ioa_cfg->dump;
4192  
4193  	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4194  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4195  		return 0;
4196  	}
4197  	kref_get(&dump->kref);
4198  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4199  
4200  	if (off > dump->driver_dump.hdr.len) {
4201  		kref_put(&dump->kref, ipr_release_dump);
4202  		return 0;
4203  	}
4204  
4205  	if (off + count > dump->driver_dump.hdr.len) {
4206  		count = dump->driver_dump.hdr.len - off;
4207  		rc = count;
4208  	}
4209  
4210  	if (count && off < sizeof(dump->driver_dump)) {
4211  		if (off + count > sizeof(dump->driver_dump))
4212  			len = sizeof(dump->driver_dump) - off;
4213  		else
4214  			len = count;
4215  		src = (u8 *)&dump->driver_dump + off;
4216  		memcpy(buf, src, len);
4217  		buf += len;
4218  		off += len;
4219  		count -= len;
4220  	}
4221  
4222  	off -= sizeof(dump->driver_dump);
4223  
4224  	if (ioa_cfg->sis64)
4225  		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4226  			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4227  			   sizeof(struct ipr_sdt_entry));
4228  	else
4229  		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4230  			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4231  
4232  	if (count && off < sdt_end) {
4233  		if (off + count > sdt_end)
4234  			len = sdt_end - off;
4235  		else
4236  			len = count;
4237  		src = (u8 *)&dump->ioa_dump + off;
4238  		memcpy(buf, src, len);
4239  		buf += len;
4240  		off += len;
4241  		count -= len;
4242  	}
4243  
4244  	off -= sdt_end;
4245  
4246  	while (count) {
4247  		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4248  			len = PAGE_ALIGN(off) - off;
4249  		else
4250  			len = count;
4251  		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4252  		src += off & ~PAGE_MASK;
4253  		memcpy(buf, src, len);
4254  		buf += len;
4255  		off += len;
4256  		count -= len;
4257  	}
4258  
4259  	kref_put(&dump->kref, ipr_release_dump);
4260  	return rc;
4261  }
4262  
4263  /**
4264   * ipr_alloc_dump - Prepare for adapter dump
4265   * @ioa_cfg:	ioa config struct
4266   *
4267   * Return value:
4268   *	0 on success / other on failure
4269   **/
4270  static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4271  {
4272  	struct ipr_dump *dump;
4273  	__be32 **ioa_data;
4274  	unsigned long lock_flags = 0;
4275  
4276  	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
4277  
4278  	if (!dump) {
4279  		ipr_err("Dump memory allocation failed\n");
4280  		return -ENOMEM;
4281  	}
4282  
4283  	if (ioa_cfg->sis64)
4284  		ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
4285  					      sizeof(__be32 *)));
4286  	else
4287  		ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
4288  					      sizeof(__be32 *)));
4289  
4290  	if (!ioa_data) {
4291  		ipr_err("Dump memory allocation failed\n");
4292  		kfree(dump);
4293  		return -ENOMEM;
4294  	}
4295  
4296  	dump->ioa_dump.ioa_data = ioa_data;
4297  
4298  	kref_init(&dump->kref);
4299  	dump->ioa_cfg = ioa_cfg;
4300  
4301  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4302  
4303  	if (INACTIVE != ioa_cfg->sdt_state) {
4304  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4305  		vfree(dump->ioa_dump.ioa_data);
4306  		kfree(dump);
4307  		return 0;
4308  	}
4309  
4310  	ioa_cfg->dump = dump;
4311  	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
4312  	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
4313  		ioa_cfg->dump_taken = 1;
4314  		schedule_work(&ioa_cfg->work_q);
4315  	}
4316  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4317  
4318  	return 0;
4319  }
4320  
4321  /**
4322   * ipr_free_dump - Free adapter dump memory
4323   * @ioa_cfg:	ioa config struct
4324   *
4325   * Return value:
4326   *	0 on success / other on failure
4327   **/
4328  static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4329  {
4330  	struct ipr_dump *dump;
4331  	unsigned long lock_flags = 0;
4332  
4333  	ENTER;
4334  
4335  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4336  	dump = ioa_cfg->dump;
4337  	if (!dump) {
4338  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4339  		return 0;
4340  	}
4341  
4342  	ioa_cfg->dump = NULL;
4343  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4344  
4345  	kref_put(&dump->kref, ipr_release_dump);
4346  
4347  	LEAVE;
4348  	return 0;
4349  }
4350  
4351  /**
4352   * ipr_write_dump - Setup dump state of adapter
4353   * @filp:		open sysfs file
4354   * @kobj:		kobject struct
4355   * @bin_attr:		bin_attribute struct
4356   * @buf:		buffer
4357   * @off:		offset
4358   * @count:		buffer size
4359   *
4360   * Return value:
4361   *	count on success / other on failure
4362   **/
4363  static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
4364  			      struct bin_attribute *bin_attr,
4365  			      char *buf, loff_t off, size_t count)
4366  {
4367  	struct device *cdev = kobj_to_dev(kobj);
4368  	struct Scsi_Host *shost = class_to_shost(cdev);
4369  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4370  	int rc;
4371  
4372  	if (!capable(CAP_SYS_ADMIN))
4373  		return -EACCES;
4374  
4375  	if (buf[0] == '1')
4376  		rc = ipr_alloc_dump(ioa_cfg);
4377  	else if (buf[0] == '0')
4378  		rc = ipr_free_dump(ioa_cfg);
4379  	else
4380  		return -EINVAL;
4381  
4382  	if (rc)
4383  		return rc;
4384  	else
4385  		return count;
4386  }
4387  
4388  static struct bin_attribute ipr_dump_attr = {
4389  	.attr =	{
4390  		.name = "dump",
4391  		.mode = S_IRUSR | S_IWUSR,
4392  	},
4393  	.size = 0,
4394  	.read = ipr_read_dump,
4395  	.write = ipr_write_dump
4396  };
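
/*
 * Usage sketch for the dump attribute above: writing '1' calls
 * ipr_alloc_dump() to stage buffers and arm WAIT_FOR_DUMP, writing '0'
 * frees them via ipr_free_dump(), and reads return the driver dump
 * header, the smart dump table, and then the captured IOA pages.  For
 * offsets past the header and SDT, ipr_read_dump() indexes the page
 * array with (off & PAGE_MASK) >> PAGE_SHIFT, so data is streamed one
 * page at a time.
 */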
4397  #else
4398  static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4399  #endif
4400  
4401  /**
4402   * ipr_change_queue_depth - Change the device's queue depth
4403   * @sdev:	scsi device struct
4404   * @qdepth:	depth to set
4405   *
4406   * Return value:
4407   * 	actual depth set
4408   **/
4409  static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
4410  {
4411  	scsi_change_queue_depth(sdev, qdepth);
4412  	return sdev->queue_depth;
4413  }
4414  
4415  /**
4416   * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4417   * @dev:	device struct
4418   * @attr:	device attribute structure
4419   * @buf:	buffer
4420   *
4421   * Return value:
4422   * 	number of bytes printed to buffer
4423   **/
4424  static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4425  {
4426  	struct scsi_device *sdev = to_scsi_device(dev);
4427  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4428  	struct ipr_resource_entry *res;
4429  	unsigned long lock_flags = 0;
4430  	ssize_t len = -ENXIO;
4431  
4432  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4433  	res = (struct ipr_resource_entry *)sdev->hostdata;
4434  	if (res)
4435  		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4436  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4437  	return len;
4438  }
4439  
4440  static struct device_attribute ipr_adapter_handle_attr = {
4441  	.attr = {
4442  		.name = 	"adapter_handle",
4443  		.mode =		S_IRUSR,
4444  	},
4445  	.show = ipr_show_adapter_handle
4446  };
4447  
4448  /**
4449   * ipr_show_resource_path - Show the resource path or the resource address for
4450   *			    this device.
4451   * @dev:	device struct
4452   * @attr:	device attribute structure
4453   * @buf:	buffer
4454   *
4455   * Return value:
4456   * 	number of bytes printed to buffer
4457   **/
4458  static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4459  {
4460  	struct scsi_device *sdev = to_scsi_device(dev);
4461  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4462  	struct ipr_resource_entry *res;
4463  	unsigned long lock_flags = 0;
4464  	ssize_t len = -ENXIO;
4465  	char buffer[IPR_MAX_RES_PATH_LENGTH];
4466  
4467  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4468  	res = (struct ipr_resource_entry *)sdev->hostdata;
4469  	if (res && ioa_cfg->sis64)
4470  		len = snprintf(buf, PAGE_SIZE, "%s\n",
4471  			       __ipr_format_res_path(res->res_path, buffer,
4472  						     sizeof(buffer)));
4473  	else if (res)
4474  		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4475  			       res->bus, res->target, res->lun);
4476  
4477  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4478  	return len;
4479  }
4480  
4481  static struct device_attribute ipr_resource_path_attr = {
4482  	.attr = {
4483  		.name = 	"resource_path",
4484  		.mode =		S_IRUGO,
4485  	},
4486  	.show = ipr_show_resource_path
4487  };
4488  
4489  /**
4490   * ipr_show_device_id - Show the device_id for this device.
4491   * @dev:	device struct
4492   * @attr:	device attribute structure
4493   * @buf:	buffer
4494   *
4495   * Return value:
4496   *	number of bytes printed to buffer
4497   **/
4498  static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4499  {
4500  	struct scsi_device *sdev = to_scsi_device(dev);
4501  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4502  	struct ipr_resource_entry *res;
4503  	unsigned long lock_flags = 0;
4504  	ssize_t len = -ENXIO;
4505  
4506  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4507  	res = (struct ipr_resource_entry *)sdev->hostdata;
4508  	if (res && ioa_cfg->sis64)
4509  		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
4510  	else if (res)
4511  		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4512  
4513  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4514  	return len;
4515  }
4516  
4517  static struct device_attribute ipr_device_id_attr = {
4518  	.attr = {
4519  		.name =		"device_id",
4520  		.mode =		S_IRUGO,
4521  	},
4522  	.show = ipr_show_device_id
4523  };
4524  
4525  /**
4526   * ipr_show_resource_type - Show the resource type for this device.
4527   * @dev:	device struct
4528   * @attr:	device attribute structure
4529   * @buf:	buffer
4530   *
4531   * Return value:
4532   *	number of bytes printed to buffer
4533   **/
4534  static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4535  {
4536  	struct scsi_device *sdev = to_scsi_device(dev);
4537  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4538  	struct ipr_resource_entry *res;
4539  	unsigned long lock_flags = 0;
4540  	ssize_t len = -ENXIO;
4541  
4542  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4543  	res = (struct ipr_resource_entry *)sdev->hostdata;
4544  
4545  	if (res)
4546  		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4547  
4548  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4549  	return len;
4550  }
4551  
4552  static struct device_attribute ipr_resource_type_attr = {
4553  	.attr = {
4554  		.name =		"resource_type",
4555  		.mode =		S_IRUGO,
4556  	},
4557  	.show = ipr_show_resource_type
4558  };
4559  
4560  /**
4561   * ipr_show_raw_mode - Show the adapter's raw mode
4562   * @dev:	class device struct
4563   * @attr:	device attribute (unused)
4564   * @buf:	buffer
4565   *
4566   * Return value:
4567   * 	number of bytes printed to buffer
4568   **/
4569  static ssize_t ipr_show_raw_mode(struct device *dev,
4570  				 struct device_attribute *attr, char *buf)
4571  {
4572  	struct scsi_device *sdev = to_scsi_device(dev);
4573  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4574  	struct ipr_resource_entry *res;
4575  	unsigned long lock_flags = 0;
4576  	ssize_t len;
4577  
4578  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4579  	res = (struct ipr_resource_entry *)sdev->hostdata;
4580  	if (res)
4581  		len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4582  	else
4583  		len = -ENXIO;
4584  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4585  	return len;
4586  }
4587  
4588  /**
4589   * ipr_store_raw_mode - Change the adapter's raw mode
4590   * @dev:	class device struct
4591   * @attr:	device attribute (unused)
4592   * @buf:	buffer
4593   * @count:		buffer size
4594   *
4595   * Return value:
4596   * 	number of bytes consumed from buffer on success / error code on failure
4597   **/
4598  static ssize_t ipr_store_raw_mode(struct device *dev,
4599  				  struct device_attribute *attr,
4600  				  const char *buf, size_t count)
4601  {
4602  	struct scsi_device *sdev = to_scsi_device(dev);
4603  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4604  	struct ipr_resource_entry *res;
4605  	unsigned long lock_flags = 0;
4606  	ssize_t len;
4607  
4608  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4609  	res = (struct ipr_resource_entry *)sdev->hostdata;
4610  	if (res) {
4611  		if (ipr_is_af_dasd_device(res)) {
4612  			res->raw_mode = simple_strtoul(buf, NULL, 10);
4613  			len = strlen(buf);
4614  			if (res->sdev)
4615  				sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4616  					res->raw_mode ? "enabled" : "disabled");
4617  		} else
4618  			len = -EINVAL;
4619  	} else
4620  		len = -ENXIO;
4621  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4622  	return len;
4623  }
4624  
4625  static struct device_attribute ipr_raw_mode_attr = {
4626  	.attr = {
4627  		.name =		"raw_mode",
4628  		.mode =		S_IRUGO | S_IWUSR,
4629  	},
4630  	.show = ipr_show_raw_mode,
4631  	.store = ipr_store_raw_mode
4632  };
4633  
4634  static struct attribute *ipr_dev_attrs[] = {
4635  	&ipr_adapter_handle_attr.attr,
4636  	&ipr_resource_path_attr.attr,
4637  	&ipr_device_id_attr.attr,
4638  	&ipr_resource_type_attr.attr,
4639  	&ipr_raw_mode_attr.attr,
4640  	NULL,
4641  };
4642  
4643  ATTRIBUTE_GROUPS(ipr_dev);
4644  
4645  /**
4646   * ipr_biosparam - Return the HSC mapping
4647   * @sdev:			scsi device struct
4648   * @block_device:	block device pointer
4649   * @capacity:		capacity of the device
4650   * @parm:			Array containing returned HSC values.
4651   *
4652   * This function generates the HSC parms that fdisk uses.
4653   * We want to make sure we return something that places partitions
4654   * on 4k boundaries for best performance with the IOA.
4655   *
4656   * Return value:
4657   * 	0 on success
4658   **/
4659  static int ipr_biosparam(struct scsi_device *sdev,
4660  			 struct block_device *block_device,
4661  			 sector_t capacity, int *parm)
4662  {
4663  	int heads, sectors;
4664  	sector_t cylinders;
4665  
4666  	heads = 128;
4667  	sectors = 32;
4668  
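	/*
	 * 128 heads * 32 sectors/track = 4096 sectors per cylinder, so with
	 * 512-byte blocks every cylinder boundary falls on a 2MB (and thus
	 * 4k aligned) offset, matching the alignment goal noted above.
	 */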
4669  	cylinders = capacity;
4670  	sector_div(cylinders, (128 * 32));
4671  
4672  	/* return result */
4673  	parm[0] = heads;
4674  	parm[1] = sectors;
4675  	parm[2] = cylinders;
4676  
4677  	return 0;
4678  }
4679  
4680  /**
4681   * ipr_find_starget - Find target based on bus/target.
4682   * @starget:	scsi target struct
4683   *
4684   * Return value:
4685   * 	resource entry pointer if found / NULL if not found
4686   **/
4687  static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4688  {
4689  	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4690  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4691  	struct ipr_resource_entry *res;
4692  
4693  	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4694  		if ((res->bus == starget->channel) &&
4695  		    (res->target == starget->id)) {
4696  			return res;
4697  		}
4698  	}
4699  
4700  	return NULL;
4701  }
4702  
4703  /**
4704   * ipr_target_destroy - Destroy a SCSI target
4705   * @starget:	scsi target struct
4706   *
4707   **/
4708  static void ipr_target_destroy(struct scsi_target *starget)
4709  {
4710  	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4711  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4712  
4713  	if (ioa_cfg->sis64) {
4714  		if (!ipr_find_starget(starget)) {
4715  			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4716  				clear_bit(starget->id, ioa_cfg->array_ids);
4717  			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4718  				clear_bit(starget->id, ioa_cfg->vset_ids);
4719  			else if (starget->channel == 0)
4720  				clear_bit(starget->id, ioa_cfg->target_ids);
4721  		}
4722  	}
4723  }
4724  
4725  /**
4726   * ipr_find_sdev - Find device based on bus/target/lun.
4727   * @sdev:	scsi device struct
4728   *
4729   * Return value:
4730   * 	resource entry pointer if found / NULL if not found
4731   **/
4732  static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4733  {
4734  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4735  	struct ipr_resource_entry *res;
4736  
4737  	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4738  		if ((res->bus == sdev->channel) &&
4739  		    (res->target == sdev->id) &&
4740  		    (res->lun == sdev->lun))
4741  			return res;
4742  	}
4743  
4744  	return NULL;
4745  }
4746  
4747  /**
4748   * ipr_slave_destroy - Unconfigure a SCSI device
4749   * @sdev:	scsi device struct
4750   *
4751   * Return value:
4752   * 	nothing
4753   **/
4754  static void ipr_slave_destroy(struct scsi_device *sdev)
4755  {
4756  	struct ipr_resource_entry *res;
4757  	struct ipr_ioa_cfg *ioa_cfg;
4758  	unsigned long lock_flags = 0;
4759  
4760  	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4761  
4762  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4763  	res = (struct ipr_resource_entry *) sdev->hostdata;
4764  	if (res) {
4765  		sdev->hostdata = NULL;
4766  		res->sdev = NULL;
4767  	}
4768  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4769  }
4770  
4771  /**
4772   * ipr_device_configure - Configure a SCSI device
4773   * @sdev:	scsi device struct
4774   * @lim:	queue limits
4775   *
4776   * This function configures the specified scsi device.
4777   *
4778   * Return value:
4779   * 	0 on success
4780   **/
4781  static int ipr_device_configure(struct scsi_device *sdev,
4782  		struct queue_limits *lim)
4783  {
4784  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4785  	struct ipr_resource_entry *res;
4786  	unsigned long lock_flags = 0;
4787  	char buffer[IPR_MAX_RES_PATH_LENGTH];
4788  
4789  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4790  	res = sdev->hostdata;
4791  	if (res) {
4792  		if (ipr_is_af_dasd_device(res))
4793  			sdev->type = TYPE_RAID;
4794  		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4795  			sdev->scsi_level = 4;
4796  			sdev->no_uld_attach = 1;
4797  		}
4798  		if (ipr_is_vset_device(res)) {
4799  			sdev->scsi_level = SCSI_SPC_3;
4800  			sdev->no_report_opcodes = 1;
4801  			blk_queue_rq_timeout(sdev->request_queue,
4802  					     IPR_VSET_RW_TIMEOUT);
4803  			lim->max_hw_sectors = IPR_VSET_MAX_SECTORS;
4804  		}
4805  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4806  
4807  		if (ioa_cfg->sis64)
4808  			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4809  				    ipr_format_res_path(ioa_cfg,
4810  				res->res_path, buffer, sizeof(buffer)));
4811  		return 0;
4812  	}
4813  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4814  	return 0;
4815  }
4816  
4817  /**
4818   * ipr_slave_alloc - Prepare for commands to a device.
4819   * @sdev:	scsi device struct
4820   *
4821   * This function saves a pointer to the resource entry
4822   * in the scsi device struct if the device exists. We
4823   * can then use this pointer in ipr_queuecommand when
4824   * handling new commands.
4825   *
4826   * Return value:
4827   * 	0 on success / -ENXIO if device does not exist
4828   **/
4829  static int ipr_slave_alloc(struct scsi_device *sdev)
4830  {
4831  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4832  	struct ipr_resource_entry *res;
4833  	unsigned long lock_flags;
4834  	int rc = -ENXIO;
4835  
4836  	sdev->hostdata = NULL;
4837  
4838  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4839  
4840  	res = ipr_find_sdev(sdev);
4841  	if (res) {
4842  		res->sdev = sdev;
4843  		res->add_to_ml = 0;
4844  		res->in_erp = 0;
4845  		sdev->hostdata = res;
4846  		if (!ipr_is_naca_model(res))
4847  			res->needs_sync_complete = 1;
4848  		rc = 0;
4849  		if (ipr_is_gata(res)) {
4850  			sdev_printk(KERN_ERR, sdev, "SATA devices are no longer "
4851  				"supported by this driver. Skipping device.\n");
4852  			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4853  			return -ENXIO;
4854  		}
4855  	}
4856  
4857  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4858  
4859  	return rc;
4860  }
4861  
4862  /**
4863   * ipr_match_lun - Match function for specified LUN
4864   * @ipr_cmd:	ipr command struct
4865   * @device:		device to match (sdev)
4866   *
4867   * Returns:
4868   *	1 if command matches sdev / 0 if command does not match sdev
4869   **/
4870  static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
4871  {
4872  	if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
4873  		return 1;
4874  	return 0;
4875  }
4876  
4877  /**
4878   * ipr_cmnd_is_free - Check if a command is free or not
4879   * @ipr_cmd:	ipr command struct
4880   *
4881   * Returns:
4882   *	true / false
4883   **/
4884  static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
4885  {
4886  	struct ipr_cmnd *loop_cmd;
4887  
4888  	list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
4889  		if (loop_cmd == ipr_cmd)
4890  			return true;
4891  	}
4892  
4893  	return false;
4894  }
4895  
4896  /**
4897   * ipr_wait_for_ops - Wait for matching commands to complete
4898   * @ioa_cfg:	ioa config struct
4899   * @device:		device to match (sdev)
4900   * @match:		match function to use
4901   *
4902   * Returns:
4903   *	SUCCESS / FAILED
4904   **/
4905  static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
4906  			    int (*match)(struct ipr_cmnd *, void *))
4907  {
4908  	struct ipr_cmnd *ipr_cmd;
4909  	int wait, i;
4910  	unsigned long flags;
4911  	struct ipr_hrr_queue *hrrq;
4912  	signed long timeout = IPR_ABORT_TASK_TIMEOUT;
4913  	DECLARE_COMPLETION_ONSTACK(comp);
4914  
4915  	ENTER;
4916  	do {
4917  		wait = 0;
4918  
4919  		for_each_hrrq(hrrq, ioa_cfg) {
4920  			spin_lock_irqsave(hrrq->lock, flags);
4921  			for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
4922  				ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
4923  				if (!ipr_cmnd_is_free(ipr_cmd)) {
4924  					if (match(ipr_cmd, device)) {
4925  						ipr_cmd->eh_comp = &comp;
4926  						wait++;
4927  					}
4928  				}
4929  			}
4930  			spin_unlock_irqrestore(hrrq->lock, flags);
4931  		}
4932  
4933  		if (wait) {
4934  			timeout = wait_for_completion_timeout(&comp, timeout);
4935  
4936  			if (!timeout) {
4937  				wait = 0;
4938  
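				/*
				 * The wait timed out. Walk the queues again and
				 * clear any eh_comp pointers set above so a late
				 * completion cannot reference the on-stack
				 * completion after this function returns.
				 */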
4939  				for_each_hrrq(hrrq, ioa_cfg) {
4940  					spin_lock_irqsave(hrrq->lock, flags);
4941  					for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
4942  						ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
4943  						if (!ipr_cmnd_is_free(ipr_cmd)) {
4944  							if (match(ipr_cmd, device)) {
4945  								ipr_cmd->eh_comp = NULL;
4946  								wait++;
4947  							}
4948  						}
4949  					}
4950  					spin_unlock_irqrestore(hrrq->lock, flags);
4951  				}
4952  
4953  				if (wait)
4954  					dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
4955  				LEAVE;
4956  				return wait ? FAILED : SUCCESS;
4957  			}
4958  		}
4959  	} while (wait);
4960  
4961  	LEAVE;
4962  	return SUCCESS;
4963  }
4964  
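/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd:	scsi command struct
 *
 * This function initiates an adapter reset, unless one is already in
 * progress, and then waits for the reset/reload to complete.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/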
4965  static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
4966  {
4967  	struct ipr_ioa_cfg *ioa_cfg;
4968  	unsigned long lock_flags = 0;
4969  	int rc = SUCCESS;
4970  
4971  	ENTER;
4972  	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
4973  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4974  
4975  	if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4976  		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4977  		dev_err(&ioa_cfg->pdev->dev,
4978  			"Adapter being reset as a result of error recovery.\n");
4979  
4980  		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4981  			ioa_cfg->sdt_state = GET_DUMP;
4982  	}
4983  
4984  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4985  	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4986  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4987  
4988  	/* If we got hit with a host reset while we were already resetting the
4989  	 * adapter for some reason and that reset failed, the adapter is dead. */
4990  	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
4991  		ipr_trace;
4992  		rc = FAILED;
4993  	}
4994  
4995  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4996  	LEAVE;
4997  	return rc;
4998  }
4999  
5000  /**
5001   * ipr_device_reset - Reset the device
5002   * @ioa_cfg:	ioa config struct
5003   * @res:		resource entry struct
5004   *
5005   * This function issues a device reset to the affected device.
5006   * If the device is a SCSI device, a LUN reset will be sent
5007   * to the device first. If that does not work, a target reset
5008   * will be sent.
5009   *
5010   * Return value:
5011   *	0 on success / non-zero on failure
5012   **/
5013  static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5014  			    struct ipr_resource_entry *res)
5015  {
5016  	struct ipr_cmnd *ipr_cmd;
5017  	struct ipr_ioarcb *ioarcb;
5018  	struct ipr_cmd_pkt *cmd_pkt;
5019  	u32 ioasc;
5020  
5021  	ENTER;
5022  	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5023  	ioarcb = &ipr_cmd->ioarcb;
5024  	cmd_pkt = &ioarcb->cmd_pkt;
5025  
5026  	if (ipr_cmd->ioa_cfg->sis64)
5027  		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5028  
5029  	ioarcb->res_handle = res->res_handle;
5030  	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5031  	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5032  
5033  	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5034  	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5035  	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5036  
5037  	LEAVE;
5038  	return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
5039  }
5040  
5041  /**
5042   * __ipr_eh_dev_reset - Reset the device
5043   * @scsi_cmd:	scsi command struct
5044   *
5045   * This function issues a device reset to the affected device.
5046   * A LUN reset will be sent to the device first. If that does
5047   * not work, a target reset will be sent.
5048   *
5049   * Return value:
5050   *	SUCCESS / FAILED
5051   **/
5052  static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
5053  {
5054  	struct ipr_ioa_cfg *ioa_cfg;
5055  	struct ipr_resource_entry *res;
5056  	int rc = 0;
5057  
5058  	ENTER;
5059  	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5060  	res = scsi_cmd->device->hostdata;
5061  
5062  	/*
5063  	 * If we are currently going through reset/reload, return failed. This will force the
5064  	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5065  	 * reset to complete
5066  	 */
5067  	if (ioa_cfg->in_reset_reload)
5068  		return FAILED;
5069  	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5070  		return FAILED;
5071  
5072  	res->resetting_device = 1;
5073  	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
5074  
5075  	rc = ipr_device_reset(ioa_cfg, res);
5076  	res->resetting_device = 0;
5077  	res->reset_occurred = 1;
5078  
5079  	LEAVE;
5080  	return rc ? FAILED : SUCCESS;
5081  }
5082  
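/**
 * ipr_eh_dev_reset - Reset the device
 * @cmd:	scsi command struct
 *
 * This function takes the host lock, issues the device reset via
 * __ipr_eh_dev_reset, and then waits for any outstanding commands to
 * the device to complete.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/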
5083  static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
5084  {
5085  	int rc;
5086  	struct ipr_ioa_cfg *ioa_cfg;
5087  	struct ipr_resource_entry *res;
5088  
5089  	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5090  	res = cmd->device->hostdata;
5091  
5092  	if (!res)
5093  		return FAILED;
5094  
5095  	spin_lock_irq(cmd->device->host->host_lock);
5096  	rc = __ipr_eh_dev_reset(cmd);
5097  	spin_unlock_irq(cmd->device->host->host_lock);
5098  
5099  	if (rc == SUCCESS)
5100  		rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5101  
5102  	return rc;
5103  }
5104  
5105  /**
5106   * ipr_bus_reset_done - Op done function for bus reset.
5107   * @ipr_cmd:	ipr command struct
5108   *
5109   * This function is the op done function for a bus reset
5110   *
5111   * Return value:
5112   * 	none
5113   **/
5114  static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5115  {
5116  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5117  	struct ipr_resource_entry *res;
5118  
5119  	ENTER;
5120  	if (!ioa_cfg->sis64)
5121  		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5122  			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5123  				scsi_report_bus_reset(ioa_cfg->host, res->bus);
5124  				break;
5125  			}
5126  		}
5127  
5128  	/*
5129  	 * If abort has not completed, indicate the reset has, else call the
5130  	 * abort's done function to wake the sleeping eh thread
5131  	 */
5132  	if (ipr_cmd->sibling->sibling)
5133  		ipr_cmd->sibling->sibling = NULL;
5134  	else
5135  		ipr_cmd->sibling->done(ipr_cmd->sibling);
5136  
5137  	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5138  	LEAVE;
5139  }
5140  
5141  /**
5142   * ipr_abort_timeout - An abort task has timed out
5143   * @t: Timer context used to fetch ipr command struct
5144   *
5145   * This function handles when an abort task times out. If this
5146   * happens we issue a bus reset since we have resources tied
5147   * up that must be freed before returning to the midlayer.
5148   *
5149   * Return value:
5150   *	none
5151   **/
5152  static void ipr_abort_timeout(struct timer_list *t)
5153  {
5154  	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
5155  	struct ipr_cmnd *reset_cmd;
5156  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5157  	struct ipr_cmd_pkt *cmd_pkt;
5158  	unsigned long lock_flags = 0;
5159  
5160  	ENTER;
5161  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5162  	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5163  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5164  		return;
5165  	}
5166  
5167  	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
5168  	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5169  	ipr_cmd->sibling = reset_cmd;
5170  	reset_cmd->sibling = ipr_cmd;
5171  	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5172  	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5173  	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5174  	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5175  	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5176  
5177  	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5178  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5179  	LEAVE;
5180  }
5181  
5182  /**
5183   * ipr_cancel_op - Cancel specified op
5184   * @scsi_cmd:	scsi command struct
5185   *
5186   * This function cancels specified op.
5187   *
5188   * Return value:
5189   *	SUCCESS / FAILED
5190   **/
5191  static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
5192  {
5193  	struct ipr_cmnd *ipr_cmd;
5194  	struct ipr_ioa_cfg *ioa_cfg;
5195  	struct ipr_resource_entry *res;
5196  	struct ipr_cmd_pkt *cmd_pkt;
5197  	u32 ioasc;
5198  	int i, op_found = 0;
5199  	struct ipr_hrr_queue *hrrq;
5200  
5201  	ENTER;
5202  	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5203  	res = scsi_cmd->device->hostdata;
5204  
5205  	/* If we are currently going through reset/reload, return failed.
5206  	 * This will force the mid-layer to call ipr_eh_host_reset,
5207  	 * which will then go to sleep and wait for the reset to complete
5208  	 */
5209  	if (ioa_cfg->in_reset_reload ||
5210  	    ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
5211  		return FAILED;
5212  	if (!res)
5213  		return FAILED;
5214  
5215  	/*
5216  	 * If we are aborting a timed out op, chances are that the timeout was caused
5217  	 * by a still not detected EEH error. In such cases, reading a register will
5218  	 * trigger the EEH recovery infrastructure.
5219  	 */
5220  	readl(ioa_cfg->regs.sense_interrupt_reg);
5221  
5222  	if (!ipr_is_gscsi(res))
5223  		return FAILED;
5224  
5225  	for_each_hrrq(hrrq, ioa_cfg) {
5226  		spin_lock(&hrrq->_lock);
5227  		for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5228  			if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5229  				if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5230  					op_found = 1;
5231  					break;
5232  				}
5233  			}
5234  		}
5235  		spin_unlock(&hrrq->_lock);
5236  	}
5237  
5238  	if (!op_found)
5239  		return SUCCESS;
5240  
5241  	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5242  	ipr_cmd->ioarcb.res_handle = res->res_handle;
5243  	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5244  	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5245  	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5246  	ipr_cmd->u.sdev = scsi_cmd->device;
5247  
5248  	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5249  		    scsi_cmd->cmnd[0]);
5250  	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
5251  	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5252  
5253  	/*
5254  	 * If the abort task timed out and we sent a bus reset, we will get
5255  	 * one of the following responses to the abort
5256  	 */
5257  	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5258  		ioasc = 0;
5259  		ipr_trace;
5260  	}
5261  
5262  	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5263  	if (!ipr_is_naca_model(res))
5264  		res->needs_sync_complete = 1;
5265  
5266  	LEAVE;
5267  	return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
5268  }
5269  
5270  /**
5271   * ipr_scan_finished - Report whether scan is done
5272   * @shost:           scsi host struct
5273   * @elapsed_time:    elapsed time
5274   *
5275   * Return value:
5276   *	0 if scan in progress / 1 if scan is complete
5277   **/
5278  static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5279  {
5280  	unsigned long lock_flags;
5281  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5282  	int rc = 0;
5283  
5284  	spin_lock_irqsave(shost->host_lock, lock_flags);
5285  	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5286  		rc = 1;
5287  	if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5288  		rc = 1;
5289  	spin_unlock_irqrestore(shost->host_lock, lock_flags);
5290  	return rc;
5291  }
5292  
5293  /**
5294   * ipr_eh_abort - Abort the specified command
5295   * @scsi_cmd:	scsi command struct
5296   *
5297   * Return value:
5298   * 	SUCCESS / FAILED
5299   **/
5300  static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
5301  {
5302  	unsigned long flags;
5303  	int rc;
5304  	struct ipr_ioa_cfg *ioa_cfg;
5305  
5306  	ENTER;
5307  
5308  	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5309  
5310  	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5311  	rc = ipr_cancel_op(scsi_cmd);
5312  	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
5313  
5314  	if (rc == SUCCESS)
5315  		rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
5316  	LEAVE;
5317  	return rc;
5318  }
5319  
5320  /**
5321   * ipr_handle_other_interrupt - Handle "other" interrupts
5322   * @ioa_cfg:	ioa config struct
5323   * @int_reg:	interrupt register
5324   *
5325   * Return value:
5326   * 	IRQ_NONE / IRQ_HANDLED
5327   **/
5328  static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
5329  					      u32 int_reg)
5330  {
5331  	irqreturn_t rc = IRQ_HANDLED;
5332  	u32 int_mask_reg;
5333  
5334  	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5335  	int_reg &= ~int_mask_reg;
5336  
5337  	/* If an interrupt on the adapter did not occur, ignore it.
5338  	 * Or in the case of SIS 64, check for a stage change interrupt.
5339  	 */
5340  	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5341  		if (ioa_cfg->sis64) {
5342  			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5343  			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5344  			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5345  
5346  				/* clear stage change */
5347  				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5348  				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5349  				list_del(&ioa_cfg->reset_cmd->queue);
5350  				del_timer(&ioa_cfg->reset_cmd->timer);
5351  				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5352  				return IRQ_HANDLED;
5353  			}
5354  		}
5355  
5356  		return IRQ_NONE;
5357  	}
5358  
5359  	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5360  		/* Mask the interrupt */
5361  		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
5362  		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5363  
5364  		list_del(&ioa_cfg->reset_cmd->queue);
5365  		del_timer(&ioa_cfg->reset_cmd->timer);
5366  		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5367  	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
5368  		if (ioa_cfg->clear_isr) {
5369  			if (ipr_debug && printk_ratelimit())
5370  				dev_err(&ioa_cfg->pdev->dev,
5371  					"Spurious interrupt detected. 0x%08X\n", int_reg);
5372  			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5373  			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5374  			return IRQ_NONE;
5375  		}
5376  	} else {
5377  		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5378  			ioa_cfg->ioa_unit_checked = 1;
5379  		else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5380  			dev_err(&ioa_cfg->pdev->dev,
5381  				"No Host RRQ. 0x%08X\n", int_reg);
5382  		else
5383  			dev_err(&ioa_cfg->pdev->dev,
5384  				"Permanent IOA failure. 0x%08X\n", int_reg);
5385  
5386  		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5387  			ioa_cfg->sdt_state = GET_DUMP;
5388  
5389  		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5390  		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5391  	}
5392  
5393  	return rc;
5394  }
5395  
5396  /**
5397   * ipr_isr_eh - Interrupt service routine error handler
5398   * @ioa_cfg:	ioa config struct
5399   * @msg:	message to log
5400   * @number:	caller-specific value logged with the message (e.g. a command index or retry count)
5401   *
5402   * Return value:
5403   * 	none
5404   **/
5405  static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
5406  {
5407  	ioa_cfg->errors_logged++;
5408  	dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
5409  
5410  	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5411  		ioa_cfg->sdt_state = GET_DUMP;
5412  
5413  	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5414  }
5415  
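/**
 * ipr_process_hrrq - Process completed responses from an HRR queue
 * @hrr_queue:	hrr queue to process
 * @budget:	maximum number of responses to process (<= 0 means no limit)
 * @doneq:	list to which completed commands are moved
 *
 * Return value:
 *	number of responses processed
 **/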
5416  static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
5417  						struct list_head *doneq)
5418  {
5419  	u32 ioasc;
5420  	u16 cmd_index;
5421  	struct ipr_cmnd *ipr_cmd;
5422  	struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5423  	int num_hrrq = 0;
5424  
5425  	/* If interrupts are disabled, ignore the interrupt */
5426  	if (!hrr_queue->allow_interrupts)
5427  		return 0;
5428  
5429  	while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5430  	       hrr_queue->toggle_bit) {
5431  
5432  		cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5433  			     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5434  			     IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5435  
5436  		if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5437  			     cmd_index < hrr_queue->min_cmd_id)) {
5438  			ipr_isr_eh(ioa_cfg,
5439  				"Invalid response handle from IOA: ",
5440  				cmd_index);
5441  			break;
5442  		}
5443  
5444  		ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5445  		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5446  
5447  		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5448  
5449  		list_move_tail(&ipr_cmd->queue, doneq);
5450  
5451  		if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5452  			hrr_queue->hrrq_curr++;
5453  		} else {
5454  			hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5455  			hrr_queue->toggle_bit ^= 1u;
5456  		}
5457  		num_hrrq++;
5458  		if (budget > 0 && num_hrrq >= budget)
5459  			break;
5460  	}
5461  
5462  	return num_hrrq;
5463  }
5464  
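/**
 * ipr_iopoll - irq_poll callback for HRRQ completion processing
 * @iop:	irq_poll struct embedded in the hrr queue
 * @budget:	maximum number of completions to process
 *
 * Completed commands are gathered under the HRRQ lock and their
 * fast_done handlers are run after the lock is released. The poll is
 * completed if fewer than @budget responses were found.
 *
 * Return value:
 *	number of completions processed
 **/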
5465  static int ipr_iopoll(struct irq_poll *iop, int budget)
5466  {
5467  	struct ipr_hrr_queue *hrrq;
5468  	struct ipr_cmnd *ipr_cmd, *temp;
5469  	unsigned long hrrq_flags;
5470  	int completed_ops;
5471  	LIST_HEAD(doneq);
5472  
5473  	hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5474  
5475  	spin_lock_irqsave(hrrq->lock, hrrq_flags);
5476  	completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5477  
5478  	if (completed_ops < budget)
5479  		irq_poll_complete(iop);
5480  	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5481  
5482  	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5483  		list_del(&ipr_cmd->queue);
5484  		del_timer(&ipr_cmd->timer);
5485  		ipr_cmd->fast_done(ipr_cmd);
5486  	}
5487  
5488  	return completed_ops;
5489  }
5490  
5491  /**
5492   * ipr_isr - Interrupt service routine
5493   * @irq:	irq number
5494   * @devp:	pointer to ioa config struct
5495   *
5496   * Return value:
5497   * 	IRQ_NONE / IRQ_HANDLED
5498   **/
5499  static irqreturn_t ipr_isr(int irq, void *devp)
5500  {
5501  	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5502  	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5503  	unsigned long hrrq_flags = 0;
5504  	u32 int_reg = 0;
5505  	int num_hrrq = 0;
5506  	int irq_none = 0;
5507  	struct ipr_cmnd *ipr_cmd, *temp;
5508  	irqreturn_t rc = IRQ_NONE;
5509  	LIST_HEAD(doneq);
5510  
5511  	spin_lock_irqsave(hrrq->lock, hrrq_flags);
5512  	/* If interrupts are disabled, ignore the interrupt */
5513  	if (!hrrq->allow_interrupts) {
5514  		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5515  		return IRQ_NONE;
5516  	}
5517  
5518  	while (1) {
5519  		if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5520  			rc =  IRQ_HANDLED;
5521  
5522  			if (!ioa_cfg->clear_isr)
5523  				break;
5524  
5525  			/* Clear the PCI interrupt */
5526  			num_hrrq = 0;
5527  			do {
5528  				writel(IPR_PCII_HRRQ_UPDATED,
5529  				     ioa_cfg->regs.clr_interrupt_reg32);
5530  				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5531  			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5532  				num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5533  
5534  		} else if (rc == IRQ_NONE && irq_none == 0) {
5535  			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5536  			irq_none++;
5537  		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5538  			   int_reg & IPR_PCII_HRRQ_UPDATED) {
5539  			ipr_isr_eh(ioa_cfg,
5540  				"Error clearing HRRQ: ", num_hrrq);
5541  			rc = IRQ_HANDLED;
5542  			break;
5543  		} else
5544  			break;
5545  	}
5546  
5547  	if (unlikely(rc == IRQ_NONE))
5548  		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5549  
5550  	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5551  	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5552  		list_del(&ipr_cmd->queue);
5553  		del_timer(&ipr_cmd->timer);
5554  		ipr_cmd->fast_done(ipr_cmd);
5555  	}
5556  	return rc;
5557  }
5558  
5559  /**
5560   * ipr_isr_mhrrq - Interrupt service routine for additional HRR queues
5561   * @irq:	irq number
5562   * @devp:	pointer to ioa config struct
5563   *
5564   * Return value:
5565   *	IRQ_NONE / IRQ_HANDLED
5566   **/
5567  static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5568  {
5569  	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5570  	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
5571  	unsigned long hrrq_flags = 0;
5572  	struct ipr_cmnd *ipr_cmd, *temp;
5573  	irqreturn_t rc = IRQ_NONE;
5574  	LIST_HEAD(doneq);
5575  
5576  	spin_lock_irqsave(hrrq->lock, hrrq_flags);
5577  
5578  	/* If interrupts are disabled, ignore the interrupt */
5579  	if (!hrrq->allow_interrupts) {
5580  		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5581  		return IRQ_NONE;
5582  	}
5583  
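	/*
	 * When irq_poll is in use (SIS-64 adapters with multiple vectors and a
	 * non-zero iopoll weight), just schedule the poll handler if new
	 * responses are pending; otherwise drain the queue directly here.
	 */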
5584  	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
5585  		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5586  		       hrrq->toggle_bit) {
5587  			irq_poll_sched(&hrrq->iopoll);
5588  			spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5589  			return IRQ_HANDLED;
5590  		}
5591  	} else {
5592  		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5593  			hrrq->toggle_bit)
5594  
5595  			if (ipr_process_hrrq(hrrq, -1, &doneq))
5596  				rc =  IRQ_HANDLED;
5597  	}
5598  
5599  	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5600  
5601  	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5602  		list_del(&ipr_cmd->queue);
5603  		del_timer(&ipr_cmd->timer);
5604  		ipr_cmd->fast_done(ipr_cmd);
5605  	}
5606  	return rc;
5607  }
5608  
5609  /**
5610   * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5611   * @ioa_cfg:	ioa config struct
5612   * @ipr_cmd:	ipr command struct
5613   *
5614   * Return value:
5615   * 	0 on success / -1 on failure
5616   **/
5617  static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5618  			     struct ipr_cmnd *ipr_cmd)
5619  {
5620  	int i, nseg;
5621  	struct scatterlist *sg;
5622  	u32 length;
5623  	u32 ioadl_flags = 0;
5624  	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5625  	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5626  	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5627  
5628  	length = scsi_bufflen(scsi_cmd);
5629  	if (!length)
5630  		return 0;
5631  
5632  	nseg = scsi_dma_map(scsi_cmd);
5633  	if (nseg < 0) {
5634  		if (printk_ratelimit())
5635  			dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5636  		return -1;
5637  	}
5638  
5639  	ipr_cmd->dma_use_sg = nseg;
5640  
5641  	ioarcb->data_transfer_length = cpu_to_be32(length);
5642  	ioarcb->ioadl_len =
5643  		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5644  
5645  	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5646  		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5647  		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5648  	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5649  		ioadl_flags = IPR_IOADL_FLAGS_READ;
5650  
5651  	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5652  		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5653  		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5654  		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5655  	}
5656  
5657  	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5658  	return 0;
5659  }
5660  
5661  /**
5662   * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5663   * @ioa_cfg:	ioa config struct
5664   * @ipr_cmd:	ipr command struct
5665   *
5666   * Return value:
5667   * 	0 on success / -1 on failure
5668   **/
5669  static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5670  			   struct ipr_cmnd *ipr_cmd)
5671  {
5672  	int i, nseg;
5673  	struct scatterlist *sg;
5674  	u32 length;
5675  	u32 ioadl_flags = 0;
5676  	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5677  	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5678  	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5679  
5680  	length = scsi_bufflen(scsi_cmd);
5681  	if (!length)
5682  		return 0;
5683  
5684  	nseg = scsi_dma_map(scsi_cmd);
5685  	if (nseg < 0) {
5686  		dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5687  		return -1;
5688  	}
5689  
5690  	ipr_cmd->dma_use_sg = nseg;
5691  
5692  	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5693  		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5694  		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5695  		ioarcb->data_transfer_length = cpu_to_be32(length);
5696  		ioarcb->ioadl_len =
5697  			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5698  	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5699  		ioadl_flags = IPR_IOADL_FLAGS_READ;
5700  		ioarcb->read_data_transfer_length = cpu_to_be32(length);
5701  		ioarcb->read_ioadl_len =
5702  			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5703  	}
5704  
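	/*
	 * If the S/G list is small enough, embed the IOADL in the IOARCB's
	 * inline add_data area rather than using the external IOADL buffer.
	 */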
5705  	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5706  		ioadl = ioarcb->u.add_data.u.ioadl;
5707  		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5708  				    offsetof(struct ipr_ioarcb, u.add_data));
5709  		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5710  	}
5711  
5712  	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5713  		ioadl[i].flags_and_data_len =
5714  			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5715  		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5716  	}
5717  
5718  	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5719  	return 0;
5720  }
5721  
5722  /**
5723   * __ipr_erp_done - Process completion of ERP for a device
5724   * @ipr_cmd:		ipr command struct
5725   *
5726   * This function copies the sense buffer into the scsi_cmd
5727   * struct and pushes the scsi_done function.
5728   *
5729   * Return value:
5730   * 	nothing
5731   **/
5732  static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5733  {
5734  	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5735  	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5736  	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5737  
5738  	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5739  		scsi_cmd->result |= (DID_ERROR << 16);
5740  		scmd_printk(KERN_ERR, scsi_cmd,
5741  			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5742  	} else {
5743  		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5744  		       SCSI_SENSE_BUFFERSIZE);
5745  	}
5746  
5747  	if (res) {
5748  		if (!ipr_is_naca_model(res))
5749  			res->needs_sync_complete = 1;
5750  		res->in_erp = 0;
5751  	}
5752  	scsi_dma_unmap(ipr_cmd->scsi_cmd);
5753  	scsi_done(scsi_cmd);
5754  	if (ipr_cmd->eh_comp)
5755  		complete(ipr_cmd->eh_comp);
5756  	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
5757  }
5758  
5759  /**
5760   * ipr_erp_done - Process completion of ERP for a device
5761   * @ipr_cmd:		ipr command struct
5762   *
5763   * This function copies the sense buffer into the scsi_cmd
5764   * struct and pushes the scsi_done function.
5765   *
5766   * Return value:
5767   * 	nothing
5768   **/
5769  static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5770  {
5771  	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
5772  	unsigned long hrrq_flags;
5773  
5774  	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
5775  	__ipr_erp_done(ipr_cmd);
5776  	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
5777  }
5778  
5779  /**
5780   * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5781   * @ipr_cmd:	ipr command struct
5782   *
5783   * Return value:
5784   * 	none
5785   **/
5786  static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5787  {
5788  	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5789  	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5790  	dma_addr_t dma_addr = ipr_cmd->dma_addr;
5791  
5792  	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5793  	ioarcb->data_transfer_length = 0;
5794  	ioarcb->read_data_transfer_length = 0;
5795  	ioarcb->ioadl_len = 0;
5796  	ioarcb->read_ioadl_len = 0;
5797  	ioasa->hdr.ioasc = 0;
5798  	ioasa->hdr.residual_data_len = 0;
5799  
5800  	if (ipr_cmd->ioa_cfg->sis64)
5801  		ioarcb->u.sis64_addr_data.data_ioadl_addr =
5802  			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5803  	else {
5804  		ioarcb->write_ioadl_addr =
5805  			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5806  		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5807  	}
5808  }
5809  
5810  /**
5811   * __ipr_erp_request_sense - Send request sense to a device
5812   * @ipr_cmd:	ipr command struct
5813   *
5814   * This function sends a request sense to a device as a result
5815   * of a check condition.
5816   *
5817   * Return value:
5818   * 	nothing
5819   **/
5820  static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5821  {
5822  	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5823  	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5824  
5825  	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5826  		__ipr_erp_done(ipr_cmd);
5827  		return;
5828  	}
5829  
5830  	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5831  
5832  	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5833  	cmd_pkt->cdb[0] = REQUEST_SENSE;
5834  	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5835  	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5836  	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5837  	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5838  
5839  	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5840  		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5841  
5842  	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5843  		   IPR_REQUEST_SENSE_TIMEOUT * 2);
5844  }
5845  
5846  /**
5847   * ipr_erp_request_sense - Send request sense to a device
5848   * @ipr_cmd:	ipr command struct
5849   *
5850   * This function sends a request sense to a device as a result
5851   * of a check condition.
5852   *
5853   * Return value:
5854   * 	nothing
5855   **/
5856  static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5857  {
5858  	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
5859  	unsigned long hrrq_flags;
5860  
5861  	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
5862  	__ipr_erp_request_sense(ipr_cmd);
5863  	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
5864  }
5865  
5866  /**
5867   * ipr_erp_cancel_all - Send cancel all to a device
5868   * @ipr_cmd:	ipr command struct
5869   *
5870   * This function sends a cancel all to a device to clear the
5871   * queue. If we are running TCQ on the device, QERR is set to 1,
5872   * which means all outstanding ops have been dropped on the floor.
5873   * Cancel all will return them to us.
5874   *
5875   * Return value:
5876   * 	nothing
5877   **/
5878  static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5879  {
5880  	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5881  	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5882  	struct ipr_cmd_pkt *cmd_pkt;
5883  
5884  	res->in_erp = 1;
5885  
5886  	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5887  
5888  	if (!scsi_cmd->device->simple_tags) {
5889  		__ipr_erp_request_sense(ipr_cmd);
5890  		return;
5891  	}
5892  
5893  	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5894  	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5895  	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5896  
5897  	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5898  		   IPR_CANCEL_ALL_TIMEOUT);
5899  }
5900  
5901  /**
5902   * ipr_dump_ioasa - Dump contents of IOASA
5903   * @ioa_cfg:	ioa config struct
5904   * @ipr_cmd:	ipr command struct
5905   * @res:		resource entry struct
5906   *
5907   * This function is invoked by the interrupt handler when ops
5908   * fail. It will log the IOASA if appropriate. Only called
5909   * for GPDD ops.
5910   *
5911   * Return value:
5912   * 	none
5913   **/
5914  static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5915  			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5916  {
5917  	int i;
5918  	u16 data_len;
5919  	u32 ioasc, fd_ioasc;
5920  	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5921  	__be32 *ioasa_data = (__be32 *)ioasa;
5922  	int error_index;
5923  
5924  	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5925  	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5926  
5927  	if (0 == ioasc)
5928  		return;
5929  
5930  	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5931  		return;
5932  
5933  	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5934  		error_index = ipr_get_error(fd_ioasc);
5935  	else
5936  		error_index = ipr_get_error(ioasc);
5937  
5938  	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5939  		/* Don't log an error if the IOA already logged one */
5940  		if (ioasa->hdr.ilid != 0)
5941  			return;
5942  
5943  		if (!ipr_is_gscsi(res))
5944  			return;
5945  
5946  		if (ipr_error_table[error_index].log_ioasa == 0)
5947  			return;
5948  	}
5949  
5950  	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5951  
5952  	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5953  	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5954  		data_len = sizeof(struct ipr_ioasa64);
5955  	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5956  		data_len = sizeof(struct ipr_ioasa);
5957  
5958  	ipr_err("IOASA Dump:\n");
5959  
5960  	for (i = 0; i < data_len / 4; i += 4) {
5961  		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5962  			be32_to_cpu(ioasa_data[i]),
5963  			be32_to_cpu(ioasa_data[i+1]),
5964  			be32_to_cpu(ioasa_data[i+2]),
5965  			be32_to_cpu(ioasa_data[i+3]));
5966  	}
5967  }
5968  
5969  /**
5970   * ipr_gen_sense - Generate SCSI sense data from an IOASA
5971   * @ipr_cmd:	ipr command struct
5972   *
5973   * Return value:
5974   * 	none
5975   **/
5976  static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5977  {
5978  	u32 failing_lba;
5979  	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5980  	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5981  	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5982  	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5983  
5984  	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5985  
5986  	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5987  		return;
5988  
5989  	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5990  
5991  	if (ipr_is_vset_device(res) &&
5992  	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5993  	    ioasa->u.vset.failing_lba_hi != 0) {
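		/*
		 * The failing LBA needs more than 32 bits, so build descriptor
		 * format sense data (response code 0x72) with an information
		 * descriptor holding the full 64-bit LBA.
		 */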
5994  		sense_buf[0] = 0x72;
5995  		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5996  		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5997  		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5998  
5999  		sense_buf[7] = 12;
6000  		sense_buf[8] = 0;
6001  		sense_buf[9] = 0x0A;
6002  		sense_buf[10] = 0x80;
6003  
6004  		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6005  
6006  		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6007  		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6008  		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6009  		sense_buf[15] = failing_lba & 0x000000ff;
6010  
6011  		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6012  
6013  		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6014  		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6015  		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6016  		sense_buf[19] = failing_lba & 0x000000ff;
6017  	} else {
6018  		sense_buf[0] = 0x70;
6019  		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6020  		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6021  		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6022  
6023  		/* Illegal request */
6024  		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
6025  		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
6026  			sense_buf[7] = 10;	/* additional length */
6027  
6028  			/* IOARCB was in error */
6029  			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6030  				sense_buf[15] = 0xC0;
6031  			else	/* Parameter data was invalid */
6032  				sense_buf[15] = 0x80;
6033  
6034  			sense_buf[16] =
6035  			    ((IPR_FIELD_POINTER_MASK &
6036  			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
6037  			sense_buf[17] =
6038  			    (IPR_FIELD_POINTER_MASK &
6039  			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
6040  		} else {
6041  			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6042  				if (ipr_is_vset_device(res))
6043  					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6044  				else
6045  					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6046  
6047  				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
6048  				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6049  				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6050  				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6051  				sense_buf[6] = failing_lba & 0x000000ff;
6052  			}
6053  
6054  			sense_buf[7] = 6;	/* additional length */
6055  		}
6056  	}
6057  }
6058  
6059  /**
6060   * ipr_get_autosense - Copy autosense data to sense buffer
6061   * @ipr_cmd:	ipr command struct
6062   *
6063   * This function copies the autosense buffer to the buffer
6064   * in the scsi_cmd, if there is autosense available.
6065   *
6066   * Return value:
6067   *	1 if autosense was available / 0 if not
6068   **/
6069  static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6070  {
6071  	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6072  	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
6073  
6074  	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
6075  		return 0;
6076  
6077  	if (ipr_cmd->ioa_cfg->sis64)
6078  		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6079  		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6080  			   SCSI_SENSE_BUFFERSIZE));
6081  	else
6082  		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6083  		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6084  			   SCSI_SENSE_BUFFERSIZE));
6085  	return 1;
6086  }
6087  
6088  /**
6089   * ipr_erp_start - Process an error response for a SCSI op
6090   * @ioa_cfg:	ioa config struct
6091   * @ipr_cmd:	ipr command struct
6092   *
6093   * This function determines whether or not to initiate ERP
6094   * on the affected device.
6095   *
6096   * Return value:
6097   * 	nothing
6098   **/
6099  static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6100  			      struct ipr_cmnd *ipr_cmd)
6101  {
6102  	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6103  	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6104  	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6105  	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
6106  
6107  	if (!res) {
6108  		__ipr_scsi_eh_done(ipr_cmd);
6109  		return;
6110  	}
6111  
6112  	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
6113  		ipr_gen_sense(ipr_cmd);
6114  
6115  	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6116  
6117  	switch (masked_ioasc) {
6118  	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
6119  		if (ipr_is_naca_model(res))
6120  			scsi_cmd->result |= (DID_ABORT << 16);
6121  		else
6122  			scsi_cmd->result |= (DID_IMM_RETRY << 16);
6123  		break;
6124  	case IPR_IOASC_IR_RESOURCE_HANDLE:
6125  	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
6126  		scsi_cmd->result |= (DID_NO_CONNECT << 16);
6127  		break;
6128  	case IPR_IOASC_HW_SEL_TIMEOUT:
6129  		scsi_cmd->result |= (DID_NO_CONNECT << 16);
6130  		if (!ipr_is_naca_model(res))
6131  			res->needs_sync_complete = 1;
6132  		break;
6133  	case IPR_IOASC_SYNC_REQUIRED:
6134  		if (!res->in_erp)
6135  			res->needs_sync_complete = 1;
6136  		scsi_cmd->result |= (DID_IMM_RETRY << 16);
6137  		break;
6138  	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
6139  	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
6140  		/*
6141  		 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6142  		 * so SCSI mid-layer and upper layers handle it accordingly.
6143  		 */
6144  		if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6145  			scsi_cmd->result |= (DID_PASSTHROUGH << 16);
6146  		break;
6147  	case IPR_IOASC_BUS_WAS_RESET:
6148  	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6149  		/*
6150  		 * Report the bus reset and ask for a retry. The device
6151  		 * will give CC/UA on the next command.
6152  		 */
6153  		if (!res->resetting_device)
6154  			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6155  		scsi_cmd->result |= (DID_ERROR << 16);
6156  		if (!ipr_is_naca_model(res))
6157  			res->needs_sync_complete = 1;
6158  		break;
6159  	case IPR_IOASC_HW_DEV_BUS_STATUS:
6160  		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6161  		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
6162  			if (!ipr_get_autosense(ipr_cmd)) {
6163  				if (!ipr_is_naca_model(res)) {
6164  					ipr_erp_cancel_all(ipr_cmd);
6165  					return;
6166  				}
6167  			}
6168  		}
6169  		if (!ipr_is_naca_model(res))
6170  			res->needs_sync_complete = 1;
6171  		break;
6172  	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6173  		break;
6174  	case IPR_IOASC_IR_NON_OPTIMIZED:
6175  		if (res->raw_mode) {
6176  			res->raw_mode = 0;
6177  			scsi_cmd->result |= (DID_IMM_RETRY << 16);
6178  		} else
6179  			scsi_cmd->result |= (DID_ERROR << 16);
6180  		break;
6181  	default:
6182  		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6183  			scsi_cmd->result |= (DID_ERROR << 16);
6184  		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
6185  			res->needs_sync_complete = 1;
6186  		break;
6187  	}
6188  
6189  	scsi_dma_unmap(ipr_cmd->scsi_cmd);
6190  	scsi_done(scsi_cmd);
6191  	if (ipr_cmd->eh_comp)
6192  		complete(ipr_cmd->eh_comp);
6193  	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6194  }
6195  
6196  /**
6197   * ipr_scsi_done - mid-layer done function
6198   * @ipr_cmd:	ipr command struct
6199   *
6200   * This function is invoked by the interrupt handler for
6201   * ops generated by the SCSI mid-layer
6202   *
6203   * Return value:
6204   * 	none
6205   **/
6206  static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6207  {
6208  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6209  	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6210  	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6211  	unsigned long lock_flags;
6212  
6213  	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6214  
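	/*
	 * Fast path: the command completed without a sense key, so unmap and
	 * complete it under the HRRQ lock only. Otherwise error recovery is
	 * started under the host lock, with the HRRQ lock nested inside.
	 */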
6215  	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6216  		scsi_dma_unmap(scsi_cmd);
6217  
6218  		spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6219  		scsi_done(scsi_cmd);
6220  		if (ipr_cmd->eh_comp)
6221  			complete(ipr_cmd->eh_comp);
6222  		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6223  		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6224  	} else {
6225  		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6226  		spin_lock(&ipr_cmd->hrrq->_lock);
6227  		ipr_erp_start(ioa_cfg, ipr_cmd);
6228  		spin_unlock(&ipr_cmd->hrrq->_lock);
6229  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6230  	}
6231  }
6232  
6233  /**
6234   * ipr_queuecommand - Queue a mid-layer request
6235   * @shost:		scsi host struct
6236   * @scsi_cmd:	scsi command struct
6237   *
6238   * This function queues a request generated by the mid-layer.
6239   *
6240   * Return value:
6241   *	0 on success
6242   *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6243   *	SCSI_MLQUEUE_HOST_BUSY if host is busy
6244   **/
6245  static int ipr_queuecommand(struct Scsi_Host *shost,
6246  			    struct scsi_cmnd *scsi_cmd)
6247  {
6248  	struct ipr_ioa_cfg *ioa_cfg;
6249  	struct ipr_resource_entry *res;
6250  	struct ipr_ioarcb *ioarcb;
6251  	struct ipr_cmnd *ipr_cmd;
6252  	unsigned long hrrq_flags;
6253  	int rc;
6254  	struct ipr_hrr_queue *hrrq;
6255  	int hrrq_id;
6256  
6257  	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6258  
6259  	scsi_cmd->result = (DID_OK << 16);
6260  	res = scsi_cmd->device->hostdata;
6261  
6262  	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6263  	hrrq = &ioa_cfg->hrrq[hrrq_id];
6264  
6265  	spin_lock_irqsave(hrrq->lock, hrrq_flags);
6266  	/*
6267  	 * We are currently blocking all devices due to a host reset.
6268  	 * We have told the host to stop giving us new requests, but
6269  	 * ERP ops don't count. FIXME
6270  	 */
6271  	if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
6272  		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6273  		return SCSI_MLQUEUE_HOST_BUSY;
6274  	}
6275  
6276  	/*
6277  	 * FIXME - Create scsi_set_host_offline interface
6278  	 *  and the ioa_is_dead check can be removed
6279  	 */
6280  	if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
6281  		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6282  		goto err_nodev;
6283  	}
6284  
6285  	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6286  	if (ipr_cmd == NULL) {
6287  		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6288  		return SCSI_MLQUEUE_HOST_BUSY;
6289  	}
6290  	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6291  
6292  	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
6293  	ioarcb = &ipr_cmd->ioarcb;
6294  
6295  	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6296  	ipr_cmd->scsi_cmd = scsi_cmd;
6297  	ipr_cmd->done = ipr_scsi_eh_done;
6298  
6299  	if (ipr_is_gscsi(res)) {
6300  		if (scsi_cmd->underflow == 0)
6301  			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6302  
6303  		if (res->reset_occurred) {
6304  			res->reset_occurred = 0;
6305  			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
6306  		}
6307  	}
6308  
6309  	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6310  		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6311  
6312  		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
6313  		if (scsi_cmd->flags & SCMD_TAGGED)
6314  			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6315  		else
6316  			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
6317  	}
6318  
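	/*
	 * CDBs in the vendor specific range (0xC0 and above) are issued as IOA
	 * commands rather than pass-through SCSI CDBs, unless they target a
	 * generic SCSI device (IPR_QUERY_RSRC_STATE excepted).
	 */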
6319  	if (scsi_cmd->cmnd[0] >= 0xC0 &&
6320  	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
6321  		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6322  	}
6323  	if (res->raw_mode && ipr_is_af_dasd_device(res)) {
6324  		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
6325  
6326  		if (scsi_cmd->underflow == 0)
6327  			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6328  	}
6329  
6330  	if (ioa_cfg->sis64)
6331  		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6332  	else
6333  		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
6334  
6335  	spin_lock_irqsave(hrrq->lock, hrrq_flags);
6336  	if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
6337  		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6338  		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6339  		if (!rc)
6340  			scsi_dma_unmap(scsi_cmd);
6341  		return SCSI_MLQUEUE_HOST_BUSY;
6342  	}
6343  
6344  	if (unlikely(hrrq->ioa_is_dead)) {
6345  		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
6346  		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6347  		scsi_dma_unmap(scsi_cmd);
6348  		goto err_nodev;
6349  	}
6350  
6351  	ioarcb->res_handle = res->res_handle;
6352  	if (res->needs_sync_complete) {
6353  		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6354  		res->needs_sync_complete = 0;
6355  	}
6356  	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
6357  	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6358  	ipr_send_command(ipr_cmd);
6359  	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6360  	return 0;
6361  
6362  err_nodev:
6363  	spin_lock_irqsave(hrrq->lock, hrrq_flags);
6364  	memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6365  	scsi_cmd->result = (DID_NO_CONNECT << 16);
6366  	scsi_done(scsi_cmd);
6367  	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
6368  	return 0;
6369  }
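
/*
 * Illustrative sketch (not part of the driver build): ipr_queuecommand()
 * follows the usual SCSI LLD contract. It returns 0 once the command has
 * been accepted (completion is reported later through scsi_done()), or
 * SCSI_MLQUEUE_HOST_BUSY to have the mid-layer retry the command. The
 * err_nodev path above completes the command immediately with the host
 * byte set to DID_NO_CONNECT. The hypothetical helper below shows how
 * such a result word is built and decoded.
 */
#if 0
static void ipr_example_fail_no_connect(struct scsi_cmnd *scsi_cmd)
{
	/* The host byte occupies bits 16-23 of scsi_cmd->result. */
	scsi_cmd->result = (DID_NO_CONNECT << 16);

	if (host_byte(scsi_cmd->result) != DID_OK)
		pr_debug("command failed at the host/transport level\n");

	scsi_done(scsi_cmd);	/* hand the result back to the mid-layer */
}
#endif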
6370  
6371  /**
6372   * ipr_ioa_info - Get information about the card/driver
6373   * @host:	scsi host struct
6374   *
6375   * Return value:
6376   * 	pointer to buffer with description string
6377   **/
6378  static const char *ipr_ioa_info(struct Scsi_Host *host)
6379  {
6380  	static char buffer[512];
6381  	struct ipr_ioa_cfg *ioa_cfg;
6382  	unsigned long lock_flags = 0;
6383  
6384  	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6385  
6386  	spin_lock_irqsave(host->host_lock, lock_flags);
6387  	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6388  	spin_unlock_irqrestore(host->host_lock, lock_flags);
6389  
6390  	return buffer;
6391  }
6392  
6393  static const struct scsi_host_template driver_template = {
6394  	.module = THIS_MODULE,
6395  	.name = "IPR",
6396  	.info = ipr_ioa_info,
6397  	.queuecommand = ipr_queuecommand,
6398  	.eh_abort_handler = ipr_eh_abort,
6399  	.eh_device_reset_handler = ipr_eh_dev_reset,
6400  	.eh_host_reset_handler = ipr_eh_host_reset,
6401  	.slave_alloc = ipr_slave_alloc,
6402  	.device_configure = ipr_device_configure,
6403  	.slave_destroy = ipr_slave_destroy,
6404  	.scan_finished = ipr_scan_finished,
6405  	.target_destroy = ipr_target_destroy,
6406  	.change_queue_depth = ipr_change_queue_depth,
6407  	.bios_param = ipr_biosparam,
6408  	.can_queue = IPR_MAX_COMMANDS,
6409  	.this_id = -1,
6410  	.sg_tablesize = IPR_MAX_SGLIST,
6411  	.max_sectors = IPR_IOA_MAX_SECTORS,
6412  	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6413  	.shost_groups = ipr_ioa_groups,
6414  	.sdev_groups = ipr_dev_groups,
6415  	.proc_name = IPR_NAME,
6416  };
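
/*
 * Illustrative sketch (not part of the driver build): driver_template
 * above is handed to the SCSI mid-layer when the adapter is probed; the
 * driver's real probe path lives elsewhere in this file. A minimal
 * registration sequence for a template like this typically looks as
 * follows (error handling trimmed, function name hypothetical).
 */
#if 0
static int ipr_example_register(struct pci_dev *pdev)
{
	struct Scsi_Host *host;
	int rc;

	/* Allocate a Scsi_Host with room for the per-adapter ipr_ioa_cfg. */
	host = scsi_host_alloc(&driver_template, sizeof(struct ipr_ioa_cfg));
	if (!host)
		return -ENOMEM;

	rc = scsi_add_host(host, &pdev->dev);	/* expose it to the mid-layer */
	if (rc) {
		scsi_host_put(host);
		return rc;
	}

	scsi_scan_host(host);			/* kick off device discovery */
	return 0;
}
#endif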
6417  
6418  /**
6419   * ipr_ioa_bringdown_done - IOA bring down completion.
6420   * @ipr_cmd:	ipr command struct
6421   *
6422   * This function processes the completion of an adapter bring down.
6423   * It wakes any reset sleepers.
6424   *
6425   * Return value:
6426   * 	IPR_RC_JOB_RETURN
6427   **/
6428  static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6429  {
6430  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6431  	int i;
6432  
6433  	ENTER;
6434  	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
6435  		ipr_trace;
6436  		ioa_cfg->scsi_unblock = 1;
6437  		schedule_work(&ioa_cfg->work_q);
6438  	}
6439  
6440  	ioa_cfg->in_reset_reload = 0;
6441  	ioa_cfg->reset_retries = 0;
6442  	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
6443  		spin_lock(&ioa_cfg->hrrq[i]._lock);
6444  		ioa_cfg->hrrq[i].ioa_is_dead = 1;
6445  		spin_unlock(&ioa_cfg->hrrq[i]._lock);
6446  	}
6447  	wmb();
6448  
6449  	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6450  	wake_up_all(&ioa_cfg->reset_wait_q);
6451  	LEAVE;
6452  
6453  	return IPR_RC_JOB_RETURN;
6454  }
6455  
6456  /**
6457   * ipr_ioa_reset_done - IOA reset completion.
6458   * @ipr_cmd:	ipr command struct
6459   *
6460   * This function processes the completion of an adapter reset.
6461   * It schedules any necessary mid-layer add/removes and
6462   * wakes any reset sleepers.
6463   *
6464   * Return value:
6465   * 	IPR_RC_JOB_RETURN
6466   **/
6467  static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6468  {
6469  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6470  	struct ipr_resource_entry *res;
6471  	int j;
6472  
6473  	ENTER;
6474  	ioa_cfg->in_reset_reload = 0;
6475  	for (j = 0; j < ioa_cfg->hrrq_num; j++) {
6476  		spin_lock(&ioa_cfg->hrrq[j]._lock);
6477  		ioa_cfg->hrrq[j].allow_cmds = 1;
6478  		spin_unlock(&ioa_cfg->hrrq[j]._lock);
6479  	}
6480  	wmb();
6481  	ioa_cfg->reset_cmd = NULL;
6482  	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6483  
6484  	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6485  		if (res->add_to_ml || res->del_from_ml) {
6486  			ipr_trace;
6487  			break;
6488  		}
6489  	}
6490  	schedule_work(&ioa_cfg->work_q);
6491  
6492  	for (j = 0; j < IPR_NUM_HCAMS; j++) {
6493  		list_del_init(&ioa_cfg->hostrcb[j]->queue);
6494  		if (j < IPR_NUM_LOG_HCAMS)
6495  			ipr_send_hcam(ioa_cfg,
6496  				IPR_HCAM_CDB_OP_CODE_LOG_DATA,
6497  				ioa_cfg->hostrcb[j]);
6498  		else
6499  			ipr_send_hcam(ioa_cfg,
6500  				IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
6501  				ioa_cfg->hostrcb[j]);
6502  	}
6503  
6504  	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6505  	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6506  
6507  	ioa_cfg->reset_retries = 0;
6508  	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6509  	wake_up_all(&ioa_cfg->reset_wait_q);
6510  
6511  	ioa_cfg->scsi_unblock = 1;
6512  	schedule_work(&ioa_cfg->work_q);
6513  	LEAVE;
6514  	return IPR_RC_JOB_RETURN;
6515  }
6516  
6517  /**
6518   * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6519   * @supported_dev:	supported device struct
6520   * @vpids:			vendor product id struct
6521   *
6522   * Return value:
6523   * 	none
6524   **/
6525  static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6526  				 struct ipr_std_inq_vpids *vpids)
6527  {
6528  	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6529  	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6530  	supported_dev->num_records = 1;
6531  	supported_dev->data_length =
6532  		cpu_to_be16(sizeof(struct ipr_supported_device));
6533  	supported_dev->reserved = 0;
6534  }
6535  
6536  /**
6537   * ipr_set_supported_devs - Send Set Supported Devices for a device
6538   * @ipr_cmd:	ipr command struct
6539   *
6540   * This function sends a Set Supported Devices to the adapter
6541   *
6542   * Return value:
6543   * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6544   **/
6545  static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6546  {
6547  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6548  	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6549  	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6550  	struct ipr_resource_entry *res = ipr_cmd->u.res;
6551  
6552  	ipr_cmd->job_step = ipr_ioa_reset_done;
6553  
6554  	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6555  		if (!ipr_is_scsi_disk(res))
6556  			continue;
6557  
6558  		ipr_cmd->u.res = res;
6559  		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6560  
6561  		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6562  		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6563  		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6564  
6565  		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6566  		ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6567  		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6568  		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6569  
6570  		ipr_init_ioadl(ipr_cmd,
6571  			       ioa_cfg->vpd_cbs_dma +
6572  				 offsetof(struct ipr_misc_cbs, supp_dev),
6573  			       sizeof(struct ipr_supported_device),
6574  			       IPR_IOADL_FLAGS_WRITE_LAST);
6575  
6576  		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6577  			   IPR_SET_SUP_DEVICE_TIMEOUT);
6578  
6579  		if (!ioa_cfg->sis64)
6580  			ipr_cmd->job_step = ipr_set_supported_devs;
6581  		LEAVE;
6582  		return IPR_RC_JOB_RETURN;
6583  	}
6584  
6585  	LEAVE;
6586  	return IPR_RC_JOB_CONTINUE;
6587  }
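
/*
 * Illustrative sketch (not part of the driver build): functions such as
 * ipr_set_supported_devs() are links in the reset/bringup job chain.
 * Each step either issues an adapter command and returns
 * IPR_RC_JOB_RETURN (the chain is re-entered from the completion path),
 * or sets ipr_cmd->job_step to the next step and returns
 * IPR_RC_JOB_CONTINUE. Conceptually the chain is driven by a loop of the
 * shape below; ipr_reset_ioa_job itself is defined elsewhere in this file.
 */
#if 0
static void ipr_example_run_job_chain(struct ipr_cmnd *ipr_cmd)
{
	int rc;

	do {
		/* Run the current step; it may advance ipr_cmd->job_step. */
		rc = ipr_cmd->job_step(ipr_cmd);
	} while (rc == IPR_RC_JOB_CONTINUE);

	/* IPR_RC_JOB_RETURN: a command is in flight, resume on completion. */
}
#endif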
6588  
6589  /**
6590   * ipr_get_mode_page - Locate specified mode page
6591   * @mode_pages:	mode page buffer
6592   * @page_code:	page code to find
6593   * @len:		minimum required length for mode page
6594   *
6595   * Return value:
6596   * 	pointer to mode page / NULL on failure
6597   **/
6598  static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6599  			       u32 page_code, u32 len)
6600  {
6601  	struct ipr_mode_page_hdr *mode_hdr;
6602  	u32 page_length;
6603  	u32 length;
6604  
6605  	if (!mode_pages || (mode_pages->hdr.length == 0))
6606  		return NULL;
6607  
6608  	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6609  	mode_hdr = (struct ipr_mode_page_hdr *)
6610  		(mode_pages->data + mode_pages->hdr.block_desc_len);
6611  
6612  	while (length) {
6613  		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6614  			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6615  				return mode_hdr;
6616  			break;
6617  		} else {
6618  			page_length = (sizeof(struct ipr_mode_page_hdr) +
6619  				       mode_hdr->page_length);
6620  			length -= page_length;
6621  			mode_hdr = (struct ipr_mode_page_hdr *)
6622  				((unsigned long)mode_hdr + page_length);
6623  		}
6624  	}
6625  	return NULL;
6626  }
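
/*
 * Illustrative sketch (not part of the driver build): ipr_get_mode_page()
 * walks the MODE SENSE payload as a sequence of variable-length pages.
 * Each page starts with a header carrying its page code and the number
 * of bytes that follow the header, so the next page begins
 * header-size + page_length bytes further on. The same walk in its
 * simplest form, with hypothetical names:
 */
#if 0
struct example_mode_page_hdr {
	u8 page_code;		/* low 6 bits hold the page code */
	u8 page_length;		/* bytes following this header */
};

static void *example_find_mode_page(u8 *data, u32 total_len, u8 wanted)
{
	struct example_mode_page_hdr *hdr = (struct example_mode_page_hdr *)data;

	while (total_len >= sizeof(*hdr)) {
		u32 page_len = sizeof(*hdr) + hdr->page_length;

		if ((hdr->page_code & 0x3f) == wanted)
			return hdr;

		total_len -= min(page_len, total_len);	/* step to the next page */
		hdr = (struct example_mode_page_hdr *)((u8 *)hdr + page_len);
	}
	return NULL;
}
#endif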
6627  
6628  /**
6629   * ipr_check_term_power - Check for term power errors
6630   * @ioa_cfg:	ioa config struct
6631   * @mode_pages:	IOAFP mode pages buffer
6632   *
6633   * Check the IOAFP's mode page 28 for term power errors
6634   *
6635   * Return value:
6636   * 	nothing
6637   **/
6638  static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6639  				 struct ipr_mode_pages *mode_pages)
6640  {
6641  	int i;
6642  	int entry_length;
6643  	struct ipr_dev_bus_entry *bus;
6644  	struct ipr_mode_page28 *mode_page;
6645  
6646  	mode_page = ipr_get_mode_page(mode_pages, 0x28,
6647  				      sizeof(struct ipr_mode_page28));
6648  
6649  	entry_length = mode_page->entry_length;
6650  
6651  	bus = mode_page->bus;
6652  
6653  	for (i = 0; i < mode_page->num_entries; i++) {
6654  		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6655  			dev_err(&ioa_cfg->pdev->dev,
6656  				"Term power is absent on scsi bus %d\n",
6657  				bus->res_addr.bus);
6658  		}
6659  
6660  		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6661  	}
6662  }
6663  
6664  /**
6665   * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6666   * @ioa_cfg:	ioa config struct
6667   *
6668   * Looks through the config table checking for SES devices. If
6669   * an SES device is listed in the SES table with a maximum SCSI
6670   * bus speed, that bus's speed is limited accordingly.
6671   *
6672   * Return value:
6673   * 	none
6674   **/
6675  static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6676  {
6677  	u32 max_xfer_rate;
6678  	int i;
6679  
6680  	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6681  		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6682  						       ioa_cfg->bus_attr[i].bus_width);
6683  
6684  		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6685  			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6686  	}
6687  }
6688  
6689  /**
6690   * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6691   * @ioa_cfg:	ioa config struct
6692   * @mode_pages:	mode page 28 buffer
6693   *
6694   * Updates mode page 28 based on driver configuration
6695   *
6696   * Return value:
6697   * 	none
6698   **/
6699  static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6700  					  struct ipr_mode_pages *mode_pages)
6701  {
6702  	int i, entry_length;
6703  	struct ipr_dev_bus_entry *bus;
6704  	struct ipr_bus_attributes *bus_attr;
6705  	struct ipr_mode_page28 *mode_page;
6706  
6707  	mode_page = ipr_get_mode_page(mode_pages, 0x28,
6708  				      sizeof(struct ipr_mode_page28));
6709  
6710  	entry_length = mode_page->entry_length;
6711  
6712  	/* Loop for each device bus entry */
6713  	for (i = 0, bus = mode_page->bus;
6714  	     i < mode_page->num_entries;
6715  	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6716  		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6717  			dev_err(&ioa_cfg->pdev->dev,
6718  				"Invalid resource address reported: 0x%08X\n",
6719  				IPR_GET_PHYS_LOC(bus->res_addr));
6720  			continue;
6721  		}
6722  
6723  		bus_attr = &ioa_cfg->bus_attr[i];
6724  		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6725  		bus->bus_width = bus_attr->bus_width;
6726  		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6727  		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6728  		if (bus_attr->qas_enabled)
6729  			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6730  		else
6731  			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6732  	}
6733  }
6734  
6735  /**
6736   * ipr_build_mode_select - Build a mode select command
6737   * @ipr_cmd:	ipr command struct
6738   * @res_handle:	resource handle to send command to
6739   * @parm:		Byte 2 of Mode Select command
6740   * @dma_addr:	DMA buffer address
6741   * @xfer_len:	data transfer length
6742   *
6743   * Return value:
6744   * 	none
6745   **/
6746  static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
6747  				  __be32 res_handle, u8 parm,
6748  				  dma_addr_t dma_addr, u8 xfer_len)
6749  {
6750  	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6751  
6752  	ioarcb->res_handle = res_handle;
6753  	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6754  	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6755  	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6756  	ioarcb->cmd_pkt.cdb[1] = parm;
6757  	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6758  
6759  	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
6760  }
6761  
6762  /**
6763   * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6764   * @ipr_cmd:	ipr command struct
6765   *
6766   * This function sets up the SCSI bus attributes and sends
6767   * a Mode Select for Page 28 to activate them.
6768   *
6769   * Return value:
6770   * 	IPR_RC_JOB_RETURN
6771   **/
6772  static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6773  {
6774  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6775  	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6776  	int length;
6777  
6778  	ENTER;
6779  	ipr_scsi_bus_speed_limit(ioa_cfg);
6780  	ipr_check_term_power(ioa_cfg, mode_pages);
6781  	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6782  	length = mode_pages->hdr.length + 1;
6783  	mode_pages->hdr.length = 0;
6784  
6785  	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6786  			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6787  			      length);
6788  
6789  	ipr_cmd->job_step = ipr_set_supported_devs;
6790  	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6791  				    struct ipr_resource_entry, queue);
6792  	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6793  
6794  	LEAVE;
6795  	return IPR_RC_JOB_RETURN;
6796  }
6797  
6798  /**
6799   * ipr_build_mode_sense - Builds a mode sense command
6800   * @ipr_cmd:	ipr command struct
6801   * @res_handle:	resource handle to send command to
6802   * @parm:		Byte 2 of mode sense command
6803   * @dma_addr:	DMA address of mode sense buffer
6804   * @xfer_len:	Size of DMA buffer
6805   *
6806   * Return value:
6807   * 	none
6808   **/
6809  static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6810  				 __be32 res_handle,
6811  				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
6812  {
6813  	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6814  
6815  	ioarcb->res_handle = res_handle;
6816  	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6817  	ioarcb->cmd_pkt.cdb[2] = parm;
6818  	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6819  	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6820  
6821  	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6822  }
6823  
6824  /**
6825   * ipr_reset_cmd_failed - Handle failure of IOA reset command
6826   * @ipr_cmd:	ipr command struct
6827   *
6828   * This function handles the failure of an IOA bringup command.
6829   *
6830   * Return value:
6831   * 	IPR_RC_JOB_RETURN
6832   **/
6833  static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6834  {
6835  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6836  	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6837  
6838  	dev_err(&ioa_cfg->pdev->dev,
6839  		"0x%02X failed with IOASC: 0x%08X\n",
6840  		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6841  
6842  	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6843  	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6844  	return IPR_RC_JOB_RETURN;
6845  }
6846  
6847  /**
6848   * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6849   * @ipr_cmd:	ipr command struct
6850   *
6851   * This function handles the failure of a Mode Sense to the IOAFP.
6852   * Some adapters do not handle all mode pages.
6853   *
6854   * Return value:
6855   * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6856   **/
6857  static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6858  {
6859  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6860  	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6861  
6862  	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6863  		ipr_cmd->job_step = ipr_set_supported_devs;
6864  		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6865  					    struct ipr_resource_entry, queue);
6866  		return IPR_RC_JOB_CONTINUE;
6867  	}
6868  
6869  	return ipr_reset_cmd_failed(ipr_cmd);
6870  }
6871  
6872  /**
6873   * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6874   * @ipr_cmd:	ipr command struct
6875   *
6876   * This function sends a Page 28 mode sense to the IOA to
6877   * retrieve SCSI bus attributes.
6878   *
6879   * Return value:
6880   * 	IPR_RC_JOB_RETURN
6881   **/
6882  static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6883  {
6884  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6885  
6886  	ENTER;
6887  	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6888  			     0x28, ioa_cfg->vpd_cbs_dma +
6889  			     offsetof(struct ipr_misc_cbs, mode_pages),
6890  			     sizeof(struct ipr_mode_pages));
6891  
6892  	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
6893  	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
6894  
6895  	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6896  
6897  	LEAVE;
6898  	return IPR_RC_JOB_RETURN;
6899  }
6900  
6901  /**
6902   * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6903   * @ipr_cmd:	ipr command struct
6904   *
6905   * This function enables dual IOA RAID support if possible.
6906   *
6907   * Return value:
6908   * 	IPR_RC_JOB_RETURN
6909   **/
6910  static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6911  {
6912  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6913  	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6914  	struct ipr_mode_page24 *mode_page;
6915  	int length;
6916  
6917  	ENTER;
6918  	mode_page = ipr_get_mode_page(mode_pages, 0x24,
6919  				      sizeof(struct ipr_mode_page24));
6920  
6921  	if (mode_page)
6922  		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6923  
6924  	length = mode_pages->hdr.length + 1;
6925  	mode_pages->hdr.length = 0;
6926  
6927  	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6928  			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6929  			      length);
6930  
6931  	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6932  	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6933  
6934  	LEAVE;
6935  	return IPR_RC_JOB_RETURN;
6936  }
6937  
6938  /**
6939   * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6940   * @ipr_cmd:	ipr command struct
6941   *
6942   * This function handles the failure of a Mode Sense to the IOAFP.
6943   * Some adapters do not handle all mode pages.
6944   *
6945   * Return value:
6946   * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6947   **/
6948  static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6949  {
6950  	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6951  
6952  	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6953  		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6954  		return IPR_RC_JOB_CONTINUE;
6955  	}
6956  
6957  	return ipr_reset_cmd_failed(ipr_cmd);
6958  }
6959  
6960  /**
6961   * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6962   * @ipr_cmd:	ipr command struct
6963   *
6964   * This function sends a mode sense to the IOA to retrieve
6965   * the IOA Advanced Function Control mode page.
6966   *
6967   * Return value:
6968   * 	IPR_RC_JOB_RETURN
6969   **/
6970  static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6971  {
6972  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6973  
6974  	ENTER;
6975  	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6976  			     0x24, ioa_cfg->vpd_cbs_dma +
6977  			     offsetof(struct ipr_misc_cbs, mode_pages),
6978  			     sizeof(struct ipr_mode_pages));
6979  
6980  	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6981  	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6982  
6983  	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6984  
6985  	LEAVE;
6986  	return IPR_RC_JOB_RETURN;
6987  }
6988  
6989  /**
6990   * ipr_init_res_table - Initialize the resource table
6991   * @ipr_cmd:	ipr command struct
6992   *
6993   * This function looks through the existing resource table, comparing
6994   * it with the config table. It takes care of devices that have been
6995   * added or removed and schedules the corresponding mid-layer
6996   * add/remove operations as appropriate.
6997   *
6998   * Return value:
6999   * 	IPR_RC_JOB_CONTINUE
7000   **/
7001  static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7002  {
7003  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7004  	struct ipr_resource_entry *res, *temp;
7005  	struct ipr_config_table_entry_wrapper cfgtew;
7006  	int entries, found, flag, i;
7007  	LIST_HEAD(old_res);
7008  
7009  	ENTER;
7010  	if (ioa_cfg->sis64)
7011  		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7012  	else
7013  		flag = ioa_cfg->u.cfg_table->hdr.flags;
7014  
7015  	if (flag & IPR_UCODE_DOWNLOAD_REQ)
7016  		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7017  
7018  	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7019  		list_move_tail(&res->queue, &old_res);
7020  
7021  	if (ioa_cfg->sis64)
7022  		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
7023  	else
7024  		entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7025  
7026  	for (i = 0; i < entries; i++) {
7027  		if (ioa_cfg->sis64)
7028  			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7029  		else
7030  			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
7031  		found = 0;
7032  
7033  		list_for_each_entry_safe(res, temp, &old_res, queue) {
7034  			if (ipr_is_same_device(res, &cfgtew)) {
7035  				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7036  				found = 1;
7037  				break;
7038  			}
7039  		}
7040  
7041  		if (!found) {
7042  			if (list_empty(&ioa_cfg->free_res_q)) {
7043  				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7044  				break;
7045  			}
7046  
7047  			found = 1;
7048  			res = list_entry(ioa_cfg->free_res_q.next,
7049  					 struct ipr_resource_entry, queue);
7050  			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7051  			ipr_init_res_entry(res, &cfgtew);
7052  			res->add_to_ml = 1;
7053  		} else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7054  			res->sdev->allow_restart = 1;
7055  
7056  		if (found)
7057  			ipr_update_res_entry(res, &cfgtew);
7058  	}
7059  
7060  	list_for_each_entry_safe(res, temp, &old_res, queue) {
7061  		if (res->sdev) {
7062  			res->del_from_ml = 1;
7063  			res->res_handle = IPR_INVALID_RES_HANDLE;
7064  			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7065  		}
7066  	}
7067  
7068  	list_for_each_entry_safe(res, temp, &old_res, queue) {
7069  		ipr_clear_res_target(res);
7070  		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7071  	}
7072  
7073  	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7074  		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7075  	else
7076  		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7077  
7078  	LEAVE;
7079  	return IPR_RC_JOB_CONTINUE;
7080  }
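
/*
 * Illustrative sketch (not part of the driver build): ipr_init_res_table()
 * reconciles the driver's resource list with the freshly read config
 * table using a common pattern. Park every known entry on a temporary
 * list, pull back the entries the adapter still reports (allocating new
 * ones for anything unrecognized), and treat whatever is left on the
 * temporary list as gone. Stripped to its essentials, with a hypothetical
 * still_present() predicate:
 */
#if 0
static void example_reconcile(struct list_head *used, struct list_head *free_q,
			      bool (*still_present)(struct ipr_resource_entry *))
{
	struct ipr_resource_entry *res, *tmp;
	LIST_HEAD(old);

	list_splice_init(used, &old);			/* park everything */

	list_for_each_entry_safe(res, tmp, &old, queue)
		if (still_present(res))
			list_move_tail(&res->queue, used);	/* still reported */

	list_for_each_entry_safe(res, tmp, &old, queue)
		list_move_tail(&res->queue, free_q);	/* stale: recycle it */
}
#endif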
7081  
7082  /**
7083   * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7084   * @ipr_cmd:	ipr command struct
7085   *
7086   * This function sends a Query IOA Configuration command
7087   * to the adapter to retrieve the IOA configuration table.
7088   *
7089   * Return value:
7090   * 	IPR_RC_JOB_RETURN
7091   **/
7092  static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7093  {
7094  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7095  	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7096  	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
7097  	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7098  
7099  	ENTER;
7100  	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7101  		ioa_cfg->dual_raid = 1;
7102  	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7103  		 ucode_vpd->major_release, ucode_vpd->card_type,
7104  		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7105  	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7106  	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7107  
7108  	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
7109  	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
7110  	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7111  	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
7112  
7113  	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
7114  		       IPR_IOADL_FLAGS_READ_LAST);
7115  
7116  	ipr_cmd->job_step = ipr_init_res_table;
7117  
7118  	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7119  
7120  	LEAVE;
7121  	return IPR_RC_JOB_RETURN;
7122  }
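
/*
 * Illustrative sketch (not part of the driver build): Query IOA Config
 * passes the size of the config-table buffer in CDB bytes 6-8 as a
 * 24-bit big-endian value, which is how the adapter firmware expects
 * lengths on the wire. Encoding and decoding such a field, with
 * hypothetical helper names:
 */
#if 0
static void example_put_len24(u8 *cdb, u32 size)
{
	cdb[6] = (size >> 16) & 0xff;	/* most significant byte first */
	cdb[7] = (size >> 8) & 0xff;
	cdb[8] = size & 0xff;
}

static u32 example_get_len24(const u8 *cdb)
{
	return (cdb[6] << 16) | (cdb[7] << 8) | cdb[8];
}
#endif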
7123  
7124  static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7125  {
7126  	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7127  
7128  	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7129  		return IPR_RC_JOB_CONTINUE;
7130  
7131  	return ipr_reset_cmd_failed(ipr_cmd);
7132  }
7133  
7134  static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7135  					 __be32 res_handle, u8 sa_code)
7136  {
7137  	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7138  
7139  	ioarcb->res_handle = res_handle;
7140  	ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7141  	ioarcb->cmd_pkt.cdb[1] = sa_code;
7142  	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7143  }
7144  
7145  /**
7146   * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7147   * action
7148   * @ipr_cmd:	ipr command struct
7149   *
7150   * Return value:
7151   *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7152   **/
7153  static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7154  {
7155  	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7156  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7157  	struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7158  
7159  	ENTER;
7160  
7161  	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7162  
7163  	if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7164  		ipr_build_ioa_service_action(ipr_cmd,
7165  					     cpu_to_be32(IPR_IOA_RES_HANDLE),
7166  					     IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7167  
7168  		ioarcb->cmd_pkt.cdb[2] = 0x40;
7169  
7170  		ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7171  		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7172  			   IPR_SET_SUP_DEVICE_TIMEOUT);
7173  
7174  		LEAVE;
7175  		return IPR_RC_JOB_RETURN;
7176  	}
7177  
7178  	LEAVE;
7179  	return IPR_RC_JOB_CONTINUE;
7180  }
7181  
7182  /**
7183   * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7184   * @ipr_cmd:	ipr command struct
7185   * @flags:	flags to send
7186   * @page:	page to inquire
7187   * @dma_addr:	DMA address
7188   * @xfer_len:	transfer data length
7189   *
7190   * This utility function sends an inquiry to the adapter.
7191   *
7192   * Return value:
7193   * 	none
7194   **/
7195  static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
7196  			      dma_addr_t dma_addr, u8 xfer_len)
7197  {
7198  	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7199  
7200  	ENTER;
7201  	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7202  	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7203  
7204  	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7205  	ioarcb->cmd_pkt.cdb[1] = flags;
7206  	ioarcb->cmd_pkt.cdb[2] = page;
7207  	ioarcb->cmd_pkt.cdb[4] = xfer_len;
7208  
7209  	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
7210  
7211  	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7212  	LEAVE;
7213  }
7214  
7215  /**
7216   * ipr_inquiry_page_supported - Is the given inquiry page supported
7217   * @page0:		inquiry page 0 buffer
7218   * @page:		page code.
7219   *
7220   * This function determines if the specified inquiry page is supported.
7221   *
7222   * Return value:
7223   *	1 if page is supported / 0 if not
7224   **/
7225  static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7226  {
7227  	int i;
7228  
7229  	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7230  		if (page0->page[i] == page)
7231  			return 1;
7232  
7233  	return 0;
7234  }
7235  
7236  /**
7237   * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7238   * @ipr_cmd:	ipr command struct
7239   *
7240   * This function sends a Page 0xC4 inquiry to the adapter
7241   * to retrieve software VPD information.
7242   *
7243   * Return value:
7244   *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7245   **/
7246  static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
7247  {
7248  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7249  	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7250  	struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7251  
7252  	ENTER;
7253  	ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
7254  	memset(pageC4, 0, sizeof(*pageC4));
7255  
7256  	if (ipr_inquiry_page_supported(page0, 0xC4)) {
7257  		ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
7258  				  (ioa_cfg->vpd_cbs_dma
7259  				   + offsetof(struct ipr_misc_cbs,
7260  					      pageC4_data)),
7261  				  sizeof(struct ipr_inquiry_pageC4));
7262  		return IPR_RC_JOB_RETURN;
7263  	}
7264  
7265  	LEAVE;
7266  	return IPR_RC_JOB_CONTINUE;
7267  }
7268  
7269  /**
7270   * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7271   * @ipr_cmd:	ipr command struct
7272   *
7273   * This function sends a Page 0xD0 inquiry to the adapter
7274   * to retrieve adapter capabilities.
7275   *
7276   * Return value:
7277   * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7278   **/
7279  static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7280  {
7281  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7282  	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7283  	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7284  
7285  	ENTER;
7286  	ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
7287  	memset(cap, 0, sizeof(*cap));
7288  
7289  	if (ipr_inquiry_page_supported(page0, 0xD0)) {
7290  		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7291  				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7292  				  sizeof(struct ipr_inquiry_cap));
7293  		return IPR_RC_JOB_RETURN;
7294  	}
7295  
7296  	LEAVE;
7297  	return IPR_RC_JOB_CONTINUE;
7298  }
7299  
7300  /**
7301   * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7302   * @ipr_cmd:	ipr command struct
7303   *
7304   * This function sends a Page 3 inquiry to the adapter
7305   * to retrieve software VPD information.
7306   *
7307   * Return value:
7308   * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7309   **/
7310  static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7311  {
7312  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7313  
7314  	ENTER;
7315  
7316  	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7317  
7318  	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7319  			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7320  			  sizeof(struct ipr_inquiry_page3));
7321  
7322  	LEAVE;
7323  	return IPR_RC_JOB_RETURN;
7324  }
7325  
7326  /**
7327   * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7328   * @ipr_cmd:	ipr command struct
7329   *
7330   * This function sends a Page 0 inquiry to the adapter
7331   * to retrieve supported inquiry pages.
7332   *
7333   * Return value:
7334   * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7335   **/
7336  static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7337  {
7338  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7339  	char type[5];
7340  
7341  	ENTER;
7342  
7343  	/* Grab the type out of the VPD and store it away */
7344  	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7345  	type[4] = '\0';
7346  	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7347  
7348  	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7349  
7350  	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7351  			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7352  			  sizeof(struct ipr_inquiry_page0));
7353  
7354  	LEAVE;
7355  	return IPR_RC_JOB_RETURN;
7356  }
7357  
7358  /**
7359   * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7360   * @ipr_cmd:	ipr command struct
7361   *
7362   * This function sends a standard inquiry to the adapter.
7363   *
7364   * Return value:
7365   * 	IPR_RC_JOB_RETURN
7366   **/
7367  static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7368  {
7369  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7370  
7371  	ENTER;
7372  	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7373  
7374  	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7375  			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7376  			  sizeof(struct ipr_ioa_vpd));
7377  
7378  	LEAVE;
7379  	return IPR_RC_JOB_RETURN;
7380  }
7381  
7382  /**
7383   * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7384   * @ipr_cmd:	ipr command struct
7385   *
7386   * This function sends an Identify Host Request Response Queue
7387   * command to establish the HRRQ with the adapter.
7388   *
7389   * Return value:
7390   * 	IPR_RC_JOB_RETURN
7391   **/
7392  static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7393  {
7394  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7395  	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7396  	struct ipr_hrr_queue *hrrq;
7397  
7398  	ENTER;
7399  	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7400  	if (ioa_cfg->identify_hrrq_index == 0)
7401  		dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7402  
7403  	if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
7404  		hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
7405  
7406  		ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7407  		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7408  
7409  		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7410  		if (ioa_cfg->sis64)
7411  			ioarcb->cmd_pkt.cdb[1] = 0x1;
7412  
7413  		if (ioa_cfg->nvectors == 1)
7414  			ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
7415  		else
7416  			ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
7417  
7418  		ioarcb->cmd_pkt.cdb[2] =
7419  			((u64) hrrq->host_rrq_dma >> 24) & 0xff;
7420  		ioarcb->cmd_pkt.cdb[3] =
7421  			((u64) hrrq->host_rrq_dma >> 16) & 0xff;
7422  		ioarcb->cmd_pkt.cdb[4] =
7423  			((u64) hrrq->host_rrq_dma >> 8) & 0xff;
7424  		ioarcb->cmd_pkt.cdb[5] =
7425  			((u64) hrrq->host_rrq_dma) & 0xff;
7426  		ioarcb->cmd_pkt.cdb[7] =
7427  			((sizeof(u32) * hrrq->size) >> 8) & 0xff;
7428  		ioarcb->cmd_pkt.cdb[8] =
7429  			(sizeof(u32) * hrrq->size) & 0xff;
7430  
7431  		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7432  			ioarcb->cmd_pkt.cdb[9] =
7433  					ioa_cfg->identify_hrrq_index;
7434  
7435  		if (ioa_cfg->sis64) {
7436  			ioarcb->cmd_pkt.cdb[10] =
7437  				((u64) hrrq->host_rrq_dma >> 56) & 0xff;
7438  			ioarcb->cmd_pkt.cdb[11] =
7439  				((u64) hrrq->host_rrq_dma >> 48) & 0xff;
7440  			ioarcb->cmd_pkt.cdb[12] =
7441  				((u64) hrrq->host_rrq_dma >> 40) & 0xff;
7442  			ioarcb->cmd_pkt.cdb[13] =
7443  				((u64) hrrq->host_rrq_dma >> 32) & 0xff;
7444  		}
7445  
7446  		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
7447  			ioarcb->cmd_pkt.cdb[14] =
7448  					ioa_cfg->identify_hrrq_index;
7449  
7450  		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7451  			   IPR_INTERNAL_TIMEOUT);
7452  
7453  		if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
7454  			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7455  
7456  		LEAVE;
7457  		return IPR_RC_JOB_RETURN;
7458  	}
7459  
7460  	LEAVE;
7461  	return IPR_RC_JOB_CONTINUE;
7462  }
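
/*
 * Illustrative sketch (not part of the driver build): Identify Host RRQ
 * hands the adapter the DMA address and byte length of one host RRQ. The
 * CDB encoding above is big-endian: the low 32 bits of the address go in
 * bytes 2-5, the RRQ length in bytes 7-8, and on SIS-64 adapters the
 * high 32 bits of the address go in bytes 10-13. The hypothetical helper
 * below shows the same split in one place.
 */
#if 0
static void example_encode_hrrq_cdb(u8 *cdb, u64 addr, u32 len)
{
	cdb[2] = (addr >> 24) & 0xff;	/* address bits 31..24 */
	cdb[3] = (addr >> 16) & 0xff;
	cdb[4] = (addr >> 8) & 0xff;
	cdb[5] = addr & 0xff;		/* address bits 7..0 */

	cdb[7] = (len >> 8) & 0xff;	/* RRQ length in bytes, big-endian */
	cdb[8] = len & 0xff;

	cdb[10] = (addr >> 56) & 0xff;	/* SIS-64 only: address bits 63..56 */
	cdb[11] = (addr >> 48) & 0xff;
	cdb[12] = (addr >> 40) & 0xff;
	cdb[13] = (addr >> 32) & 0xff;	/* address bits 39..32 */
}
#endif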
7463  
7464  /**
7465   * ipr_reset_timer_done - Adapter reset timer function
7466   * @t: Timer context used to fetch ipr command struct
7467   *
7468   * Description: This function is used in adapter reset processing
7469   * for timing events. If the reset_cmd pointer in the IOA
7470   * config struct is not this adapter's we are doing nested
7471   * resets and fail_all_ops will take care of freeing the
7472   * command block.
7473   *
7474   * Return value:
7475   * 	none
7476   **/
7477  static void ipr_reset_timer_done(struct timer_list *t)
7478  {
7479  	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
7480  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7481  	unsigned long lock_flags = 0;
7482  
7483  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7484  
7485  	if (ioa_cfg->reset_cmd == ipr_cmd) {
7486  		list_del(&ipr_cmd->queue);
7487  		ipr_cmd->done(ipr_cmd);
7488  	}
7489  
7490  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7491  }
7492  
7493  /**
7494   * ipr_reset_start_timer - Start a timer for adapter reset job
7495   * @ipr_cmd:	ipr command struct
7496   * @timeout:	timeout value
7497   *
7498   * Description: This function is used in adapter reset processing
7499   * for timing events. If the reset_cmd pointer in the IOA
7500   * config struct is not this adapter's, we are doing nested
7501   * resets and fail_all_ops will take care of freeing the
7502   * command block.
7503   *
7504   * Return value:
7505   * 	none
7506   **/
7507  static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7508  				  unsigned long timeout)
7509  {
7510  
7511  	ENTER;
7512  	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7513  	ipr_cmd->done = ipr_reset_ioa_job;
7514  
7515  	ipr_cmd->timer.expires = jiffies + timeout;
7516  	ipr_cmd->timer.function = ipr_reset_timer_done;
7517  	add_timer(&ipr_cmd->timer);
7518  }
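
/*
 * Illustrative sketch (not part of the driver build): ipr_cmd->timer is a
 * struct timer_list embedded in the command block, and the expiry handler
 * recovers the command with from_timer(), as ipr_reset_timer_done() does
 * above. The usual setup/arm sequence for such an embedded timer, with
 * hypothetical names:
 */
#if 0
static void example_timeout(struct timer_list *t)
{
	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);

	/* ... handle expiry for ipr_cmd ... */
}

static void example_arm_timer(struct ipr_cmnd *ipr_cmd, unsigned long timeout)
{
	/* timer_setup() is normally done once when the command block is built */
	timer_setup(&ipr_cmd->timer, example_timeout, 0);
	ipr_cmd->timer.expires = jiffies + timeout;
	add_timer(&ipr_cmd->timer);
}
#endif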
7519  
7520  /**
7521   * ipr_init_ioa_mem - Initialize ioa_cfg control block
7522   * @ioa_cfg:	ioa cfg struct
7523   *
7524   * Return value:
7525   * 	nothing
7526   **/
7527  static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7528  {
7529  	struct ipr_hrr_queue *hrrq;
7530  
7531  	for_each_hrrq(hrrq, ioa_cfg) {
7532  		spin_lock(&hrrq->_lock);
7533  		memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
7534  
7535  		/* Initialize Host RRQ pointers */
7536  		hrrq->hrrq_start = hrrq->host_rrq;
7537  		hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
7538  		hrrq->hrrq_curr = hrrq->hrrq_start;
7539  		hrrq->toggle_bit = 1;
7540  		spin_unlock(&hrrq->_lock);
7541  	}
7542  	wmb();
7543  
7544  	ioa_cfg->identify_hrrq_index = 0;
7545  	if (ioa_cfg->hrrq_num == 1)
7546  		atomic_set(&ioa_cfg->hrrq_index, 0);
7547  	else
7548  		atomic_set(&ioa_cfg->hrrq_index, 1);
7549  
7550  	/* Zero out config table */
7551  	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7552  }
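
/*
 * Illustrative sketch (not part of the driver build), assuming the toggle
 * bit is the low bit of each response word: a host RRQ is a ring of
 * 32-bit entries. The adapter writes entries whose toggle bit matches
 * hrrq->toggle_bit; when the host wraps from hrrq_end back to hrrq_start
 * it flips the expected toggle, so a stale entry from the previous lap is
 * never mistaken for a new one. A simplified consumer loop:
 */
#if 0
static void example_drain_hrrq(struct ipr_hrr_queue *hrrq)
{
	while ((be32_to_cpu(*hrrq->hrrq_curr) & 1) == hrrq->toggle_bit) {
		/* ... process the response word at hrrq->hrrq_curr ... */

		if (hrrq->hrrq_curr < hrrq->hrrq_end) {
			hrrq->hrrq_curr++;
		} else {
			hrrq->hrrq_curr = hrrq->hrrq_start;
			hrrq->toggle_bit ^= 1;	/* new lap, flip expectation */
		}
	}
}
#endif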
7553  
7554  /**
7555   * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7556   * @ipr_cmd:	ipr command struct
7557   *
7558   * Return value:
7559   * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7560   **/
7561  static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7562  {
7563  	unsigned long stage, stage_time;
7564  	u32 feedback;
7565  	volatile u32 int_reg;
7566  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7567  	u64 maskval = 0;
7568  
7569  	feedback = readl(ioa_cfg->regs.init_feedback_reg);
7570  	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7571  	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7572  
7573  	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7574  
7575  	/* sanity check the stage_time value */
7576  	if (stage_time == 0)
7577  		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7578  	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7579  		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7580  	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7581  		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7582  
7583  	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7584  		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7585  		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7586  		stage_time = ioa_cfg->transop_timeout;
7587  		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7588  	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7589  		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7590  		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7591  			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7592  			maskval = IPR_PCII_IPL_STAGE_CHANGE;
7593  			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7594  			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7595  			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7596  			return IPR_RC_JOB_CONTINUE;
7597  		}
7598  	}
7599  
7600  	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7601  	ipr_cmd->timer.function = ipr_oper_timeout;
7602  	ipr_cmd->done = ipr_reset_ioa_job;
7603  	add_timer(&ipr_cmd->timer);
7604  
7605  	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7606  
7607  	return IPR_RC_JOB_RETURN;
7608  }
7609  
7610  /**
7611   * ipr_reset_enable_ioa - Enable the IOA following a reset.
7612   * @ipr_cmd:	ipr command struct
7613   *
7614   * This function reinitializes some control blocks and
7615   * enables destructive diagnostics on the adapter.
7616   *
7617   * Return value:
7618   * 	IPR_RC_JOB_RETURN
7619   **/
7620  static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7621  {
7622  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7623  	volatile u32 int_reg;
7624  	volatile u64 maskval;
7625  	int i;
7626  
7627  	ENTER;
7628  	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7629  	ipr_init_ioa_mem(ioa_cfg);
7630  
7631  	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7632  		spin_lock(&ioa_cfg->hrrq[i]._lock);
7633  		ioa_cfg->hrrq[i].allow_interrupts = 1;
7634  		spin_unlock(&ioa_cfg->hrrq[i]._lock);
7635  	}
7636  	if (ioa_cfg->sis64) {
7637  		/* Set the adapter to the correct endian mode. */
7638  		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7639  		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7640  	}
7641  
7642  	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7643  
7644  	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7645  		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7646  		       ioa_cfg->regs.clr_interrupt_mask_reg32);
7647  		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7648  		return IPR_RC_JOB_CONTINUE;
7649  	}
7650  
7651  	/* Enable destructive diagnostics on IOA */
7652  	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7653  
7654  	if (ioa_cfg->sis64) {
7655  		maskval = IPR_PCII_IPL_STAGE_CHANGE;
7656  		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7657  		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7658  	} else
7659  		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7660  
7661  	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7662  
7663  	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7664  
7665  	if (ioa_cfg->sis64) {
7666  		ipr_cmd->job_step = ipr_reset_next_stage;
7667  		return IPR_RC_JOB_CONTINUE;
7668  	}
7669  
7670  	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7671  	ipr_cmd->timer.function = ipr_oper_timeout;
7672  	ipr_cmd->done = ipr_reset_ioa_job;
7673  	add_timer(&ipr_cmd->timer);
7674  	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
7675  
7676  	LEAVE;
7677  	return IPR_RC_JOB_RETURN;
7678  }
7679  
7680  /**
7681   * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7682   * @ipr_cmd:	ipr command struct
7683   *
7684   * This function is invoked when an adapter dump has run out
7685   * of processing time.
7686   *
7687   * Return value:
7688   * 	IPR_RC_JOB_CONTINUE
7689   **/
7690  static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7691  {
7692  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7693  
7694  	if (ioa_cfg->sdt_state == GET_DUMP)
7695  		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7696  	else if (ioa_cfg->sdt_state == READ_DUMP)
7697  		ioa_cfg->sdt_state = ABORT_DUMP;
7698  
7699  	ioa_cfg->dump_timeout = 1;
7700  	ipr_cmd->job_step = ipr_reset_alert;
7701  
7702  	return IPR_RC_JOB_CONTINUE;
7703  }
7704  
7705  /**
7706   * ipr_unit_check_no_data - Log a unit check/no data error log
7707   * @ioa_cfg:		ioa config struct
7708   *
7709   * Logs an error indicating the adapter unit checked, but for some
7710   * reason, we were unable to fetch the unit check buffer.
7711   *
7712   * Return value:
7713   * 	nothing
7714   **/
7715  static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7716  {
7717  	ioa_cfg->errors_logged++;
7718  	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7719  }
7720  
7721  /**
7722   * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7723   * @ioa_cfg:		ioa config struct
7724   *
7725   * Fetches the unit check buffer from the adapter by clocking the data
7726   * through the mailbox register.
7727   *
7728   * Return value:
7729   * 	nothing
7730   **/
7731  static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7732  {
7733  	unsigned long mailbox;
7734  	struct ipr_hostrcb *hostrcb;
7735  	struct ipr_uc_sdt sdt;
7736  	int rc, length;
7737  	u32 ioasc;
7738  
7739  	mailbox = readl(ioa_cfg->ioa_mailbox);
7740  
7741  	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7742  		ipr_unit_check_no_data(ioa_cfg);
7743  		return;
7744  	}
7745  
7746  	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7747  	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7748  					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7749  
7750  	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7751  	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7752  	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7753  		ipr_unit_check_no_data(ioa_cfg);
7754  		return;
7755  	}
7756  
7757  	/* Find length of the first sdt entry (UC buffer) */
7758  	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7759  		length = be32_to_cpu(sdt.entry[0].end_token);
7760  	else
7761  		length = (be32_to_cpu(sdt.entry[0].end_token) -
7762  			  be32_to_cpu(sdt.entry[0].start_token)) &
7763  			  IPR_FMT2_MBX_ADDR_MASK;
7764  
7765  	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7766  			     struct ipr_hostrcb, queue);
7767  	list_del_init(&hostrcb->queue);
7768  	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7769  
7770  	rc = ipr_get_ldump_data_section(ioa_cfg,
7771  					be32_to_cpu(sdt.entry[0].start_token),
7772  					(__be32 *)&hostrcb->hcam,
7773  					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7774  
7775  	if (!rc) {
7776  		ipr_handle_log_data(ioa_cfg, hostrcb);
7777  		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7778  		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7779  		    ioa_cfg->sdt_state == GET_DUMP)
7780  			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7781  	} else
7782  		ipr_unit_check_no_data(ioa_cfg);
7783  
7784  	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7785  }
7786  
7787  /**
7788   * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
7789   * @ipr_cmd:	ipr command struct
7790   *
7791   * Description: This function fetches the unit check buffer from the adapter.
7792   *
7793   * Return value:
7794   *	IPR_RC_JOB_RETURN
7795   **/
7796  static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
7797  {
7798  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7799  
7800  	ENTER;
7801  	ioa_cfg->ioa_unit_checked = 0;
7802  	ipr_get_unit_check_buffer(ioa_cfg);
7803  	ipr_cmd->job_step = ipr_reset_alert;
7804  	ipr_reset_start_timer(ipr_cmd, 0);
7805  
7806  	LEAVE;
7807  	return IPR_RC_JOB_RETURN;
7808  }
7809  
7810  static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
7811  {
7812  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7813  
7814  	ENTER;
7815  
7816  	if (ioa_cfg->sdt_state != GET_DUMP)
7817  		return IPR_RC_JOB_RETURN;
7818  
7819  	if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
7820  	    (readl(ioa_cfg->regs.sense_interrupt_reg) &
7821  	     IPR_PCII_MAILBOX_STABLE)) {
7822  
7823  		if (!ipr_cmd->u.time_left)
7824  			dev_err(&ioa_cfg->pdev->dev,
7825  				"Timed out waiting for Mailbox register.\n");
7826  
7827  		ioa_cfg->sdt_state = READ_DUMP;
7828  		ioa_cfg->dump_timeout = 0;
7829  		if (ioa_cfg->sis64)
7830  			ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
7831  		else
7832  			ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
7833  		ipr_cmd->job_step = ipr_reset_wait_for_dump;
7834  		schedule_work(&ioa_cfg->work_q);
7835  
7836  	} else {
7837  		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7838  		ipr_reset_start_timer(ipr_cmd,
7839  				      IPR_CHECK_FOR_RESET_TIMEOUT);
7840  	}
7841  
7842  	LEAVE;
7843  	return IPR_RC_JOB_RETURN;
7844  }
7845  
7846  /**
7847   * ipr_reset_restore_cfg_space - Restore PCI config space.
7848   * @ipr_cmd:	ipr command struct
7849   *
7850   * Description: This function restores the saved PCI config space of
7851   * the adapter, fails all outstanding ops back to the callers, and
7852   * fetches the dump/unit check if applicable to this reset.
7853   *
7854   * Return value:
7855   * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7856   **/
7857  static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7858  {
7859  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7860  
7861  	ENTER;
7862  	ioa_cfg->pdev->state_saved = true;
7863  	pci_restore_state(ioa_cfg->pdev);
7864  
7865  	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7866  		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7867  		return IPR_RC_JOB_CONTINUE;
7868  	}
7869  
7870  	ipr_fail_all_ops(ioa_cfg);
7871  
7872  	if (ioa_cfg->sis64) {
7873  		/* Set the adapter to the correct endian mode. */
7874  		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7875  		readl(ioa_cfg->regs.endian_swap_reg);
7876  	}
7877  
7878  	if (ioa_cfg->ioa_unit_checked) {
7879  		if (ioa_cfg->sis64) {
7880  			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
7881  			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
7882  			return IPR_RC_JOB_RETURN;
7883  		} else {
7884  			ioa_cfg->ioa_unit_checked = 0;
7885  			ipr_get_unit_check_buffer(ioa_cfg);
7886  			ipr_cmd->job_step = ipr_reset_alert;
7887  			ipr_reset_start_timer(ipr_cmd, 0);
7888  			return IPR_RC_JOB_RETURN;
7889  		}
7890  	}
7891  
7892  	if (ioa_cfg->in_ioa_bringdown) {
7893  		ipr_cmd->job_step = ipr_ioa_bringdown_done;
7894  	} else if (ioa_cfg->sdt_state == GET_DUMP) {
7895  		ipr_cmd->job_step = ipr_dump_mailbox_wait;
7896  		ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
7897  	} else {
7898  		ipr_cmd->job_step = ipr_reset_enable_ioa;
7899  	}
7900  
7901  	LEAVE;
7902  	return IPR_RC_JOB_CONTINUE;
7903  }
7904  
7905  /**
7906   * ipr_reset_bist_done - BIST has completed on the adapter.
7907   * @ipr_cmd:	ipr command struct
7908   *
7909   * Description: Unblock config space and resume the reset process.
7910   *
7911   * Return value:
7912   * 	IPR_RC_JOB_CONTINUE
7913   **/
7914  static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7915  {
7916  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7917  
7918  	ENTER;
7919  	if (ioa_cfg->cfg_locked)
7920  		pci_cfg_access_unlock(ioa_cfg->pdev);
7921  	ioa_cfg->cfg_locked = 0;
7922  	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7923  	LEAVE;
7924  	return IPR_RC_JOB_CONTINUE;
7925  }
7926  
7927  /**
7928   * ipr_reset_start_bist - Run BIST on the adapter.
7929   * @ipr_cmd:	ipr command struct
7930   *
7931   * Description: This function runs BIST on the adapter, then delays 2 seconds.
7932   *
7933   * Return value:
7934   * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7935   **/
7936  static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7937  {
7938  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7939  	int rc = PCIBIOS_SUCCESSFUL;
7940  
7941  	ENTER;
7942  	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
7943  		writel(IPR_UPROCI_SIS64_START_BIST,
7944  		       ioa_cfg->regs.set_uproc_interrupt_reg32);
7945  	else
7946  		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7947  
7948  	if (rc == PCIBIOS_SUCCESSFUL) {
7949  		ipr_cmd->job_step = ipr_reset_bist_done;
7950  		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7951  		rc = IPR_RC_JOB_RETURN;
7952  	} else {
7953  		if (ioa_cfg->cfg_locked)
7954  			pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
7955  		ioa_cfg->cfg_locked = 0;
7956  		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7957  		rc = IPR_RC_JOB_CONTINUE;
7958  	}
7959  
7960  	LEAVE;
7961  	return rc;
7962  }
7963  
7964  /**
7965   * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7966   * @ipr_cmd:	ipr command struct
7967   *
7968   * Description: This clears PCI reset to the adapter and delays two seconds.
7969   *
7970   * Return value:
7971   * 	IPR_RC_JOB_RETURN
7972   **/
7973  static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7974  {
7975  	ENTER;
7976  	ipr_cmd->job_step = ipr_reset_bist_done;
7977  	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7978  	LEAVE;
7979  	return IPR_RC_JOB_RETURN;
7980  }
7981  
7982  /**
7983   * ipr_reset_reset_work - Pulse a PCIe fundamental reset
7984   * @work:	work struct
7985   *
7986   * Description: This pulses a PCIe warm reset to the slot.
7987   *
7988   **/
7989  static void ipr_reset_reset_work(struct work_struct *work)
7990  {
7991  	struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
7992  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7993  	struct pci_dev *pdev = ioa_cfg->pdev;
7994  	unsigned long lock_flags = 0;
7995  
7996  	ENTER;
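	/* Assert a PCIe warm reset, hold it for IPR_PCI_RESET_TIMEOUT, then deassert it. */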
7997  	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7998  	msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
7999  	pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8000  
8001  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8002  	if (ioa_cfg->reset_cmd == ipr_cmd)
8003  		ipr_reset_ioa_job(ipr_cmd);
8004  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8005  	LEAVE;
8006  }
8007  
8008  /**
8009   * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8010   * @ipr_cmd:	ipr command struct
8011   *
8012   * Description: This asserts PCI reset to the adapter.
8013   *
8014   * Return value:
8015   * 	IPR_RC_JOB_RETURN
8016   **/
8017  static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8018  {
8019  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8020  
8021  	ENTER;
8022  	INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8023  	queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
8024  	ipr_cmd->job_step = ipr_reset_slot_reset_done;
8025  	LEAVE;
8026  	return IPR_RC_JOB_RETURN;
8027  }
8028  
8029  /**
8030   * ipr_reset_block_config_access_wait - Wait for permission to block config access
8031   * @ipr_cmd:	ipr command struct
8032   *
8033   * Description: This attempts to block config access to the IOA.
8034   *
8035   * Return value:
8036   * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8037   **/
8038  static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8039  {
8040  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8041  	int rc = IPR_RC_JOB_CONTINUE;
8042  
8043  	if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8044  		ioa_cfg->cfg_locked = 1;
8045  		ipr_cmd->job_step = ioa_cfg->reset;
8046  	} else {
8047  		if (ipr_cmd->u.time_left) {
8048  			rc = IPR_RC_JOB_RETURN;
8049  			ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8050  			ipr_reset_start_timer(ipr_cmd,
8051  					      IPR_CHECK_FOR_RESET_TIMEOUT);
8052  		} else {
8053  			ipr_cmd->job_step = ioa_cfg->reset;
8054  			dev_err(&ioa_cfg->pdev->dev,
8055  				"Timed out waiting to lock config access. Resetting anyway.\n");
8056  		}
8057  	}
8058  
8059  	return rc;
8060  }
8061  
8062  /**
8063   * ipr_reset_block_config_access - Block config access to the IOA
8064   * @ipr_cmd:	ipr command struct
8065   *
8066   * Description: This attempts to block config access to the IOA
8067   *
8068   * Return value:
8069   * 	IPR_RC_JOB_CONTINUE
8070   **/
8071  static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8072  {
8073  	ipr_cmd->ioa_cfg->cfg_locked = 0;
8074  	ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8075  	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8076  	return IPR_RC_JOB_CONTINUE;
8077  }
8078  
8079  /**
8080   * ipr_reset_allowed - Query whether or not IOA can be reset
8081   * @ioa_cfg:	ioa config struct
8082   *
8083   * Return value:
8084   * 	0 if reset not allowed / non-zero if reset is allowed
8085   **/
8086  static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8087  {
8088  	volatile u32 temp_reg;
8089  
8090  	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8091  	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8092  }
8093  
8094  /**
8095   * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8096   * @ipr_cmd:	ipr command struct
8097   *
8098   * Description: This function waits for adapter permission to run BIST,
8099   * then runs BIST. If the adapter does not give permission after a
8100   * reasonable time, we will reset the adapter anyway. The impact of
8101   * resetting the adapter without warning the adapter is the risk of
8102   * losing the persistent error log on the adapter. If the adapter is
8103   * reset while it is writing to the flash on the adapter, the flash
8104   * segment will have bad ECC and be zeroed.
8105   *
8106   * Return value:
8107   * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8108   **/
8109  static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8110  {
8111  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8112  	int rc = IPR_RC_JOB_RETURN;
8113  
8114  	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8115  		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8116  		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8117  	} else {
8118  		ipr_cmd->job_step = ipr_reset_block_config_access;
8119  		rc = IPR_RC_JOB_CONTINUE;
8120  	}
8121  
8122  	return rc;
8123  }
8124  
8125  /**
8126   * ipr_reset_alert - Alert the adapter of a pending reset
8127   * @ipr_cmd:	ipr command struct
8128   *
8129   * Description: This function alerts the adapter that it will be reset.
8130   * If memory space is not currently enabled, proceed directly
8131   * to running BIST on the adapter. The timer must always be started
8132   * so we guarantee we do not run BIST from ipr_isr.
8133   *
8134   * Return value:
8135   * 	IPR_RC_JOB_RETURN
8136   **/
8137  static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8138  {
8139  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8140  	u16 cmd_reg;
8141  	int rc;
8142  
8143  	ENTER;
8144  	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8145  
8146  	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8147  		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
8148  		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
8149  		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8150  	} else {
8151  		ipr_cmd->job_step = ipr_reset_block_config_access;
8152  	}
8153  
8154  	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8155  	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8156  
8157  	LEAVE;
8158  	return IPR_RC_JOB_RETURN;
8159  }
8160  
8161  /**
8162   * ipr_reset_quiesce_done - Complete IOA disconnect
8163   * @ipr_cmd:	ipr command struct
8164   *
8165   * Description: Freeze the adapter to complete quiesce processing
8166   *
8167   * Return value:
8168   * 	IPR_RC_JOB_CONTINUE
8169   **/
8170  static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8171  {
8172  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8173  
8174  	ENTER;
8175  	ipr_cmd->job_step = ipr_ioa_bringdown_done;
8176  	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8177  	LEAVE;
8178  	return IPR_RC_JOB_CONTINUE;
8179  }
8180  
8181  /**
8182   * ipr_reset_cancel_hcam_done - Check for outstanding commands
8183   * @ipr_cmd:	ipr command struct
8184   *
8185   * Description: If nothing is outstanding to the IOA, proceed with
8186   * the IOA disconnect; otherwise, reset the IOA.
8187   *
8188   * Return value:
8189   * 	IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8190   **/
8191  static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8192  {
8193  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8194  	struct ipr_cmnd *loop_cmd;
8195  	struct ipr_hrr_queue *hrrq;
8196  	int rc = IPR_RC_JOB_CONTINUE;
8197  	int count = 0;
8198  
8199  	ENTER;
8200  	ipr_cmd->job_step = ipr_reset_quiesce_done;
8201  
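	/*
	 * If any command is still outstanding to the adapter, abandon the
	 * quiesce and initiate a full IOA reset instead.
	 */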
8202  	for_each_hrrq(hrrq, ioa_cfg) {
8203  		spin_lock(&hrrq->_lock);
8204  		list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8205  			count++;
8206  			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8207  			list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8208  			rc = IPR_RC_JOB_RETURN;
8209  			break;
8210  		}
8211  		spin_unlock(&hrrq->_lock);
8212  
8213  		if (count)
8214  			break;
8215  	}
8216  
8217  	LEAVE;
8218  	return rc;
8219  }
8220  
8221  /**
8222   * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8223   * @ipr_cmd:	ipr command struct
8224   *
8225   * Description: Cancel any outstanding HCAMs to the IOA.
8226   *
8227   * Return value:
8228   * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8229   **/
8230  static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
8231  {
8232  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8233  	int rc = IPR_RC_JOB_CONTINUE;
8234  	struct ipr_cmd_pkt *cmd_pkt;
8235  	struct ipr_cmnd *hcam_cmd;
8236  	struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
8237  
8238  	ENTER;
8239  	ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
8240  
8241  	if (!hrrq->ioa_is_dead) {
8242  		if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
8243  			list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
8244  				if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
8245  					continue;
8246  
8247  				ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8248  				ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8249  				cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
8250  				cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
8251  				cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
8252  				cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
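				/*
				 * The 64-bit bus address of the HCAM being
				 * cancelled is split across the CDB: bytes
				 * 10-13 carry the upper 32 bits and bytes
				 * 2-5 the lower 32 bits, MSB first.
				 */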
8253  				cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
8254  				cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
8255  				cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
8256  				cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
8257  				cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
8258  				cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
8259  				cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
8260  				cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
8261  
8262  				ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8263  					   IPR_CANCEL_TIMEOUT);
8264  
8265  				rc = IPR_RC_JOB_RETURN;
8266  				ipr_cmd->job_step = ipr_reset_cancel_hcam;
8267  				break;
8268  			}
8269  		}
8270  	} else
8271  		ipr_cmd->job_step = ipr_reset_alert;
8272  
8273  	LEAVE;
8274  	return rc;
8275  }
8276  
8277  /**
8278   * ipr_reset_ucode_download_done - Microcode download completion
8279   * @ipr_cmd:	ipr command struct
8280   *
8281   * Description: This function unmaps the microcode download buffer.
8282   *
8283   * Return value:
8284   * 	IPR_RC_JOB_CONTINUE
8285   **/
8286  static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
8287  {
8288  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8289  	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8290  
8291  	dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
8292  		     sglist->num_sg, DMA_TO_DEVICE);
8293  
8294  	ipr_cmd->job_step = ipr_reset_alert;
8295  	return IPR_RC_JOB_CONTINUE;
8296  }
8297  
8298  /**
8299   * ipr_reset_ucode_download - Download microcode to the adapter
8300   * @ipr_cmd:	ipr command struct
8301   *
8302   * Description: This function checks to see if there is microcode
8303   * to download to the adapter. If there is, a download is performed.
8304   *
8305   * Return value:
8306   * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8307   **/
8308  static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
8309  {
8310  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8311  	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
8312  
8313  	ENTER;
8314  	ipr_cmd->job_step = ipr_reset_alert;
8315  
8316  	if (!sglist)
8317  		return IPR_RC_JOB_CONTINUE;
8318  
8319  	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8320  	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
8321  	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
8322  	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
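	/* The microcode image length goes in CDB bytes 6-8 (24-bit, big-endian). */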
8323  	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
8324  	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
8325  	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
8326  
8327  	if (ioa_cfg->sis64)
8328  		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
8329  	else
8330  		ipr_build_ucode_ioadl(ipr_cmd, sglist);
8331  	ipr_cmd->job_step = ipr_reset_ucode_download_done;
8332  
8333  	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8334  		   IPR_WRITE_BUFFER_TIMEOUT);
8335  
8336  	LEAVE;
8337  	return IPR_RC_JOB_RETURN;
8338  }
8339  
8340  /**
8341   * ipr_reset_shutdown_ioa - Shutdown the adapter
8342   * @ipr_cmd:	ipr command struct
8343   *
8344   * Description: This function issues an adapter shutdown of the
8345   * specified type to the specified adapter as part of the
8346   * adapter reset job.
8347   *
8348   * Return value:
8349   * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8350   **/
8351  static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
8352  {
8353  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8354  	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
8355  	unsigned long timeout;
8356  	int rc = IPR_RC_JOB_CONTINUE;
8357  
8358  	ENTER;
8359  	if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
8360  		ipr_cmd->job_step = ipr_reset_cancel_hcam;
8361  	else if (shutdown_type != IPR_SHUTDOWN_NONE &&
8362  			!ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
8363  		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8364  		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8365  		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8366  		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
8367  
8368  		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
8369  			timeout = IPR_SHUTDOWN_TIMEOUT;
8370  		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
8371  			timeout = IPR_INTERNAL_TIMEOUT;
8372  		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
8373  			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
8374  		else
8375  			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
8376  
8377  		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
8378  
8379  		rc = IPR_RC_JOB_RETURN;
8380  		ipr_cmd->job_step = ipr_reset_ucode_download;
8381  	} else
8382  		ipr_cmd->job_step = ipr_reset_alert;
8383  
8384  	LEAVE;
8385  	return rc;
8386  }
8387  
8388  /**
8389   * ipr_reset_ioa_job - Adapter reset job
8390   * @ipr_cmd:	ipr command struct
8391   *
8392   * Description: This function is the job router for the adapter reset job.
8393   *
8394   * Return value:
8395   * 	none
8396   **/
8397  static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
8398  {
8399  	u32 rc, ioasc;
8400  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8401  
8402  	do {
8403  		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
8404  
8405  		if (ioa_cfg->reset_cmd != ipr_cmd) {
8406  			/*
8407  			 * We are doing nested adapter resets and this is
8408  			 * not the current reset job.
8409  			 */
8410  			list_add_tail(&ipr_cmd->queue,
8411  					&ipr_cmd->hrrq->hrrq_free_q);
8412  			return;
8413  		}
8414  
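		/*
		 * If the previous job step failed, let its failure handler
		 * decide whether to bail out or continue the reset job.
		 */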
8415  		if (IPR_IOASC_SENSE_KEY(ioasc)) {
8416  			rc = ipr_cmd->job_step_failed(ipr_cmd);
8417  			if (rc == IPR_RC_JOB_RETURN)
8418  				return;
8419  		}
8420  
8421  		ipr_reinit_ipr_cmnd(ipr_cmd);
8422  		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
8423  		rc = ipr_cmd->job_step(ipr_cmd);
8424  	} while (rc == IPR_RC_JOB_CONTINUE);
8425  }
8426  
8427  /**
8428   * _ipr_initiate_ioa_reset - Initiate an adapter reset
8429   * @ioa_cfg:		ioa config struct
8430   * @job_step:		first job step of reset job
8431   * @shutdown_type:	shutdown type
8432   *
8433   * Description: This function will initiate the reset of the given adapter
8434   * starting at the selected job step.
8435   * If the caller needs to wait on the completion of the reset,
8436   * the caller must sleep on the reset_wait_q.
8437   *
8438   * Return value:
8439   * 	none
8440   **/
8441  static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8442  				    int (*job_step) (struct ipr_cmnd *),
8443  				    enum ipr_shutdown_type shutdown_type)
8444  {
8445  	struct ipr_cmnd *ipr_cmd;
8446  	int i;
8447  
8448  	ioa_cfg->in_reset_reload = 1;
8449  	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8450  		spin_lock(&ioa_cfg->hrrq[i]._lock);
8451  		ioa_cfg->hrrq[i].allow_cmds = 0;
8452  		spin_unlock(&ioa_cfg->hrrq[i]._lock);
8453  	}
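	/* Make the allow_cmds updates visible before blocking new SCSI requests. */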
8454  	wmb();
8455  	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8456  		ioa_cfg->scsi_unblock = 0;
8457  		ioa_cfg->scsi_blocked = 1;
8458  		scsi_block_requests(ioa_cfg->host);
8459  	}
8460  
8461  	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8462  	ioa_cfg->reset_cmd = ipr_cmd;
8463  	ipr_cmd->job_step = job_step;
8464  	ipr_cmd->u.shutdown_type = shutdown_type;
8465  
8466  	ipr_reset_ioa_job(ipr_cmd);
8467  }
8468  
8469  /**
8470   * ipr_initiate_ioa_reset - Initiate an adapter reset
8471   * @ioa_cfg:		ioa config struct
8472   * @shutdown_type:	shutdown type
8473   *
8474   * Description: This function will initiate the reset of the given adapter.
8475   * If the caller needs to wait on the completion of the reset,
8476   * the caller must sleep on the reset_wait_q.
8477   *
8478   * Return value:
8479   * 	none
8480   **/
8481  static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
8482  				   enum ipr_shutdown_type shutdown_type)
8483  {
8484  	int i;
8485  
8486  	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
8487  		return;
8488  
8489  	if (ioa_cfg->in_reset_reload) {
8490  		if (ioa_cfg->sdt_state == GET_DUMP)
8491  			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8492  		else if (ioa_cfg->sdt_state == READ_DUMP)
8493  			ioa_cfg->sdt_state = ABORT_DUMP;
8494  	}
8495  
8496  	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
8497  		dev_err(&ioa_cfg->pdev->dev,
8498  			"IOA taken offline - error recovery failed\n");
8499  
8500  		ioa_cfg->reset_retries = 0;
8501  		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8502  			spin_lock(&ioa_cfg->hrrq[i]._lock);
8503  			ioa_cfg->hrrq[i].ioa_is_dead = 1;
8504  			spin_unlock(&ioa_cfg->hrrq[i]._lock);
8505  		}
8506  		wmb();
8507  
8508  		if (ioa_cfg->in_ioa_bringdown) {
8509  			ioa_cfg->reset_cmd = NULL;
8510  			ioa_cfg->in_reset_reload = 0;
8511  			ipr_fail_all_ops(ioa_cfg);
8512  			wake_up_all(&ioa_cfg->reset_wait_q);
8513  
8514  			if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
8515  				ioa_cfg->scsi_unblock = 1;
8516  				schedule_work(&ioa_cfg->work_q);
8517  			}
8518  			return;
8519  		} else {
8520  			ioa_cfg->in_ioa_bringdown = 1;
8521  			shutdown_type = IPR_SHUTDOWN_NONE;
8522  		}
8523  	}
8524  
8525  	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
8526  				shutdown_type);
8527  }
8528  
8529  /**
8530   * ipr_reset_freeze - Hold off all I/O activity
8531   * @ipr_cmd:	ipr command struct
8532   *
8533   * Description: If the PCI slot is frozen, hold off all I/O
8534   * activity; then, as soon as the slot is available again,
8535   * initiate an adapter reset.
8536   */
8537  static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
8538  {
8539  	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8540  	int i;
8541  
8542  	/* Disallow new interrupts, avoid loop */
8543  	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8544  		spin_lock(&ioa_cfg->hrrq[i]._lock);
8545  		ioa_cfg->hrrq[i].allow_interrupts = 0;
8546  		spin_unlock(&ioa_cfg->hrrq[i]._lock);
8547  	}
8548  	wmb();
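	/*
	 * Park this command on the pending queue; its done routine restarts
	 * the reset job once the PCI slot has been recovered.
	 */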
8549  	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
8550  	ipr_cmd->done = ipr_reset_ioa_job;
8551  	return IPR_RC_JOB_RETURN;
8552  }
8553  
8554  /**
8555   * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
8556   * @pdev:	PCI device struct
8557   *
8558   * Description: This routine is called to tell us that the MMIO
8559   * access to the IOA has been restored.
8560   */
8561  static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
8562  {
8563  	unsigned long flags = 0;
8564  	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8565  
8566  	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8567  	if (!ioa_cfg->probe_done)
8568  		pci_save_state(pdev);
8569  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8570  	return PCI_ERS_RESULT_NEED_RESET;
8571  }
8572  
8573  /**
8574   * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8575   * @pdev:	PCI device struct
8576   *
8577   * Description: This routine is called to tell us that the PCI bus
8578   * is down. Can't do anything here, except put the device driver
8579   * into a holding pattern, waiting for the PCI bus to come back.
8580   */
8581  static void ipr_pci_frozen(struct pci_dev *pdev)
8582  {
8583  	unsigned long flags = 0;
8584  	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8585  
8586  	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8587  	if (ioa_cfg->probe_done)
8588  		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8589  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8590  }
8591  
8592  /**
8593   * ipr_pci_slot_reset - Called when PCI slot has been reset.
8594   * @pdev:	PCI device struct
8595   *
8596   * Description: This routine is called by the pci error recovery
8597   * code after the PCI slot has been reset, just before we
8598   * should resume normal operations.
8599   */
8600  static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8601  {
8602  	unsigned long flags = 0;
8603  	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8604  
8605  	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8606  	if (ioa_cfg->probe_done) {
8607  		if (ioa_cfg->needs_warm_reset)
8608  			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8609  		else
8610  			_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8611  						IPR_SHUTDOWN_NONE);
8612  	} else
8613  		wake_up_all(&ioa_cfg->eeh_wait_q);
8614  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8615  	return PCI_ERS_RESULT_RECOVERED;
8616  }
8617  
8618  /**
8619   * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8620   * @pdev:	PCI device struct
8621   *
8622   * Description: This routine is called when the PCI bus has
8623   * permanently failed.
8624   */
8625  static void ipr_pci_perm_failure(struct pci_dev *pdev)
8626  {
8627  	unsigned long flags = 0;
8628  	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8629  	int i;
8630  
8631  	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8632  	if (ioa_cfg->probe_done) {
8633  		if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8634  			ioa_cfg->sdt_state = ABORT_DUMP;
8635  		ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
8636  		ioa_cfg->in_ioa_bringdown = 1;
8637  		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8638  			spin_lock(&ioa_cfg->hrrq[i]._lock);
8639  			ioa_cfg->hrrq[i].allow_cmds = 0;
8640  			spin_unlock(&ioa_cfg->hrrq[i]._lock);
8641  		}
8642  		wmb();
8643  		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8644  	} else
8645  		wake_up_all(&ioa_cfg->eeh_wait_q);
8646  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8647  }
8648  
8649  /**
8650   * ipr_pci_error_detected - Called when a PCI error is detected.
8651   * @pdev:	PCI device struct
8652   * @state:	PCI channel state
8653   *
8654   * Description: Called when a PCI error is detected.
8655   *
8656   * Return value:
8657   * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8658   */
8659  static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8660  					       pci_channel_state_t state)
8661  {
8662  	switch (state) {
8663  	case pci_channel_io_frozen:
8664  		ipr_pci_frozen(pdev);
8665  		return PCI_ERS_RESULT_CAN_RECOVER;
8666  	case pci_channel_io_perm_failure:
8667  		ipr_pci_perm_failure(pdev);
8668  		return PCI_ERS_RESULT_DISCONNECT;
8669  	default:
8670  		break;
8671  	}
8672  	return PCI_ERS_RESULT_NEED_RESET;
8673  }
8674  
8675  /**
8676   * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8677   * @ioa_cfg:	ioa cfg struct
8678   *
8679   * Description: This is the second phase of adapter initialization.
8680   * This function takes care of initializing the adapter to the point
8681   * where it can accept new commands.
8682   * Return value:
8683   *     none
8684   **/
8685  static void ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8686  {
8687  	unsigned long host_lock_flags = 0;
8688  
8689  	ENTER;
8690  	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8691  	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8692  	ioa_cfg->probe_done = 1;
8693  	if (ioa_cfg->needs_hard_reset) {
8694  		ioa_cfg->needs_hard_reset = 0;
8695  		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8696  	} else
8697  		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8698  					IPR_SHUTDOWN_NONE);
8699  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8700  
8701  	LEAVE;
8702  }
8703  
8704  /**
8705   * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8706   * @ioa_cfg:	ioa config struct
8707   *
8708   * Return value:
8709   * 	none
8710   **/
8711  static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8712  {
8713  	int i;
8714  
8715  	if (ioa_cfg->ipr_cmnd_list) {
8716  		for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8717  			if (ioa_cfg->ipr_cmnd_list[i])
8718  				dma_pool_free(ioa_cfg->ipr_cmd_pool,
8719  					      ioa_cfg->ipr_cmnd_list[i],
8720  					      ioa_cfg->ipr_cmnd_list_dma[i]);
8721  
8722  			ioa_cfg->ipr_cmnd_list[i] = NULL;
8723  		}
8724  	}
8725  
8726  	dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
8727  
8728  	kfree(ioa_cfg->ipr_cmnd_list);
8729  	kfree(ioa_cfg->ipr_cmnd_list_dma);
8730  	ioa_cfg->ipr_cmnd_list = NULL;
8731  	ioa_cfg->ipr_cmnd_list_dma = NULL;
8732  	ioa_cfg->ipr_cmd_pool = NULL;
8733  }
8734  
8735  /**
8736   * ipr_free_mem - Frees memory allocated for an adapter
8737   * @ioa_cfg:	ioa cfg struct
8738   *
8739   * Return value:
8740   * 	nothing
8741   **/
8742  static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8743  {
8744  	int i;
8745  
8746  	kfree(ioa_cfg->res_entries);
8747  	dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
8748  			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8749  	ipr_free_cmd_blks(ioa_cfg);
8750  
8751  	for (i = 0; i < ioa_cfg->hrrq_num; i++)
8752  		dma_free_coherent(&ioa_cfg->pdev->dev,
8753  				  sizeof(u32) * ioa_cfg->hrrq[i].size,
8754  				  ioa_cfg->hrrq[i].host_rrq,
8755  				  ioa_cfg->hrrq[i].host_rrq_dma);
8756  
8757  	dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
8758  			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
8759  
8760  	for (i = 0; i < IPR_MAX_HCAMS; i++) {
8761  		dma_free_coherent(&ioa_cfg->pdev->dev,
8762  				  sizeof(struct ipr_hostrcb),
8763  				  ioa_cfg->hostrcb[i],
8764  				  ioa_cfg->hostrcb_dma[i]);
8765  	}
8766  
8767  	ipr_free_dump(ioa_cfg);
8768  	kfree(ioa_cfg->trace);
8769  }
8770  
8771  /**
8772   * ipr_free_irqs - Free all allocated IRQs for the adapter.
8773   * @ioa_cfg:	ipr cfg struct
8774   *
8775   * This function frees all allocated IRQs for the
8776   * specified adapter.
8777   *
8778   * Return value:
8779   * 	none
8780   **/
8781  static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
8782  {
8783  	struct pci_dev *pdev = ioa_cfg->pdev;
8784  	int i;
8785  
8786  	for (i = 0; i < ioa_cfg->nvectors; i++)
8787  		free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
8788  	pci_free_irq_vectors(pdev);
8789  }
8790  
8791  /**
8792   * ipr_free_all_resources - Free all allocated resources for an adapter.
8793   * @ioa_cfg:	ioa config struct
8794   *
8795   * This function frees all allocated resources for the
8796   * specified adapter.
8797   *
8798   * Return value:
8799   * 	none
8800   **/
8801  static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8802  {
8803  	struct pci_dev *pdev = ioa_cfg->pdev;
8804  
8805  	ENTER;
8806  	ipr_free_irqs(ioa_cfg);
8807  	if (ioa_cfg->reset_work_q)
8808  		destroy_workqueue(ioa_cfg->reset_work_q);
8809  	iounmap(ioa_cfg->hdw_dma_regs);
8810  	pci_release_regions(pdev);
8811  	ipr_free_mem(ioa_cfg);
8812  	scsi_host_put(ioa_cfg->host);
8813  	pci_disable_device(pdev);
8814  	LEAVE;
8815  }
8816  
8817  /**
8818   * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8819   * @ioa_cfg:	ioa config struct
8820   *
8821   * Return value:
8822   * 	0 on success / -ENOMEM on allocation failure
8823   **/
8824  static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8825  {
8826  	struct ipr_cmnd *ipr_cmd;
8827  	struct ipr_ioarcb *ioarcb;
8828  	dma_addr_t dma_addr;
8829  	int i, entries_each_hrrq, hrrq_id = 0;
8830  
8831  	ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
8832  						sizeof(struct ipr_cmnd), 512, 0);
8833  
8834  	if (!ioa_cfg->ipr_cmd_pool)
8835  		return -ENOMEM;
8836  
8837  	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
8838  	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
8839  
8840  	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
8841  		ipr_free_cmd_blks(ioa_cfg);
8842  		return -ENOMEM;
8843  	}
8844  
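	/*
	 * With multiple HRRQs, queue 0 is reserved for the driver's internal
	 * commands and the remaining command blocks are split evenly across
	 * the other queues.
	 */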
8845  	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8846  		if (ioa_cfg->hrrq_num > 1) {
8847  			if (i == 0) {
8848  				entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
8849  				ioa_cfg->hrrq[i].min_cmd_id = 0;
8850  				ioa_cfg->hrrq[i].max_cmd_id =
8851  					(entries_each_hrrq - 1);
8852  			} else {
8853  				entries_each_hrrq =
8854  					IPR_NUM_BASE_CMD_BLKS/
8855  					(ioa_cfg->hrrq_num - 1);
8856  				ioa_cfg->hrrq[i].min_cmd_id =
8857  					IPR_NUM_INTERNAL_CMD_BLKS +
8858  					(i - 1) * entries_each_hrrq;
8859  				ioa_cfg->hrrq[i].max_cmd_id =
8860  					(IPR_NUM_INTERNAL_CMD_BLKS +
8861  					i * entries_each_hrrq - 1);
8862  			}
8863  		} else {
8864  			entries_each_hrrq = IPR_NUM_CMD_BLKS;
8865  			ioa_cfg->hrrq[i].min_cmd_id = 0;
8866  			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
8867  		}
8868  		ioa_cfg->hrrq[i].size = entries_each_hrrq;
8869  	}
8870  
8871  	BUG_ON(ioa_cfg->hrrq_num == 0);
8872  
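	/* Hand any command blocks left over by the integer division to the last HRRQ. */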
8873  	i = IPR_NUM_CMD_BLKS -
8874  		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
8875  	if (i > 0) {
8876  		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
8877  		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
8878  	}
8879  
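	/*
	 * Carve each command block out of the DMA pool and set up the bus
	 * addresses the adapter uses to reach its IOADL and IOASA.
	 */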
8880  	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8881  		ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
8882  				GFP_KERNEL, &dma_addr);
8883  
8884  		if (!ipr_cmd) {
8885  			ipr_free_cmd_blks(ioa_cfg);
8886  			return -ENOMEM;
8887  		}
8888  
8889  		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8890  		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8891  
8892  		ioarcb = &ipr_cmd->ioarcb;
8893  		ipr_cmd->dma_addr = dma_addr;
8894  		if (ioa_cfg->sis64)
8895  			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8896  		else
8897  			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8898  
8899  		ioarcb->host_response_handle = cpu_to_be32(i << 2);
8900  		if (ioa_cfg->sis64) {
8901  			ioarcb->u.sis64_addr_data.data_ioadl_addr =
8902  				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8903  			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8904  				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
8905  		} else {
8906  			ioarcb->write_ioadl_addr =
8907  				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8908  			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8909  			ioarcb->ioasa_host_pci_addr =
8910  				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
8911  		}
8912  		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8913  		ipr_cmd->cmd_index = i;
8914  		ipr_cmd->ioa_cfg = ioa_cfg;
8915  		ipr_cmd->sense_buffer_dma = dma_addr +
8916  			offsetof(struct ipr_cmnd, sense_buffer);
8917  
8918  		ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
8919  		ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
8920  		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8921  		if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
8922  			hrrq_id++;
8923  	}
8924  
8925  	return 0;
8926  }
8927  
8928  /**
8929   * ipr_alloc_mem - Allocate memory for an adapter
8930   * @ioa_cfg:	ioa config struct
8931   *
8932   * Return value:
8933   * 	0 on success / non-zero for error
8934   **/
8935  static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8936  {
8937  	struct pci_dev *pdev = ioa_cfg->pdev;
8938  	int i, rc = -ENOMEM;
8939  
8940  	ENTER;
8941  	ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
8942  				       sizeof(struct ipr_resource_entry),
8943  				       GFP_KERNEL);
8944  
8945  	if (!ioa_cfg->res_entries)
8946  		goto out;
8947  
8948  	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8949  		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8950  		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8951  	}
8952  
8953  	ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
8954  					      sizeof(struct ipr_misc_cbs),
8955  					      &ioa_cfg->vpd_cbs_dma,
8956  					      GFP_KERNEL);
8957  
8958  	if (!ioa_cfg->vpd_cbs)
8959  		goto out_free_res_entries;
8960  
8961  	if (ipr_alloc_cmd_blks(ioa_cfg))
8962  		goto out_free_vpd_cbs;
8963  
8964  	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8965  		ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
8966  					sizeof(u32) * ioa_cfg->hrrq[i].size,
8967  					&ioa_cfg->hrrq[i].host_rrq_dma,
8968  					GFP_KERNEL);
8969  
8970  		if (!ioa_cfg->hrrq[i].host_rrq)  {
8971  			while (--i >= 0)
8972  				dma_free_coherent(&pdev->dev,
8973  					sizeof(u32) * ioa_cfg->hrrq[i].size,
8974  					ioa_cfg->hrrq[i].host_rrq,
8975  					ioa_cfg->hrrq[i].host_rrq_dma);
8976  			goto out_ipr_free_cmd_blocks;
8977  		}
8978  		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
8979  	}
8980  
8981  	ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
8982  						  ioa_cfg->cfg_table_size,
8983  						  &ioa_cfg->cfg_table_dma,
8984  						  GFP_KERNEL);
8985  
8986  	if (!ioa_cfg->u.cfg_table)
8987  		goto out_free_host_rrq;
8988  
8989  	for (i = 0; i < IPR_MAX_HCAMS; i++) {
8990  		ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
8991  							 sizeof(struct ipr_hostrcb),
8992  							 &ioa_cfg->hostrcb_dma[i],
8993  							 GFP_KERNEL);
8994  
8995  		if (!ioa_cfg->hostrcb[i])
8996  			goto out_free_hostrcb_dma;
8997  
8998  		ioa_cfg->hostrcb[i]->hostrcb_dma =
8999  			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
9000  		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
9001  		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9002  	}
9003  
9004  	ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9005  				 sizeof(struct ipr_trace_entry),
9006  				 GFP_KERNEL);
9007  
9008  	if (!ioa_cfg->trace)
9009  		goto out_free_hostrcb_dma;
9010  
9011  	rc = 0;
9012  out:
9013  	LEAVE;
9014  	return rc;
9015  
9016  out_free_hostrcb_dma:
9017  	while (i-- > 0) {
9018  		dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9019  				  ioa_cfg->hostrcb[i],
9020  				  ioa_cfg->hostrcb_dma[i]);
9021  	}
9022  	dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9023  			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
9024  out_free_host_rrq:
9025  	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9026  		dma_free_coherent(&pdev->dev,
9027  				  sizeof(u32) * ioa_cfg->hrrq[i].size,
9028  				  ioa_cfg->hrrq[i].host_rrq,
9029  				  ioa_cfg->hrrq[i].host_rrq_dma);
9030  	}
9031  out_ipr_free_cmd_blocks:
9032  	ipr_free_cmd_blks(ioa_cfg);
9033  out_free_vpd_cbs:
9034  	dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9035  			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
9036  out_free_res_entries:
9037  	kfree(ioa_cfg->res_entries);
9038  	goto out;
9039  }
9040  
9041  /**
9042   * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9043   * @ioa_cfg:	ioa config struct
9044   *
9045   * Return value:
9046   * 	none
9047   **/
9048  static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
9049  {
9050  	int i;
9051  
9052  	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9053  		ioa_cfg->bus_attr[i].bus = i;
9054  		ioa_cfg->bus_attr[i].qas_enabled = 0;
9055  		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9056  		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9057  			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9058  		else
9059  			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9060  	}
9061  }
9062  
9063  /**
9064   * ipr_init_regs - Initialize IOA registers
9065   * @ioa_cfg:	ioa config struct
9066   *
9067   * Return value:
9068   *	none
9069   **/
9070  static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
9071  {
9072  	const struct ipr_interrupt_offsets *p;
9073  	struct ipr_interrupts *t;
9074  	void __iomem *base;
9075  
9076  	p = &ioa_cfg->chip_cfg->regs;
9077  	t = &ioa_cfg->regs;
9078  	base = ioa_cfg->hdw_dma_regs;
9079  
9080  	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9081  	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
9082  	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
9083  	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
9084  	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
9085  	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
9086  	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
9087  	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
9088  	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
9089  	t->ioarrin_reg = base + p->ioarrin_reg;
9090  	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
9091  	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
9092  	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
9093  	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
9094  	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
9095  	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
9096  
9097  	if (ioa_cfg->sis64) {
9098  		t->init_feedback_reg = base + p->init_feedback_reg;
9099  		t->dump_addr_reg = base + p->dump_addr_reg;
9100  		t->dump_data_reg = base + p->dump_data_reg;
9101  		t->endian_swap_reg = base + p->endian_swap_reg;
9102  	}
9103  }
9104  
9105  /**
9106   * ipr_init_ioa_cfg - Initialize IOA config struct
9107   * @ioa_cfg:	ioa config struct
9108   * @host:		scsi host struct
9109   * @pdev:		PCI dev struct
9110   *
9111   * Return value:
9112   * 	none
9113   **/
9114  static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9115  			     struct Scsi_Host *host, struct pci_dev *pdev)
9116  {
9117  	int i;
9118  
9119  	ioa_cfg->host = host;
9120  	ioa_cfg->pdev = pdev;
9121  	ioa_cfg->log_level = ipr_log_level;
9122  	ioa_cfg->doorbell = IPR_DOORBELL;
9123  	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9124  	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9125  	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9126  	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9127  	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9128  	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9129  
9130  	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9131  	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
9132  	INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
9133  	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9134  	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9135  	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9136  	INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
9137  	init_waitqueue_head(&ioa_cfg->reset_wait_q);
9138  	init_waitqueue_head(&ioa_cfg->msi_wait_q);
9139  	init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9140  	ioa_cfg->sdt_state = INACTIVE;
9141  
9142  	ipr_initialize_bus_attr(ioa_cfg);
9143  	ioa_cfg->max_devs_supported = ipr_max_devs;
9144  
9145  	if (ioa_cfg->sis64) {
9146  		host->max_channel = IPR_MAX_SIS64_BUSES;
9147  		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9148  		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9149  		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9150  			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9151  		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9152  					   + ((sizeof(struct ipr_config_table_entry64)
9153  					       * ioa_cfg->max_devs_supported)));
9154  	} else {
9155  		host->max_channel = IPR_VSET_BUS;
9156  		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9157  		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9158  		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9159  			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9160  		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9161  					   + ((sizeof(struct ipr_config_table_entry)
9162  					       * ioa_cfg->max_devs_supported)));
9163  	}
9164  
9165  	host->unique_id = host->host_no;
9166  	host->max_cmd_len = IPR_MAX_CDB_LEN;
9167  	host->can_queue = ioa_cfg->max_cmds;
9168  	pci_set_drvdata(pdev, ioa_cfg);
9169  
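	/* HRRQ 0 shares the SCSI host lock; any additional HRRQs use their own per-queue locks. */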
9170  	for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9171  		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9172  		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9173  		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9174  		if (i == 0)
9175  			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9176  		else
9177  			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9178  	}
9179  }
9180  
9181  /**
9182   * ipr_get_chip_info - Find adapter chip information
9183   * @dev_id:		PCI device id struct
9184   *
9185   * Return value:
9186   * 	ptr to chip information on success / NULL on failure
9187   **/
9188  static const struct ipr_chip_t *
9189  ipr_get_chip_info(const struct pci_device_id *dev_id)
9190  {
9191  	int i;
9192  
9193  	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9194  		if (ipr_chip[i].vendor == dev_id->vendor &&
9195  		    ipr_chip[i].device == dev_id->device)
9196  			return &ipr_chip[i];
9197  	return NULL;
9198  }
9199  
9200  /**
9201   * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9202   *						during probe time
9203   * @ioa_cfg:	ioa config struct
9204   *
9205   * Return value:
9206   * 	None
9207   **/
9208  static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9209  {
9210  	struct pci_dev *pdev = ioa_cfg->pdev;
9211  
9212  	if (pci_channel_offline(pdev)) {
9213  		wait_event_timeout(ioa_cfg->eeh_wait_q,
9214  				   !pci_channel_offline(pdev),
9215  				   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9216  		pci_restore_state(pdev);
9217  	}
9218  }
9219  
9220  static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9221  {
9222  	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9223  
9224  	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9225  		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9226  			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9227  		ioa_cfg->vectors_info[vec_idx].
9228  			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9229  	}
9230  }
9231  
9232  static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
9233  		struct pci_dev *pdev)
9234  {
9235  	int i, rc;
9236  
9237  	for (i = 1; i < ioa_cfg->nvectors; i++) {
9238  		rc = request_irq(pci_irq_vector(pdev, i),
9239  			ipr_isr_mhrrq,
9240  			0,
9241  			ioa_cfg->vectors_info[i].desc,
9242  			&ioa_cfg->hrrq[i]);
9243  		if (rc) {
9244  			while (--i > 0)
9245  				free_irq(pci_irq_vector(pdev, i),
9246  					&ioa_cfg->hrrq[i]);
9247  			return rc;
9248  		}
9249  	}
9250  	return 0;
9251  }
9252  
9253  /**
9254   * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
9255   * @devp:		ioa config struct
9256   * @irq:		IRQ number
9257   *
9258   * Description: Simply set the msi_received flag to 1 indicating that
9259   * Message Signaled Interrupts are supported.
9260   *
9261   * Return value:
9262   * 	IRQ_HANDLED
9263   **/
9264  static irqreturn_t ipr_test_intr(int irq, void *devp)
9265  {
9266  	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
9267  	unsigned long lock_flags = 0;
9268  
9269  	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
9270  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9271  
9272  	ioa_cfg->msi_received = 1;
9273  	wake_up(&ioa_cfg->msi_wait_q);
9274  
9275  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9276  	return IRQ_HANDLED;
9277  }
9278  
9279  /**
9280   * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
9281   * @ioa_cfg:		ioa config struct
9282   * @pdev:		PCI device struct
9283   *
9284   * Description: This routine sets up and initiates a test interrupt to determine
9285   * if the interrupt is received via the ipr_test_intr() service routine.
9286   * If the test fails, the driver will fall back to LSI.
9287   *
9288   * Return value:
9289   * 	0 on success / non-zero on failure
9290   **/
9291  static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
9292  {
9293  	int rc;
9294  	unsigned long lock_flags = 0;
9295  	int irq = pci_irq_vector(pdev, 0);
9296  
9297  	ENTER;
9298  
9299  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9300  	init_waitqueue_head(&ioa_cfg->msi_wait_q);
9301  	ioa_cfg->msi_received = 0;
9302  	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9303  	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
9304  	readl(ioa_cfg->regs.sense_interrupt_mask_reg);
9305  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9306  
9307  	rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
9308  	if (rc) {
9309  		dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
9310  		return rc;
9311  	} else if (ipr_debug)
9312  		dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
9313  
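	/* Fire the test interrupt, then wait up to one second for ipr_test_intr() to set msi_received. */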
9314  	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
9315  	readl(ioa_cfg->regs.sense_interrupt_reg);
9316  	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
9317  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9318  	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9319  
9320  	if (!ioa_cfg->msi_received) {
9321  		/* MSI test failed */
9322  		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
9323  		rc = -EOPNOTSUPP;
9324  	} else if (ipr_debug)
9325  		dev_info(&pdev->dev, "MSI test succeeded.\n");
9326  
9327  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9328  
9329  	free_irq(irq, ioa_cfg);
9330  
9331  	LEAVE;
9332  
9333  	return rc;
9334  }
9335  
9336  /**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
9337   * @pdev:		PCI device struct
9338   * @dev_id:		PCI device id struct
9339   *
9340   * Return value:
9341   * 	0 on success / non-zero on failure
9342   **/
9343  static int ipr_probe_ioa(struct pci_dev *pdev,
9344  			 const struct pci_device_id *dev_id)
9345  {
9346  	struct ipr_ioa_cfg *ioa_cfg;
9347  	struct Scsi_Host *host;
9348  	unsigned long ipr_regs_pci;
9349  	void __iomem *ipr_regs;
9350  	int rc = PCIBIOS_SUCCESSFUL;
9351  	volatile u32 mask, uproc, interrupts;
9352  	unsigned long lock_flags, driver_lock_flags;
9353  	unsigned int irq_flag;
9354  
9355  	ENTER;
9356  
9357  	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
9358  	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
9359  
9360  	if (!host) {
9361  		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
9362  		rc = -ENOMEM;
9363  		goto out;
9364  	}
9365  
9366  	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
9367  	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
9368  
9369  	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
9370  
9371  	if (!ioa_cfg->ipr_chip) {
9372  		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
9373  			dev_id->vendor, dev_id->device);
9374  		goto out_scsi_host_put;
9375  	}
9376  
9377  	/* set SIS 32 or SIS 64 */
9378  	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
9379  	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
9380  	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
9381  	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
9382  
9383  	if (ipr_transop_timeout)
9384  		ioa_cfg->transop_timeout = ipr_transop_timeout;
9385  	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
9386  		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
9387  	else
9388  		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
9389  
9390  	ioa_cfg->revid = pdev->revision;
9391  
9392  	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
9393  
9394  	ipr_regs_pci = pci_resource_start(pdev, 0);
9395  
9396  	rc = pci_request_regions(pdev, IPR_NAME);
9397  	if (rc < 0) {
9398  		dev_err(&pdev->dev,
9399  			"Couldn't register memory range of registers\n");
9400  		goto out_scsi_host_put;
9401  	}
9402  
9403  	rc = pci_enable_device(pdev);
9404  
9405  	if (rc || pci_channel_offline(pdev)) {
9406  		if (pci_channel_offline(pdev)) {
9407  			ipr_wait_for_pci_err_recovery(ioa_cfg);
9408  			rc = pci_enable_device(pdev);
9409  		}
9410  
9411  		if (rc) {
9412  			dev_err(&pdev->dev, "Cannot enable adapter\n");
9413  			ipr_wait_for_pci_err_recovery(ioa_cfg);
9414  			goto out_release_regions;
9415  		}
9416  	}
9417  
9418  	ipr_regs = pci_ioremap_bar(pdev, 0);
9419  
9420  	if (!ipr_regs) {
9421  		dev_err(&pdev->dev,
9422  			"Couldn't map memory range of registers\n");
9423  		rc = -ENOMEM;
9424  		goto out_disable;
9425  	}
9426  
9427  	ioa_cfg->hdw_dma_regs = ipr_regs;
9428  	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
9429  	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
9430  
9431  	ipr_init_regs(ioa_cfg);
9432  
9433  	if (ioa_cfg->sis64) {
9434  		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
9435  		if (rc < 0) {
9436  			dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
9437  			rc = dma_set_mask_and_coherent(&pdev->dev,
9438  						       DMA_BIT_MASK(32));
9439  		}
9440  	} else
9441  		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
9442  
9443  	if (rc < 0) {
9444  		dev_err(&pdev->dev, "Failed to set DMA mask\n");
9445  		goto cleanup_nomem;
9446  	}
9447  
9448  	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
9449  				   ioa_cfg->chip_cfg->cache_line_size);
9450  
9451  	if (rc != PCIBIOS_SUCCESSFUL) {
9452  		dev_err(&pdev->dev, "Write of cache line size failed\n");
9453  		ipr_wait_for_pci_err_recovery(ioa_cfg);
9454  		rc = -EIO;
9455  		goto cleanup_nomem;
9456  	}
9457  
9458  	/* Issue MMIO read to ensure card is not in EEH */
9459  	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
9460  	ipr_wait_for_pci_err_recovery(ioa_cfg);
9461  
9462  	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
9463  		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
9464  			IPR_MAX_MSIX_VECTORS);
9465  		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
9466  	}
9467  
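	/* Request MSI-X or MSI vectors when the chip supports them, keeping legacy INTx as a fallback. */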
9468  	irq_flag = PCI_IRQ_INTX;
9469  	if (ioa_cfg->ipr_chip->has_msi)
9470  		irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
9471  	rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
9472  	if (rc < 0) {
9473  		ipr_wait_for_pci_err_recovery(ioa_cfg);
9474  		goto cleanup_nomem;
9475  	}
9476  	ioa_cfg->nvectors = rc;
9477  
9478  	if (!pdev->msi_enabled && !pdev->msix_enabled)
9479  		ioa_cfg->clear_isr = 1;
9480  
9481  	pci_set_master(pdev);
9482  
9483  	if (pci_channel_offline(pdev)) {
9484  		ipr_wait_for_pci_err_recovery(ioa_cfg);
9485  		pci_set_master(pdev);
9486  		if (pci_channel_offline(pdev)) {
9487  			rc = -EIO;
9488  			goto out_msi_disable;
9489  		}
9490  	}
9491  
9492  	if (pdev->msi_enabled || pdev->msix_enabled) {
9493  		rc = ipr_test_msi(ioa_cfg, pdev);
9494  		switch (rc) {
9495  		case 0:
9496  			dev_info(&pdev->dev,
9497  				"Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
9498  				pdev->msix_enabled ? "-X" : "");
9499  			break;
9500  		case -EOPNOTSUPP:
9501  			ipr_wait_for_pci_err_recovery(ioa_cfg);
9502  			pci_free_irq_vectors(pdev);
9503  
9504  			ioa_cfg->nvectors = 1;
9505  			ioa_cfg->clear_isr = 1;
9506  			break;
9507  		default:
9508  			goto out_msi_disable;
9509  		}
9510  	}
9511  
9512  	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
9513  				(unsigned int)num_online_cpus(),
9514  				(unsigned int)IPR_MAX_HRRQ_NUM);
9515  
9516  	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
9517  		goto out_msi_disable;
9518  
9519  	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
9520  		goto out_msi_disable;
9521  
9522  	rc = ipr_alloc_mem(ioa_cfg);
9523  	if (rc < 0) {
9524  		dev_err(&pdev->dev,
9525  			"Couldn't allocate enough memory for device driver!\n");
9526  		goto out_msi_disable;
9527  	}
9528  
9529  	/* Save away PCI config space for use following IOA reset */
9530  	rc = pci_save_state(pdev);
9531  
9532  	if (rc != PCIBIOS_SUCCESSFUL) {
9533  		dev_err(&pdev->dev, "Failed to save PCI config space\n");
9534  		rc = -EIO;
9535  		goto cleanup_nolog;
9536  	}
9537  
9538  	/*
9539  	 * If HRRQ updated interrupt is not masked, or reset alert is set,
9540  	 * the card is in an unknown state and needs a hard reset
9541  	 */
9542  	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
9543  	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
9544  	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
9545  	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
9546  		ioa_cfg->needs_hard_reset = 1;
9547  	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
9548  		ioa_cfg->needs_hard_reset = 1;
9549  	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
9550  		ioa_cfg->ioa_unit_checked = 1;
9551  
9552  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9553  	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
9554  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9555  
9556  	if (pdev->msi_enabled || pdev->msix_enabled) {
9557  		name_msi_vectors(ioa_cfg);
9558  		rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
9559  			ioa_cfg->vectors_info[0].desc,
9560  			&ioa_cfg->hrrq[0]);
9561  		if (!rc)
9562  			rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
9563  	} else {
9564  		rc = request_irq(pdev->irq, ipr_isr,
9565  			 IRQF_SHARED,
9566  			 IPR_NAME, &ioa_cfg->hrrq[0]);
9567  	}
9568  	if (rc) {
9569  		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
9570  			pdev->irq, rc);
9571  		goto cleanup_nolog;
9572  	}
9573  
9574  	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
9575  	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
9576  		ioa_cfg->needs_warm_reset = 1;
9577  		ioa_cfg->reset = ipr_reset_slot_reset;
9578  
9579  		ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
9580  								WQ_MEM_RECLAIM, host->host_no);
9581  
9582  		if (!ioa_cfg->reset_work_q) {
9583  			dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
9584  			rc = -ENOMEM;
9585  			goto out_free_irq;
9586  		}
9587  	} else
9588  		ioa_cfg->reset = ipr_reset_start_bist;
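	/*
	 * Adapters flagged IPR_USE_PCI_WARM_RESET (and revision 0 Obsidian-E
	 * cards) are reset through a PCI slot/warm reset run from the
	 * dedicated workqueue allocated above; all other adapters are reset
	 * by starting the built-in self test.
	 */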
9589  
9590  	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9591  	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
9592  	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9593  
9594  	LEAVE;
9595  out:
9596  	return rc;
9597  
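	/* Error unwind: release resources in the reverse order they were acquired. */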
9598  out_free_irq:
9599  	ipr_free_irqs(ioa_cfg);
9600  cleanup_nolog:
9601  	ipr_free_mem(ioa_cfg);
9602  out_msi_disable:
9603  	ipr_wait_for_pci_err_recovery(ioa_cfg);
9604  	pci_free_irq_vectors(pdev);
9605  cleanup_nomem:
9606  	iounmap(ipr_regs);
9607  out_disable:
9608  	pci_disable_device(pdev);
9609  out_release_regions:
9610  	pci_release_regions(pdev);
9611  out_scsi_host_put:
9612  	scsi_host_put(host);
9613  	goto out;
9614  }
9615  
9616  /**
9617   * ipr_initiate_ioa_bringdown - Bring down an adapter
9618   * @ioa_cfg:		ioa config struct
9619   * @shutdown_type:	shutdown type
9620   *
9621   * Description: This function will initiate bringing down the adapter.
9622   * This consists of issuing an IOA shutdown to the adapter
9623   * to flush the cache, and running BIST.
9624   * If the caller needs to wait on the completion of the reset,
9625   * the caller must sleep on the reset_wait_q.
9626   *
9627   * Return value:
9628   * 	none
9629   **/
9630  static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
9631  				       enum ipr_shutdown_type shutdown_type)
9632  {
9633  	ENTER;
9634  	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9635  		ioa_cfg->sdt_state = ABORT_DUMP;
9636  	ioa_cfg->reset_retries = 0;
9637  	ioa_cfg->in_ioa_bringdown = 1;
9638  	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
9639  	LEAVE;
9640  }
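
/*
 * A minimal usage sketch (mirroring __ipr_remove() and ipr_shutdown() below):
 * the caller holds the host lock while initiating the bringdown and, if it
 * needs to wait for completion, drops the lock and sleeps on reset_wait_q:
 *
 *	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
 *	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
 *	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 *	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
 */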
9641  
9642  /**
9643   * __ipr_remove - Remove a single adapter
9644   * @pdev:	pci device struct
9645   *
9646   * Core adapter teardown used by hot plug remove and probe error paths.
9647   *
9648   * Return value:
9649   * 	none
9650   **/
9651  static void __ipr_remove(struct pci_dev *pdev)
9652  {
9653  	unsigned long host_lock_flags = 0;
9654  	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9655  	int i;
9656  	unsigned long driver_lock_flags;
9657  	ENTER;
9658  
9659  	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9660  	while (ioa_cfg->in_reset_reload) {
9661  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9662  		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9663  		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9664  	}
9665  
9666  	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9667  		spin_lock(&ioa_cfg->hrrq[i]._lock);
9668  		ioa_cfg->hrrq[i].removing_ioa = 1;
9669  		spin_unlock(&ioa_cfg->hrrq[i]._lock);
9670  	}
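	/*
	 * Ensure the removing_ioa flags are visible to all CPUs before the
	 * bringdown is initiated, so no new commands are queued to the HRRQs.
	 */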
9671  	wmb();
9672  	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9673  
9674  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9675  	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9676  	flush_work(&ioa_cfg->work_q);
9677  	if (ioa_cfg->reset_work_q)
9678  		flush_workqueue(ioa_cfg->reset_work_q);
9679  	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9680  	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9681  
9682  	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
9683  	list_del(&ioa_cfg->queue);
9684  	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
9685  
9686  	if (ioa_cfg->sdt_state == ABORT_DUMP)
9687  		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9688  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
9689  
9690  	ipr_free_all_resources(ioa_cfg);
9691  
9692  	LEAVE;
9693  }
9694  
9695  /**
9696   * ipr_remove - IOA hot plug remove entry point
9697   * @pdev:	pci device struct
9698   *
9699   * Adapter hot plug remove entry point.
9700   *
9701   * Return value:
9702   * 	none
9703   **/
9704  static void ipr_remove(struct pci_dev *pdev)
9705  {
9706  	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9707  
9708  	ENTER;
9709  
9710  	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9711  			      &ipr_trace_attr);
9712  	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
9713  			     &ipr_dump_attr);
9714  	sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
9715  			&ipr_ioa_async_err_log);
9716  	scsi_remove_host(ioa_cfg->host);
9717  
9718  	__ipr_remove(pdev);
9719  
9720  	LEAVE;
9721  }
9722  
9723  /**
9724   * ipr_probe - Adapter hot plug add entry point
9725   * @pdev:	pci device struct
9726   * @dev_id:	pci device ID
9727   *
9728   * Return value:
9729   * 	0 on success / non-zero on failure
9730   **/
9731  static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
9732  {
9733  	struct ipr_ioa_cfg *ioa_cfg;
9734  	unsigned long flags;
9735  	int rc, i;
9736  
9737  	rc = ipr_probe_ioa(pdev, dev_id);
9738  
9739  	if (rc)
9740  		return rc;
9741  
9742  	ioa_cfg = pci_get_drvdata(pdev);
9743  	ipr_probe_ioa_part2(ioa_cfg);
9744  
9745  	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
9746  
9747  	if (rc) {
9748  		__ipr_remove(pdev);
9749  		return rc;
9750  	}
9751  
9752  	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
9753  				   &ipr_trace_attr);
9754  
9755  	if (rc) {
9756  		scsi_remove_host(ioa_cfg->host);
9757  		__ipr_remove(pdev);
9758  		return rc;
9759  	}
9760  
9761  	rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
9762  			&ipr_ioa_async_err_log);
9763  
9764  	if (rc) {
9767  		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9768  				&ipr_trace_attr);
9769  		scsi_remove_host(ioa_cfg->host);
9770  		__ipr_remove(pdev);
9771  		return rc;
9772  	}
9773  
9774  	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
9775  				   &ipr_dump_attr);
9776  
9777  	if (rc) {
9778  		sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
9779  				      &ipr_ioa_async_err_log);
9780  		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
9781  				      &ipr_trace_attr);
9782  		scsi_remove_host(ioa_cfg->host);
9783  		__ipr_remove(pdev);
9784  		return rc;
9785  	}
9786  	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9787  	ioa_cfg->scan_enabled = 1;
9788  	schedule_work(&ioa_cfg->work_q);
9789  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
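	/*
	 * All sysfs interfaces are now in place, so the worker thread can
	 * start configuring devices; scsi_scan_host() below performs the
	 * initial scan.
	 */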
9790  
9791  	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
9792  
9793  	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9794  		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
9795  			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
9796  					ioa_cfg->iopoll_weight, ipr_iopoll);
9797  		}
9798  	}
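	/*
	 * On SIS-64 adapters with multiple vectors, HRRQs 1..n are serviced
	 * through irq_poll (NAPI-style polled completion) using the chip's
	 * configured weight, while HRRQ 0 is handled directly in the
	 * interrupt handler.
	 */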
9799  
9800  	scsi_scan_host(ioa_cfg->host);
9801  
9802  	return 0;
9803  }
9804  
9805  /**
9806   * ipr_shutdown - Shutdown handler.
9807   * @pdev:	pci device struct
9808   *
9809   * This function is invoked upon system shutdown/reboot. It issues an
9810   * adapter shutdown to flush the write cache.
9811   *
9812   * Return value:
9813   * 	none
9814   **/
9815  static void ipr_shutdown(struct pci_dev *pdev)
9816  {
9817  	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9818  	unsigned long lock_flags = 0;
9819  	enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
9820  	int i;
9821  
9822  	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9823  	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
9824  		ioa_cfg->iopoll_weight = 0;
9825  		for (i = 1; i < ioa_cfg->hrrq_num; i++)
9826  			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
9827  	}
9828  
9829  	while (ioa_cfg->in_reset_reload) {
9830  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9831  		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9832  		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9833  	}
9834  
9835  	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
9836  		shutdown_type = IPR_SHUTDOWN_QUIESCE;
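	/*
	 * With fast reboot enabled on SIS-64 adapters, use the lighter
	 * quiesce shutdown rather than a full normal shutdown to shorten
	 * reboot time; the IRQs and PCI device are then released below once
	 * the reset completes.
	 */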
9837  
9838  	ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
9839  	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9840  	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9841  	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
9842  		ipr_free_irqs(ioa_cfg);
9843  		pci_disable_device(ioa_cfg->pdev);
9844  	}
9845  }
9846  
9847  static const struct pci_device_id ipr_pci_table[] = {
9848  	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9849  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
9850  	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9851  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
9852  	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9853  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
9854  	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9855  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
9856  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9857  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
9858  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9859  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
9860  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9861  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
9862  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9863  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
9864  		IPR_USE_LONG_TRANSOP_TIMEOUT },
9865  	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9866  	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9867  	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9868  	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9869  	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9870  	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9871  	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9872  	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9873  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9874  	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9875  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9876  	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9877  	      IPR_USE_LONG_TRANSOP_TIMEOUT},
9878  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9879  	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9880  	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9881  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9882  	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
9883  	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9884  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9885  	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
9886  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9887  	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
9888  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9889  	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
9890  	      IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
9891  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
9892  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
9893  	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9894  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
9895  	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9896  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
9897  		IPR_USE_LONG_TRANSOP_TIMEOUT },
9898  	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9899  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
9900  		IPR_USE_LONG_TRANSOP_TIMEOUT },
9901  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9902  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
9903  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9904  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
9905  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9906  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
9907  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9908  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
9909  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9910  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
9911  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9912  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
9913  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9914  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
9915  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9916  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
9917  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9918  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
9919  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9920  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
9921  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9922  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
9923  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9924  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
9925  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9926  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
9927  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9928  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
9929  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9930  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
9931  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9932  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
9933  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9934  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
9935  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9936  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
9937  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9938  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
9939  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9940  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
9941  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9942  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
9943  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9944  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
9945  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9946  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
9947  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9948  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
9949  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9950  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
9951  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
9952  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
9953  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
9954  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
9955  	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
9956  		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
9957  	{ }
9958  };
9959  MODULE_DEVICE_TABLE(pci, ipr_pci_table);
9960  
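/*
 * PCI error recovery (EEH on pSeries) hooks: the PCI core reports a channel
 * error via error_detected, may re-enable MMIO so the driver can assess the
 * adapter, and finally performs a slot reset after which the driver
 * reinitializes the IOA.
 */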
9961  static const struct pci_error_handlers ipr_err_handler = {
9962  	.error_detected = ipr_pci_error_detected,
9963  	.mmio_enabled = ipr_pci_mmio_enabled,
9964  	.slot_reset = ipr_pci_slot_reset,
9965  };
9966  
9967  static struct pci_driver ipr_driver = {
9968  	.name = IPR_NAME,
9969  	.id_table = ipr_pci_table,
9970  	.probe = ipr_probe,
9971  	.remove = ipr_remove,
9972  	.shutdown = ipr_shutdown,
9973  	.err_handler = &ipr_err_handler,
9974  };
9975  
9976  /**
9977   * ipr_halt_done - Shutdown prepare completion
9978   * @ipr_cmd:   ipr command struct
9979   *
9980   * Return value:
9981   * 	none
9982   **/
9983  static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
9984  {
9985  	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9986  }
9987  
9988  /**
9989   * ipr_halt - Issue shutdown prepare to all adapters
9990   * @nb: Notifier block
9991   * @event: Notifier event
9992   * @buf: Notifier data (unused)
9993   *
9994   * Return value:
9995   * 	NOTIFY_OK on success / NOTIFY_DONE if the event is not handled
9996   **/
9997  static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
9998  {
9999  	struct ipr_cmnd *ipr_cmd;
10000  	struct ipr_ioa_cfg *ioa_cfg;
10001  	unsigned long flags = 0, driver_lock_flags;
10002  
10003  	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10004  		return NOTIFY_DONE;
10005  
10006  	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
10007  
10008  	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10009  		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10010  		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10011  		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
10012  			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10013  			continue;
10014  		}
10015  
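		/*
		 * Queue a shutdown prepare (normal) command to the adapter's
		 * own resource handle so it can flush its write cache before
		 * the system goes down.
		 */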
10016  		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10017  		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10018  		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10019  		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10020  		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10021  
10022  		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10023  		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10024  	}
10025  	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
10026  
10027  	return NOTIFY_OK;
10028  }
10029  
10030  static struct notifier_block ipr_notifier = {
10031  	.notifier_call = ipr_halt,
10032  };
10033  
10034  /**
10035   * ipr_init - Module entry point
10036   *
10037   * Return value:
10038   * 	0 on success / negative value on failure
10039   **/
10040  static int __init ipr_init(void)
10041  {
10042  	int rc;
10043  
10044  	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10045  		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10046  
10047  	register_reboot_notifier(&ipr_notifier);
10048  	rc = pci_register_driver(&ipr_driver);
10049  	if (rc) {
10050  		unregister_reboot_notifier(&ipr_notifier);
10051  		return rc;
10052  	}
10053  
10054  	return 0;
10055  }
10056  
10057  /**
10058   * ipr_exit - Module unload
10059   *
10060   * Module unload entry point.
10061   *
10062   * Return value:
10063   * 	none
10064   **/
10065  static void __exit ipr_exit(void)
10066  {
10067  	unregister_reboot_notifier(&ipr_notifier);
10068  	pci_unregister_driver(&ipr_driver);
10069  }
10070  
10071  module_init(ipr_init);
10072  module_exit(ipr_exit);
10073