1  /*
2   * This is the Fusion MPT base driver providing common API layer interface
3   * for access to MPT (Message Passing Technology) firmware.
4   *
5   * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
6   * Copyright (C) 2012-2014  LSI Corporation
7   * Copyright (C) 2013-2014 Avago Technologies
8   *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
9   *
10   * This program is free software; you can redistribute it and/or
11   * modify it under the terms of the GNU General Public License
12   * as published by the Free Software Foundation; either version 2
13   * of the License, or (at your option) any later version.
14   *
15   * This program is distributed in the hope that it will be useful,
16   * but WITHOUT ANY WARRANTY; without even the implied warranty of
17   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18   * GNU General Public License for more details.
19   *
20   * NO WARRANTY
21   * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
22   * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
23   * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
24   * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
25   * solely responsible for determining the appropriateness of using and
26   * distributing the Program and assumes all risks associated with its
27   * exercise of rights under this Agreement, including but not limited to
28   * the risks and costs of program errors, damage to or loss of data,
29   * programs or equipment, and unavailability or interruption of operations.
30  
31   * DISCLAIMER OF LIABILITY
32   * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
33   * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34   * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
35   * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36   * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37   * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
38   * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39  
40   * You should have received a copy of the GNU General Public License
41   * along with this program; if not, write to the Free Software
42   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
43   * USA.
44   */
45  
46  #include <linux/kernel.h>
47  #include <linux/module.h>
48  #include <linux/errno.h>
49  #include <linux/init.h>
50  #include <linux/slab.h>
51  #include <linux/types.h>
52  #include <linux/pci.h>
53  #include <linux/kdev_t.h>
54  #include <linux/blkdev.h>
55  #include <linux/delay.h>
56  #include <linux/interrupt.h>
57  #include <linux/dma-mapping.h>
58  #include <linux/io.h>
59  #include <linux/time.h>
60  #include <linux/ktime.h>
61  #include <linux/kthread.h>
62  #include <asm/page.h>        /* To get host page size per arch */
63  
64  
65  #include "mpt3sas_base.h"
66  
67  static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];
68  
69  
70  #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
71  
72   /* maximum controller queue depth */
73  #define MAX_HBA_QUEUE_DEPTH	30000
74  #define MAX_CHAIN_DEPTH		100000
75  static int max_queue_depth = -1;
76  module_param(max_queue_depth, int, 0444);
77  MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
78  
79  static int max_sgl_entries = -1;
80  module_param(max_sgl_entries, int, 0444);
81  MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
82  
83  static int msix_disable = -1;
84  module_param(msix_disable, int, 0444);
85  MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
86  
87  static int smp_affinity_enable = 1;
88  module_param(smp_affinity_enable, int, 0444);
89  MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
90  
91  static int max_msix_vectors = -1;
92  module_param(max_msix_vectors, int, 0444);
93  MODULE_PARM_DESC(max_msix_vectors,
94  	" max msix vectors");
95  
96  static int irqpoll_weight = -1;
97  module_param(irqpoll_weight, int, 0444);
98  MODULE_PARM_DESC(irqpoll_weight,
99  	"irq poll weight (default= one fourth of HBA queue depth)");
100  
101  static int mpt3sas_fwfault_debug;
102  MODULE_PARM_DESC(mpt3sas_fwfault_debug,
103  	" enable detection of firmware fault and halt firmware - (default=0)");
104  
105  static int perf_mode = -1;
106  module_param(perf_mode, int, 0444);
107  MODULE_PARM_DESC(perf_mode,
108  	"Performance mode (only for Aero/Sea Generation), options:\n\t\t"
109  	"0 - balanced: high iops mode is enabled &\n\t\t"
110  	"interrupt coalescing is enabled only on high iops queues,\n\t\t"
111  	"1 - iops: high iops mode is disabled &\n\t\t"
112  	"interrupt coalescing is enabled on all queues,\n\t\t"
113  	"2 - latency: high iops mode is disabled &\n\t\t"
114  	"interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
115  	"\t\tdefault - default perf_mode is 'balanced'"
116  	);
117  
118  static int poll_queues;
119  module_param(poll_queues, int, 0444);
120  MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
121  	"This parameter is effective only if host_tagset_enable=1.\n\t\t"
122  	"When poll_queues are enabled,\n\t\t"
123  	"perf_mode is set to latency mode.\n\t\t"
124  	);
125  
126  enum mpt3sas_perf_mode {
127  	MPT_PERF_MODE_DEFAULT	= -1,
128  	MPT_PERF_MODE_BALANCED	= 0,
129  	MPT_PERF_MODE_IOPS	= 1,
130  	MPT_PERF_MODE_LATENCY	= 2,
131  };
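/*
 * As a usage example, loading the driver with "modprobe mpt3sas perf_mode=2"
 * should select MPT_PERF_MODE_LATENCY on Aero/Sea controllers, per the
 * MODULE_PARM_DESC above; leaving perf_mode unset keeps MPT_PERF_MODE_DEFAULT
 * (-1), which the description documents as falling back to 'balanced'.
 */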
132  
133  static int
134  _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
135  		u32 ioc_state, int timeout);
136  static int
137  _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
138  static void
139  _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
140  
141  static u32
142  _base_readl_ext_retry(const void __iomem *addr);
143  
144  /**
145   * mpt3sas_base_check_cmd_timeout - check whether a command timed out or
146   *		was terminated due to a host reset, and dump the request
147   *		frame accordingly.
148   *
149   * @ioc:	per adapter object.
150   * @status:	status of the issued command.
151   * @mpi_request: mf request pointer.
152   * @sz:		size of the request buffer.
153   *
154   * Return: 1 if a reset should be issued (timeout), 0 if the command was terminated by a host reset.
155   */
156  u8
157  mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
158  		u8 status, void *mpi_request, int sz)
159  {
160  	u8 issue_reset = 0;
161  
162  	if (!(status & MPT3_CMD_RESET))
163  		issue_reset = 1;
164  
165  	ioc_err(ioc, "Command %s\n",
166  		issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
167  	_debug_dump_mf(mpi_request, sz);
168  
169  	return issue_reset;
170  }
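/*
 * Callers in this file typically reach the helper above through the
 * mpt3sas_check_cmd_timeout() wrapper (see _base_sync_drv_fw_timestamp()
 * below), passing the command status, the request frame and its size in
 * dwords, and letting the wrapper fill in the issue_reset flag.
 */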
171  
172  /**
173   * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
174   * @val: value written to the module parameter.
175   * @kp: kernel parameter descriptor.
176   *
177   * Return: 0 on success, negative error code from param_set_int() on failure.
178   */
179  static int
180  _scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
181  {
182  	int ret = param_set_int(val, kp);
183  	struct MPT3SAS_ADAPTER *ioc;
184  
185  	if (ret)
186  		return ret;
187  
188  	/* global ioc spinlock to protect controller list on list operations */
189  	pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
190  	spin_lock(&gioc_lock);
191  	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
192  		ioc->fwfault_debug = mpt3sas_fwfault_debug;
193  	spin_unlock(&gioc_lock);
194  	return 0;
195  }
196  module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
197  	param_get_int, &mpt3sas_fwfault_debug, 0644);
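/*
 * Because the parameter is registered with mode 0644, it can usually be
 * toggled at runtime as well, e.g.:
 *   echo 1 > /sys/module/mpt3sas/parameters/mpt3sas_fwfault_debug
 * which invokes _scsih_set_fwfault_debug() and propagates the new value to
 * every ioc on mpt3sas_ioc_list.
 */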
198  
199  /**
200   * _base_readl_aero - retry readl up to three times.
201   * @addr: MPT Fusion system interface register address
202   *
203   * Retry the readl() up to three times if it returns zero while
204   * reading the system interface register.
205   */
206  static inline u32
207  _base_readl_aero(const void __iomem *addr)
208  {
209  	u32 i = 0, ret_val;
210  
211  	do {
212  		ret_val = readl(addr);
213  		i++;
214  	} while (ret_val == 0 && i < 3);
215  
216  	return ret_val;
217  }
218  
219  static u32
220  _base_readl_ext_retry(const void __iomem *addr)
221  {
222  	u32 i, ret_val;
223  
224  	for (i = 0 ; i < 30 ; i++) {
225  		ret_val = readl(addr);
226  		if (ret_val != 0)
227  			break;
228  	}
229  
230  	return ret_val;
231  }
232  
233  static inline u32
234  _base_readl(const void __iomem *addr)
235  {
236  	return readl(addr);
237  }
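/*
 * The three readl flavours above (_base_readl_aero, _base_readl_ext_retry and
 * plain _base_readl) are not called directly by most of this file; they are
 * reached through the ioc->base_readl and ioc->base_readl_ext_retry hooks
 * (see mpt3sas_halt_firmware() and mpt3sas_base_mask_interrupts() below), so
 * the retrying variants are presumably wired up only for the controllers
 * that need them.
 */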
238  
239  /**
240   * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
241   *				  in BAR0 space.
242   *
243   * @ioc: per adapter object
244   * @reply: reply message frame(lower 32bit addr)
245   * @index: System request message index.
246   */
247  static void
248  _base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
249  		u32 index)
250  {
251  	/*
252  	 * The first 256 bytes of BAR0 hold the system registers; MPI
253  	 * frames start at offset 256. At most 32 MPI frames are supported,
254  	 * i.e. 32 * 128 = 4K. The mCPU clone of the reply free pool starts here.
255  	 */
256  	u16 cmd_credit = ioc->facts.RequestCredit + 1;
257  	void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
258  			MPI_FRAME_START_OFFSET +
259  			(cmd_credit * ioc->request_sz) + (index * sizeof(u32));
260  
261  	writel(reply, reply_free_iomem);
262  }
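/*
 * Worked example, assuming the mCPU BAR0 layout described in
 * _clone_sg_entries() below (maxCredit 32, 128-byte request frames):
 * cmd_credit = 32, so the clone of reply free index 0 lands at offset
 * 256 + 32 * 128 + 0 * 4 = 4352, the start of the 4352 - 4864 reply free
 * window.
 */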
263  
264  /**
265   * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
266   *				to system/BAR0 region.
267   *
268   * @dst_iomem: Pointer to the destination location in BAR0 space.
269   * @src: Pointer to the Source data.
270   * @size: Size of data to be copied.
271   */
272  static void
273  _base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
274  {
275  	int i;
276  	u32 *src_virt_mem = (u32 *)src;
277  
278  	for (i = 0; i < size/4; i++)
279  		writel((u32)src_virt_mem[i],
280  				(void __iomem *)dst_iomem + (i * 4));
281  }
282  
283  /**
284   * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
285   *
286   * @dst_iomem: Pointer to the destination location in BAR0 space.
287   * @src: Pointer to the Source data.
288   * @size: Size of data to be copied.
289   */
290  static void
291  _base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
292  {
293  	int i;
294  	u32 *src_virt_mem = (u32 *)(src);
295  
296  	for (i = 0; i < size/4; i++)
297  		writel((u32)src_virt_mem[i],
298  			(void __iomem *)dst_iomem + (i * 4));
299  }
300  
301  /**
302   * _base_get_chain - Calculates and Returns virtual chain address
303   *			 for the provided smid in BAR0 space.
304   *
305   * @ioc: per adapter object
306   * @smid: system request message index
307   * @sge_chain_count: Scatter gather chain count.
308   *
309   * Return: the chain address.
310   */
311  static inline void __iomem*
312  _base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
313  		u8 sge_chain_count)
314  {
315  	void __iomem *base_chain, *chain_virt;
316  	u16 cmd_credit = ioc->facts.RequestCredit + 1;
317  
318  	base_chain  = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
319  		(cmd_credit * ioc->request_sz) +
320  		REPLY_FREE_POOL_SIZE;
321  	chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
322  			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
323  	return chain_virt;
324  }
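/*
 * Worked example, under the same assumptions as the BAR0 layout comment in
 * _clone_sg_entries() (maxCredit 32, request_sz 128, 512-byte reply free
 * pool): base_chain = 256 + 32 * 128 + 512 = 4864, and each smid then owns
 * MaxChainDepth consecutive 128-byte chain frames starting there, which
 * matches the 4864 - 17152 chain element window.
 */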
325  
326  /**
327   * _base_get_chain_phys - Calculates and Returns physical address
328   *			in BAR0 for scatter gather chains, for
329   *			the provided smid.
330   *
331   * @ioc: per adapter object
332   * @smid: system request message index
333   * @sge_chain_count: Scatter gather chain count.
334   *
335   * Return: Physical chain address.
336   */
337  static inline phys_addr_t
338  _base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
339  		u8 sge_chain_count)
340  {
341  	phys_addr_t base_chain_phys, chain_phys;
342  	u16 cmd_credit = ioc->facts.RequestCredit + 1;
343  
344  	base_chain_phys  = ioc->chip_phys + MPI_FRAME_START_OFFSET +
345  		(cmd_credit * ioc->request_sz) +
346  		REPLY_FREE_POOL_SIZE;
347  	chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
348  			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
349  	return chain_phys;
350  }
351  
352  /**
353   * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host
354   *			buffer address for the provided smid.
355   *			(each smid gets a 64K buffer, starting at offset 17024)
356   *
357   * @ioc: per adapter object
358   * @smid: system request message index
359   *
360   * Return: Pointer to buffer location in BAR0.
361   */
362  
363  static void __iomem *
364  _base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
365  {
366  	u16 cmd_credit = ioc->facts.RequestCredit + 1;
367  	// Added extra 1 to reach end of chain.
368  	void __iomem *chain_end = _base_get_chain(ioc,
369  			cmd_credit + 1,
370  			ioc->facts.MaxChainDepth);
371  	return chain_end + (smid * 64 * 1024);
372  }
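/*
 * The host buffer region begins just past the last chain frame (hence the
 * cmd_credit + 1 above), and every smid is then given its own 64 KB window
 * (smid * 64 * 1024) within BAR0, as noted in the layout comment in
 * _clone_sg_entries().
 */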
373  
374  /**
375   * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped
376   *		Host buffer Physical address for the provided smid.
377   *		(each smid gets a 64K buffer, starting at offset 17024)
378   *
379   * @ioc: per adapter object
380   * @smid: system request message index
381   *
382   * Return: Pointer to buffer location in BAR0.
383   */
384  static phys_addr_t
385  _base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
386  {
387  	u16 cmd_credit = ioc->facts.RequestCredit + 1;
388  	phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
389  			cmd_credit + 1,
390  			ioc->facts.MaxChainDepth);
391  	return chain_end_phys + (smid * 64 * 1024);
392  }
393  
394  /**
395   * _base_get_chain_buffer_dma_to_chain_buffer - iterate the chain
396   *			lookup list and return the chain_buffer
397   *			address matching the given dma address.
398   *			(each smid gets a 64K buffer, starting at offset 17024)
399   *
400   * @ioc: per adapter object
401   * @chain_buffer_dma: Chain buffer dma address.
402   *
403   * Return: Pointer to the chain buffer, or NULL on failure.
404   */
405  static void *
406  _base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
407  		dma_addr_t chain_buffer_dma)
408  {
409  	u16 index, j;
410  	struct chain_tracker *ct;
411  
412  	for (index = 0; index < ioc->scsiio_depth; index++) {
413  		for (j = 0; j < ioc->chains_needed_per_io; j++) {
414  			ct = &ioc->chain_lookup[index].chains_per_smid[j];
415  			if (ct && ct->chain_buffer_dma == chain_buffer_dma)
416  				return ct->chain_buffer;
417  		}
418  	}
419  	ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
420  	return NULL;
421  }
422  
423  /**
424   * _clone_sg_entries -	MPI Endpoint SCSI IO and config requests
425   *			are handled here. This is the base function
426   *			for double buffering, done before the
427   *			requests are submitted.
428   *
429   * @ioc: per adapter object.
430   * @mpi_request: mf request pointer.
431   * @smid: system request message index.
432   */
433  static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
434  		void *mpi_request, u16 smid)
435  {
436  	Mpi2SGESimple32_t *sgel, *sgel_next;
437  	u32  sgl_flags, sge_chain_count = 0;
438  	bool is_write = false;
439  	u16 i = 0;
440  	void __iomem *buffer_iomem;
441  	phys_addr_t buffer_iomem_phys;
442  	void __iomem *buff_ptr;
443  	phys_addr_t buff_ptr_phys;
444  	void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
445  	void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
446  	phys_addr_t dst_addr_phys;
447  	MPI2RequestHeader_t *request_hdr;
448  	struct scsi_cmnd *scmd;
449  	struct scatterlist *sg_scmd = NULL;
450  	int is_scsiio_req = 0;
451  
452  	request_hdr = (MPI2RequestHeader_t *) mpi_request;
453  
454  	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
455  		Mpi25SCSIIORequest_t *scsiio_request =
456  			(Mpi25SCSIIORequest_t *)mpi_request;
457  		sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
458  		is_scsiio_req = 1;
459  	} else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
460  		Mpi2ConfigRequest_t  *config_req =
461  			(Mpi2ConfigRequest_t *)mpi_request;
462  		sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
463  	} else
464  		return;
465  
466  	/* From the smid we can get the scsi_cmnd; once we have sg_scmd,
467  	 * we just need sg_virt() and sg_next() to get the virtual
468  	 * address associated with sgel->Address.
469  	 */
470  
471  	if (is_scsiio_req) {
472  		/* Get scsi_cmd using smid */
473  		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
474  		if (scmd == NULL) {
475  			ioc_err(ioc, "scmd is NULL\n");
476  			return;
477  		}
478  
479  		/* Get sg_scmd from scmd provided */
480  		sg_scmd = scsi_sglist(scmd);
481  	}
482  
483  	/*
484  	 * 0 - 255	System register
485  	 * 256 - 4352	MPI Frame. (This is based on maxCredit 32)
486  	 * 4352 - 4864	Reply_free pool (512 bytes are reserved
487  	 *		considering maxCredit 32. Replies need extra
488  	 *		room, so for the mCPU case four times
489  	 *		maxCredit is kept).
490  	 * 4864 - 17152	SGE chain element. (32cmd * 3 chain of
491  	 *		128 byte size = 12288)
492  	 * 17152 - x	Host buffer mapped with smid.
493  	 *		(Each smid can have 64K Max IO.)
494  	 * BAR0+Last 1K MSIX Addr and Data
495  	 * Total size in use 2113664 bytes of 4MB BAR0
496  	 */
497  
498  	buffer_iomem = _base_get_buffer_bar0(ioc, smid);
499  	buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);
500  
501  	buff_ptr = buffer_iomem;
502  	buff_ptr_phys = buffer_iomem_phys;
503  	WARN_ON(buff_ptr_phys > U32_MAX);
504  
505  	if (le32_to_cpu(sgel->FlagsLength) &
506  			(MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
507  		is_write = true;
508  
509  	for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {
510  
511  		sgl_flags =
512  		    (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);
513  
514  		switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
515  		case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
516  			/*
517  			 * The helper returns the chain_buffer for the
518  			 * given chain_buffer_dma, i.e. the virtual
519  			 * address corresponding to sgel->Address.
520  			 */
521  			sgel_next =
522  				_base_get_chain_buffer_dma_to_chain_buffer(ioc,
523  						le32_to_cpu(sgel->Address));
524  			if (sgel_next == NULL)
525  				return;
526  			/*
527  			 * This is copying a 128 byte chain
528  			 * frame (not a host buffer).
529  			 */
530  			dst_chain_addr[sge_chain_count] =
531  				_base_get_chain(ioc,
532  					smid, sge_chain_count);
533  			src_chain_addr[sge_chain_count] =
534  						(void *) sgel_next;
535  			dst_addr_phys = _base_get_chain_phys(ioc,
536  						smid, sge_chain_count);
537  			WARN_ON(dst_addr_phys > U32_MAX);
538  			sgel->Address =
539  				cpu_to_le32(lower_32_bits(dst_addr_phys));
540  			sgel = sgel_next;
541  			sge_chain_count++;
542  			break;
543  		case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
544  			if (is_write) {
545  				if (is_scsiio_req) {
546  					_base_clone_to_sys_mem(buff_ptr,
547  					    sg_virt(sg_scmd),
548  					    (le32_to_cpu(sgel->FlagsLength) &
549  					    0x00ffffff));
550  					/*
551  					 * FIXME: this relies on a zero
552  					 * PCI mem_offset.
553  					 */
554  					sgel->Address =
555  					    cpu_to_le32((u32)buff_ptr_phys);
556  				} else {
557  					_base_clone_to_sys_mem(buff_ptr,
558  					    ioc->config_vaddr,
559  					    (le32_to_cpu(sgel->FlagsLength) &
560  					    0x00ffffff));
561  					sgel->Address =
562  					    cpu_to_le32((u32)buff_ptr_phys);
563  				}
564  			}
565  			buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
566  			    0x00ffffff);
567  			buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
568  			    0x00ffffff);
569  			if ((le32_to_cpu(sgel->FlagsLength) &
570  			    (MPI2_SGE_FLAGS_END_OF_BUFFER
571  					<< MPI2_SGE_FLAGS_SHIFT)))
572  				goto eob_clone_chain;
573  			else {
574  				/*
575  				 * Every single element in MPT will have an
576  				 * associated sg_next. It is better to sanity
577  				 * check that sg_next is not NULL, but it would
578  				 * be a bug if it were.
579  				 */
580  				if (is_scsiio_req) {
581  					sg_scmd = sg_next(sg_scmd);
582  					if (sg_scmd)
583  						sgel++;
584  					else
585  						goto eob_clone_chain;
586  				}
587  			}
588  			break;
589  		}
590  	}
591  
592  eob_clone_chain:
593  	for (i = 0; i < sge_chain_count; i++) {
594  		if (is_scsiio_req)
595  			_base_clone_to_sys_mem(dst_chain_addr[i],
596  				src_chain_addr[i], ioc->request_sz);
597  	}
598  }
599  
600  /**
601   *  mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
602   * @arg: input argument, used to derive ioc
603   *
604   * Return:
605   * 0 if the controller is removed from the pci subsystem.
606   * -1 otherwise.
607   */
608  static int mpt3sas_remove_dead_ioc_func(void *arg)
609  {
610  	struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
611  	struct pci_dev *pdev;
612  
613  	if (!ioc)
614  		return -1;
615  
616  	pdev = ioc->pdev;
617  	if (!pdev)
618  		return -1;
619  	pci_stop_and_remove_bus_device_locked(pdev);
620  	return 0;
621  }
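/*
 * This helper runs in its own kernel thread (see the kthread_run() call in
 * _base_fault_reset_work() below) because
 * pci_stop_and_remove_bus_device_locked() sleeps and may take a while,
 * which is why the remove_host flag is set before the thread is spawned.
 */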
622  
623  /**
624   * _base_sync_drv_fw_timestamp - Sync the Driver-FW TimeStamp.
625   * @ioc: Per Adapter Object
626   *
627   * Return: nothing.
628   */
629  static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc)
630  {
631  	Mpi26IoUnitControlRequest_t *mpi_request;
632  	Mpi26IoUnitControlReply_t *mpi_reply;
633  	u16 smid;
634  	ktime_t current_time;
635  	u64 TimeStamp = 0;
636  	u8 issue_reset = 0;
637  
638  	mutex_lock(&ioc->scsih_cmds.mutex);
639  	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
640  		ioc_err(ioc, "scsih_cmd in use %s\n", __func__);
641  		goto out;
642  	}
643  	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
644  	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
645  	if (!smid) {
646  		ioc_err(ioc, "Failed obtaining a smid %s\n", __func__);
647  		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
648  		goto out;
649  	}
650  	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
651  	ioc->scsih_cmds.smid = smid;
652  	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
653  	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
654  	mpi_request->Operation = MPI26_CTRL_OP_SET_IOC_PARAMETER;
655  	mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP;
656  	current_time = ktime_get_real();
657  	TimeStamp = ktime_to_ms(current_time);
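	/*
	 * The 64-bit millisecond timestamp is split across the request: the
	 * upper 32 bits go in Reserved7 and the lower 32 bits in
	 * IOCParameterValue, matching the SYNC_TIMESTAMP IOC parameter set
	 * above.
	 */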
658  	mpi_request->Reserved7 = cpu_to_le32(TimeStamp >> 32);
659  	mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
660  	init_completion(&ioc->scsih_cmds.done);
661  	ioc->put_smid_default(ioc, smid);
662  	dinitprintk(ioc, ioc_info(ioc,
663  	    "Io Unit Control Sync TimeStamp (sending), @time %lld ms\n",
664  	    TimeStamp));
665  	wait_for_completion_timeout(&ioc->scsih_cmds.done,
666  		MPT3SAS_TIMESYNC_TIMEOUT_SECONDS*HZ);
667  	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
668  		mpt3sas_check_cmd_timeout(ioc,
669  		    ioc->scsih_cmds.status, mpi_request,
670  		    sizeof(Mpi2SasIoUnitControlRequest_t)/4, issue_reset);
671  		goto issue_host_reset;
672  	}
673  	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
674  		mpi_reply = ioc->scsih_cmds.reply;
675  		dinitprintk(ioc, ioc_info(ioc,
676  		    "Io Unit Control sync timestamp (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
677  		    le16_to_cpu(mpi_reply->IOCStatus),
678  		    le32_to_cpu(mpi_reply->IOCLogInfo)));
679  	}
680  issue_host_reset:
681  	if (issue_reset)
682  		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
683  	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
684  out:
685  	mutex_unlock(&ioc->scsih_cmds.mutex);
686  }
687  
688  /**
689   * _base_fault_reset_work - workq handling ioc fault conditions
690   * @work: input argument, used to derive ioc
691   *
692   * Context: sleep.
693   */
694  static void
695  _base_fault_reset_work(struct work_struct *work)
696  {
697  	struct MPT3SAS_ADAPTER *ioc =
698  	    container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
699  	unsigned long	 flags;
700  	u32 doorbell;
701  	int rc;
702  	struct task_struct *p;
703  
704  
705  	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
706  	if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
707  			ioc->pci_error_recovery)
708  		goto rearm_timer;
709  	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
710  
711  	doorbell = mpt3sas_base_get_iocstate(ioc, 0);
712  	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
713  		ioc_err(ioc, "SAS host is non-operational !!!!\n");
714  
715  		/* It may be possible that EEH recovery can resolve some pci
716  		 * bus failure issues rather than removing the dead ioc
717  		 * function outright on the assumption that the controller is
718  		 * in a non-operational state. So priority is given to EEH
719  		 * recovery here. If it does not resolve the issue, the
720  		 * mpt3sas driver will consider the controller non-operational
721  		 * and remove the dead ioc function.
722  		 */
723  		if (ioc->non_operational_loop++ < 5) {
724  			spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
725  							 flags);
726  			goto rearm_timer;
727  		}
728  
729  		/*
730  		 * Call _scsih_flush_pending_cmds callback so that we flush all
731  		 * pending commands back to OS. This call is required to avoid
732  		 * deadlock at block layer. Dead IOC will fail to do diag reset,
733  		 * and this call is safe since dead ioc will never return any
734  		 * command back from HW.
735  		 */
736  		mpt3sas_base_pause_mq_polling(ioc);
737  		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
738  		/*
739  		 * Set remove_host flag early since kernel thread will
740  		 * take some time to execute.
741  		 */
742  		ioc->remove_host = 1;
743  		/* Remove the dead host */
744  		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
745  		    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
746  		if (IS_ERR(p))
747  			ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
748  				__func__);
749  		else
750  			ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
751  				__func__);
752  		return; /* don't rearm timer */
753  	}
754  
755  	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
756  		u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
757  		    ioc->manu_pg11.CoreDumpTOSec :
758  		    MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
759  
760  		timeout /= (FAULT_POLLING_INTERVAL/1000);
761  
762  		if (ioc->ioc_coredump_loop == 0) {
763  			mpt3sas_print_coredump_info(ioc,
764  			    doorbell & MPI2_DOORBELL_DATA_MASK);
765  			/* do not accept any IOs and disable the interrupts */
766  			spin_lock_irqsave(
767  			    &ioc->ioc_reset_in_progress_lock, flags);
768  			ioc->shost_recovery = 1;
769  			spin_unlock_irqrestore(
770  			    &ioc->ioc_reset_in_progress_lock, flags);
771  			mpt3sas_base_mask_interrupts(ioc);
772  			mpt3sas_base_pause_mq_polling(ioc);
773  			_base_clear_outstanding_commands(ioc);
774  		}
775  
776  		ioc_info(ioc, "%s: CoreDump loop %d.",
777  		    __func__, ioc->ioc_coredump_loop);
778  
779  		/* Wait until CoreDump completes or times out */
780  		if (ioc->ioc_coredump_loop++ < timeout) {
781  			spin_lock_irqsave(
782  			    &ioc->ioc_reset_in_progress_lock, flags);
783  			goto rearm_timer;
784  		}
785  	}
786  
787  	if (ioc->ioc_coredump_loop) {
788  		if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP)
789  			ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d",
790  			    __func__, ioc->ioc_coredump_loop);
791  		else
792  			ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d",
793  			    __func__, ioc->ioc_coredump_loop);
794  		ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE;
795  	}
796  	ioc->non_operational_loop = 0;
797  	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
798  		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
799  		ioc_warn(ioc, "%s: hard reset: %s\n",
800  			 __func__, rc == 0 ? "success" : "failed");
801  		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
802  		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
803  			mpt3sas_print_fault_code(ioc, doorbell &
804  			    MPI2_DOORBELL_DATA_MASK);
805  		} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
806  		    MPI2_IOC_STATE_COREDUMP)
807  			mpt3sas_print_coredump_info(ioc, doorbell &
808  			    MPI2_DOORBELL_DATA_MASK);
809  		if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
810  		    MPI2_IOC_STATE_OPERATIONAL)
811  			return; /* don't rearm timer */
812  	}
813  	ioc->ioc_coredump_loop = 0;
814  	if (ioc->time_sync_interval &&
815  	    ++ioc->timestamp_update_count >= ioc->time_sync_interval) {
816  		ioc->timestamp_update_count = 0;
817  		_base_sync_drv_fw_timestamp(ioc);
818  	}
819  	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
820   rearm_timer:
821  	if (ioc->fault_reset_work_q)
822  		queue_delayed_work(ioc->fault_reset_work_q,
823  		    &ioc->fault_reset_work,
824  		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
825  	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
826  }
827  
828  /**
829   * mpt3sas_base_start_watchdog - start the fault_reset_work_q
830   * @ioc: per adapter object
831   *
832   * Context: sleep.
833   */
834  void
835  mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
836  {
837  	unsigned long	 flags;
838  
839  	if (ioc->fault_reset_work_q)
840  		return;
841  
842  	ioc->timestamp_update_count = 0;
843  	/* initialize fault polling */
844  
845  	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
846  	snprintf(ioc->fault_reset_work_q_name,
847  	    sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
848  	    ioc->driver_name, ioc->id);
849  	ioc->fault_reset_work_q = alloc_ordered_workqueue(
850  		"%s", WQ_MEM_RECLAIM, ioc->fault_reset_work_q_name);
851  	if (!ioc->fault_reset_work_q) {
852  		ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
853  		return;
854  	}
855  	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
856  	if (ioc->fault_reset_work_q)
857  		queue_delayed_work(ioc->fault_reset_work_q,
858  		    &ioc->fault_reset_work,
859  		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
860  	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
861  }
862  
863  /**
864   * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
865   * @ioc: per adapter object
866   *
867   * Context: sleep.
868   */
869  void
870  mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
871  {
872  	unsigned long flags;
873  	struct workqueue_struct *wq;
874  
875  	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
876  	wq = ioc->fault_reset_work_q;
877  	ioc->fault_reset_work_q = NULL;
878  	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
879  	if (wq) {
880  		if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
881  			flush_workqueue(wq);
882  		destroy_workqueue(wq);
883  	}
884  }
885  
886  /**
887   * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
888   * @ioc: per adapter object
889   * @fault_code: fault code
890   */
891  void
892  mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
893  {
894  	ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
895  }
896  
897  /**
898   * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state
899   * @ioc: per adapter object
900   * @fault_code: fault code
901   *
902   * Return: nothing.
903   */
904  void
905  mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
906  {
907  	ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code);
908  }
909  
910  /**
911   * mpt3sas_base_wait_for_coredump_completion - Wait until coredump
912   * completes or times out
913   * @ioc: per adapter object
914   * @caller: caller function name
915   *
916   * Return: 0 for success, non-zero for failure.
917   */
918  int
919  mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
920  		const char *caller)
921  {
922  	u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
923  			ioc->manu_pg11.CoreDumpTOSec :
924  			MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS;
925  
926  	int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
927  					timeout);
928  
929  	if (ioc_state)
930  		ioc_err(ioc,
931  		    "%s: CoreDump timed out. (ioc_state=0x%x)\n",
932  		    caller, ioc_state);
933  	else
934  		ioc_info(ioc,
935  		    "%s: CoreDump completed. (ioc_state=0x%x)\n",
936  		    caller, ioc_state);
937  
938  	return ioc_state;
939  }
940  
941  /**
942   * mpt3sas_halt_firmware - halt the mpt controller firmware
943   * @ioc: per adapter object
944   *
945   * For debugging timeout related issues.  Writing 0xC0FFEE00
946   * to the doorbell register will halt the controller firmware. The
947   * intent is to stop both the driver and the firmware so that the
948   * end user can obtain a ring buffer from the controller UART.
949   */
950  void
951  mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
952  {
953  	u32 doorbell;
954  
955  	if (!ioc->fwfault_debug)
956  		return;
957  
958  	dump_stack();
959  
960  	doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
961  	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
962  		mpt3sas_print_fault_code(ioc, doorbell &
963  		    MPI2_DOORBELL_DATA_MASK);
964  	} else if ((doorbell & MPI2_IOC_STATE_MASK) ==
965  	    MPI2_IOC_STATE_COREDUMP) {
966  		mpt3sas_print_coredump_info(ioc, doorbell &
967  		    MPI2_DOORBELL_DATA_MASK);
968  	} else {
969  		writel(0xC0FFEE00, &ioc->chip->Doorbell);
970  		ioc_err(ioc, "Firmware is halted due to command timeout\n");
971  	}
972  
973  	if (ioc->fwfault_debug == 2)
974  		for (;;)
975  			;
976  	else
977  		panic("panic in %s\n", __func__);
978  }
979  
980  /**
981   * _base_sas_ioc_info - verbose translation of the ioc status
982   * @ioc: per adapter object
983   * @mpi_reply: reply mf payload returned from firmware
984   * @request_hdr: request mf
985   */
986  static void
987  _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
988  	MPI2RequestHeader_t *request_hdr)
989  {
990  	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
991  	    MPI2_IOCSTATUS_MASK;
992  	char *desc = NULL;
993  	u16 frame_sz;
994  	char *func_str = NULL;
995  
996  	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
997  	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
998  	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
999  	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
1000  		return;
1001  
1002  	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
1003  		return;
1004  	/*
1005  	 * Older firmware versions don't support driver trigger pages,
1006  	 * so skip displaying the 'config invalid type' type
1007  	 * of error message.
1008  	 */
1009  	if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
1010  		Mpi2ConfigRequest_t *rqst = (Mpi2ConfigRequest_t *)request_hdr;
1011  
1012  		if ((rqst->ExtPageType ==
1013  		    MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER) &&
1014  		    !(ioc->logging_level & MPT_DEBUG_CONFIG)) {
1015  			return;
1016  		}
1017  	}
1018  
1019  	switch (ioc_status) {
1020  
1021  /****************************************************************************
1022  *  Common IOCStatus values for all replies
1023  ****************************************************************************/
1024  
1025  	case MPI2_IOCSTATUS_INVALID_FUNCTION:
1026  		desc = "invalid function";
1027  		break;
1028  	case MPI2_IOCSTATUS_BUSY:
1029  		desc = "busy";
1030  		break;
1031  	case MPI2_IOCSTATUS_INVALID_SGL:
1032  		desc = "invalid sgl";
1033  		break;
1034  	case MPI2_IOCSTATUS_INTERNAL_ERROR:
1035  		desc = "internal error";
1036  		break;
1037  	case MPI2_IOCSTATUS_INVALID_VPID:
1038  		desc = "invalid vpid";
1039  		break;
1040  	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
1041  		desc = "insufficient resources";
1042  		break;
1043  	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
1044  		desc = "insufficient power";
1045  		break;
1046  	case MPI2_IOCSTATUS_INVALID_FIELD:
1047  		desc = "invalid field";
1048  		break;
1049  	case MPI2_IOCSTATUS_INVALID_STATE:
1050  		desc = "invalid state";
1051  		break;
1052  	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
1053  		desc = "op state not supported";
1054  		break;
1055  
1056  /****************************************************************************
1057  *  Config IOCStatus values
1058  ****************************************************************************/
1059  
1060  	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
1061  		desc = "config invalid action";
1062  		break;
1063  	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
1064  		desc = "config invalid type";
1065  		break;
1066  	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
1067  		desc = "config invalid page";
1068  		break;
1069  	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
1070  		desc = "config invalid data";
1071  		break;
1072  	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
1073  		desc = "config no defaults";
1074  		break;
1075  	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
1076  		desc = "config can't commit";
1077  		break;
1078  
1079  /****************************************************************************
1080  *  SCSI IO Reply
1081  ****************************************************************************/
1082  
1083  	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
1084  	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
1085  	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1086  	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
1087  	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
1088  	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
1089  	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
1090  	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
1091  	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
1092  	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
1093  	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
1094  	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
1095  		break;
1096  
1097  /****************************************************************************
1098  *  For use by SCSI Initiator and SCSI Target end-to-end data protection
1099  ****************************************************************************/
1100  
1101  	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
1102  		desc = "eedp guard error";
1103  		break;
1104  	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
1105  		desc = "eedp ref tag error";
1106  		break;
1107  	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
1108  		desc = "eedp app tag error";
1109  		break;
1110  
1111  /****************************************************************************
1112  *  SCSI Target values
1113  ****************************************************************************/
1114  
1115  	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
1116  		desc = "target invalid io index";
1117  		break;
1118  	case MPI2_IOCSTATUS_TARGET_ABORTED:
1119  		desc = "target aborted";
1120  		break;
1121  	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
1122  		desc = "target no conn retryable";
1123  		break;
1124  	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
1125  		desc = "target no connection";
1126  		break;
1127  	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
1128  		desc = "target xfer count mismatch";
1129  		break;
1130  	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
1131  		desc = "target data offset error";
1132  		break;
1133  	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
1134  		desc = "target too much write data";
1135  		break;
1136  	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
1137  		desc = "target iu too short";
1138  		break;
1139  	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
1140  		desc = "target ack nak timeout";
1141  		break;
1142  	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
1143  		desc = "target nak received";
1144  		break;
1145  
1146  /****************************************************************************
1147  *  Serial Attached SCSI values
1148  ****************************************************************************/
1149  
1150  	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
1151  		desc = "smp request failed";
1152  		break;
1153  	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
1154  		desc = "smp data overrun";
1155  		break;
1156  
1157  /****************************************************************************
1158  *  Diagnostic Buffer Post / Diagnostic Release values
1159  ****************************************************************************/
1160  
1161  	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
1162  		desc = "diagnostic released";
1163  		break;
1164  	default:
1165  		break;
1166  	}
1167  
1168  	if (!desc)
1169  		return;
1170  
1171  	switch (request_hdr->Function) {
1172  	case MPI2_FUNCTION_CONFIG:
1173  		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
1174  		func_str = "config_page";
1175  		break;
1176  	case MPI2_FUNCTION_SCSI_TASK_MGMT:
1177  		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
1178  		func_str = "task_mgmt";
1179  		break;
1180  	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
1181  		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
1182  		func_str = "sas_iounit_ctl";
1183  		break;
1184  	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
1185  		frame_sz = sizeof(Mpi2SepRequest_t);
1186  		func_str = "enclosure";
1187  		break;
1188  	case MPI2_FUNCTION_IOC_INIT:
1189  		frame_sz = sizeof(Mpi2IOCInitRequest_t);
1190  		func_str = "ioc_init";
1191  		break;
1192  	case MPI2_FUNCTION_PORT_ENABLE:
1193  		frame_sz = sizeof(Mpi2PortEnableRequest_t);
1194  		func_str = "port_enable";
1195  		break;
1196  	case MPI2_FUNCTION_SMP_PASSTHROUGH:
1197  		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
1198  		func_str = "smp_passthru";
1199  		break;
1200  	case MPI2_FUNCTION_NVME_ENCAPSULATED:
1201  		frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
1202  		    ioc->sge_size;
1203  		func_str = "nvme_encapsulated";
1204  		break;
1205  	default:
1206  		frame_sz = 32;
1207  		func_str = "unknown";
1208  		break;
1209  	}
1210  
1211  	ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
1212  		 desc, ioc_status, request_hdr, func_str);
1213  
1214  	_debug_dump_mf(request_hdr, frame_sz/4);
1215  }
1216  
1217  /**
1218   * _base_display_event_data - verbose translation of firmware async events
1219   * @ioc: per adapter object
1220   * @mpi_reply: reply mf payload returned from firmware
1221   */
1222  static void
1223  _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
1224  	Mpi2EventNotificationReply_t *mpi_reply)
1225  {
1226  	char *desc = NULL;
1227  	u16 event;
1228  
1229  	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
1230  		return;
1231  
1232  	event = le16_to_cpu(mpi_reply->Event);
1233  
1234  	switch (event) {
1235  	case MPI2_EVENT_LOG_DATA:
1236  		desc = "Log Data";
1237  		break;
1238  	case MPI2_EVENT_STATE_CHANGE:
1239  		desc = "Status Change";
1240  		break;
1241  	case MPI2_EVENT_HARD_RESET_RECEIVED:
1242  		desc = "Hard Reset Received";
1243  		break;
1244  	case MPI2_EVENT_EVENT_CHANGE:
1245  		desc = "Event Change";
1246  		break;
1247  	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
1248  		desc = "Device Status Change";
1249  		break;
1250  	case MPI2_EVENT_IR_OPERATION_STATUS:
1251  		if (!ioc->hide_ir_msg)
1252  			desc = "IR Operation Status";
1253  		break;
1254  	case MPI2_EVENT_SAS_DISCOVERY:
1255  	{
1256  		Mpi2EventDataSasDiscovery_t *event_data =
1257  		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
1258  		ioc_info(ioc, "Discovery: (%s)",
1259  			 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
1260  			 "start" : "stop");
1261  		if (event_data->DiscoveryStatus)
1262  			pr_cont(" discovery_status(0x%08x)",
1263  			    le32_to_cpu(event_data->DiscoveryStatus));
1264  		pr_cont("\n");
1265  		return;
1266  	}
1267  	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
1268  		desc = "SAS Broadcast Primitive";
1269  		break;
1270  	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
1271  		desc = "SAS Init Device Status Change";
1272  		break;
1273  	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
1274  		desc = "SAS Init Table Overflow";
1275  		break;
1276  	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
1277  		desc = "SAS Topology Change List";
1278  		break;
1279  	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
1280  		desc = "SAS Enclosure Device Status Change";
1281  		break;
1282  	case MPI2_EVENT_IR_VOLUME:
1283  		if (!ioc->hide_ir_msg)
1284  			desc = "IR Volume";
1285  		break;
1286  	case MPI2_EVENT_IR_PHYSICAL_DISK:
1287  		if (!ioc->hide_ir_msg)
1288  			desc = "IR Physical Disk";
1289  		break;
1290  	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
1291  		if (!ioc->hide_ir_msg)
1292  			desc = "IR Configuration Change List";
1293  		break;
1294  	case MPI2_EVENT_LOG_ENTRY_ADDED:
1295  		if (!ioc->hide_ir_msg)
1296  			desc = "Log Entry Added";
1297  		break;
1298  	case MPI2_EVENT_TEMP_THRESHOLD:
1299  		desc = "Temperature Threshold";
1300  		break;
1301  	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
1302  		desc = "Cable Event";
1303  		break;
1304  	case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
1305  		desc = "SAS Device Discovery Error";
1306  		break;
1307  	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
1308  		desc = "PCIE Device Status Change";
1309  		break;
1310  	case MPI2_EVENT_PCIE_ENUMERATION:
1311  	{
1312  		Mpi26EventDataPCIeEnumeration_t *event_data =
1313  			(Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
1314  		ioc_info(ioc, "PCIE Enumeration: (%s)",
1315  			 event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
1316  			 "start" : "stop");
1317  		if (event_data->EnumerationStatus)
1318  			pr_cont("enumeration_status(0x%08x)",
1319  				le32_to_cpu(event_data->EnumerationStatus));
1320  		pr_cont("\n");
1321  		return;
1322  	}
1323  	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
1324  		desc = "PCIE Topology Change List";
1325  		break;
1326  	}
1327  
1328  	if (!desc)
1329  		return;
1330  
1331  	ioc_info(ioc, "%s\n", desc);
1332  }
1333  
1334  /**
1335   * _base_sas_log_info - verbose translation of firmware log info
1336   * @ioc: per adapter object
1337   * @log_info: log info
1338   */
1339  static void
1340  _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
1341  {
1342  	union loginfo_type {
1343  		u32	loginfo;
1344  		struct {
1345  			u32	subcode:16;
1346  			u32	code:8;
1347  			u32	originator:4;
1348  			u32	bus_type:4;
1349  		} dw;
1350  	};
1351  	union loginfo_type sas_loginfo;
1352  	char *originator_str = NULL;
1353  
1354  	sas_loginfo.loginfo = log_info;
1355  	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
1356  		return;
1357  
1358  	/* each nexus loss loginfo */
1359  	if (log_info == 0x31170000)
1360  		return;
1361  
1362  	/* eat the loginfos associated with task aborts */
1363  	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
1364  	    0x31140000 || log_info == 0x31130000))
1365  		return;
1366  
1367  	switch (sas_loginfo.dw.originator) {
1368  	case 0:
1369  		originator_str = "IOP";
1370  		break;
1371  	case 1:
1372  		originator_str = "PL";
1373  		break;
1374  	case 2:
1375  		if (!ioc->hide_ir_msg)
1376  			originator_str = "IR";
1377  		else
1378  			originator_str = "WarpDrive";
1379  		break;
1380  	}
1381  
1382  	ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
1383  		 log_info,
1384  		 originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
1385  }
1386  
1387  /**
1388   * _base_display_reply_info - handle reply descriptors depending on IOC Status
1389   * @ioc: per adapter object
1390   * @smid: system request message index
1391   * @msix_index: MSIX table index supplied by the OS
1392   * @reply: reply message frame (lower 32bit addr)
1393   */
1394  static void
1395  _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1396  	u32 reply)
1397  {
1398  	MPI2DefaultReply_t *mpi_reply;
1399  	u16 ioc_status;
1400  	u32 loginfo = 0;
1401  
1402  	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1403  	if (unlikely(!mpi_reply)) {
1404  		ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
1405  			__FILE__, __LINE__, __func__);
1406  		return;
1407  	}
1408  	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
1409  
1410  	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
1411  	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
1412  		_base_sas_ioc_info(ioc, mpi_reply,
1413  		   mpt3sas_base_get_msg_frame(ioc, smid));
1414  	}
1415  
1416  	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
1417  		loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
1418  		_base_sas_log_info(ioc, loginfo);
1419  	}
1420  
1421  	if (ioc_status || loginfo) {
1422  		ioc_status &= MPI2_IOCSTATUS_MASK;
1423  		mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
1424  	}
1425  }
1426  
1427  /**
1428   * mpt3sas_base_done - base internal command completion routine
1429   * @ioc: per adapter object
1430   * @smid: system request message index
1431   * @msix_index: MSIX table index supplied by the OS
1432   * @reply: reply message frame(lower 32bit addr)
1433   *
1434   * Return:
1435   * 1 meaning mf should be freed from _base_interrupt
1436   * 0 means the mf is freed from this function.
1437   */
1438  u8
1439  mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1440  	u32 reply)
1441  {
1442  	MPI2DefaultReply_t *mpi_reply;
1443  
1444  	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1445  	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
1446  		return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
1447  
1448  	if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
1449  		return 1;
1450  
1451  	ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
1452  	if (mpi_reply) {
1453  		ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
1454  		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
1455  	}
1456  	ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
1457  
1458  	complete(&ioc->base_cmds.done);
1459  	return 1;
1460  }
1461  
1462  /**
1463   * _base_async_event - main callback handler for firmware async events
1464   * @ioc: per adapter object
1465   * @msix_index: MSIX table index supplied by the OS
1466   * @reply: reply message frame(lower 32bit addr)
1467   *
1468   * Return:
1469   * 1 meaning mf should be freed from _base_interrupt
1470   * 0 means the mf is freed from this function.
1471   */
1472  static u8
1473  _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
1474  {
1475  	Mpi2EventNotificationReply_t *mpi_reply;
1476  	Mpi2EventAckRequest_t *ack_request;
1477  	u16 smid;
1478  	struct _event_ack_list *delayed_event_ack;
1479  
1480  	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1481  	if (!mpi_reply)
1482  		return 1;
1483  	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
1484  		return 1;
1485  
1486  	_base_display_event_data(ioc, mpi_reply);
1487  
1488  	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
1489  		goto out;
1490  	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
1491  	if (!smid) {
1492  		delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
1493  					GFP_ATOMIC);
1494  		if (!delayed_event_ack)
1495  			goto out;
1496  		INIT_LIST_HEAD(&delayed_event_ack->list);
1497  		delayed_event_ack->Event = mpi_reply->Event;
1498  		delayed_event_ack->EventContext = mpi_reply->EventContext;
1499  		list_add_tail(&delayed_event_ack->list,
1500  				&ioc->delayed_event_ack_list);
1501  		dewtprintk(ioc,
1502  			   ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
1503  				    le16_to_cpu(mpi_reply->Event)));
1504  		goto out;
1505  	}
1506  
1507  	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
1508  	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
1509  	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
1510  	ack_request->Event = mpi_reply->Event;
1511  	ack_request->EventContext = mpi_reply->EventContext;
1512  	ack_request->VF_ID = 0;  /* TODO */
1513  	ack_request->VP_ID = 0;
1514  	ioc->put_smid_default(ioc, smid);
1515  
1516   out:
1517  
1518  	/* scsih callback handler */
1519  	mpt3sas_scsih_event_callback(ioc, msix_index, reply);
1520  
1521  	/* ctl callback handler */
1522  	mpt3sas_ctl_event_callback(ioc, msix_index, reply);
1523  
1524  	return 1;
1525  }
1526  
1527  static struct scsiio_tracker *
1528  _get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1529  {
1530  	struct scsi_cmnd *cmd;
1531  
1532  	if (WARN_ON(!smid) ||
1533  	    WARN_ON(smid >= ioc->hi_priority_smid))
1534  		return NULL;
1535  
1536  	cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1537  	if (cmd)
1538  		return scsi_cmd_priv(cmd);
1539  
1540  	return NULL;
1541  }
1542  
1543  /**
1544   * _base_get_cb_idx - obtain the callback index
1545   * @ioc: per adapter object
1546   * @smid: system request message index
1547   *
1548   * Return: callback index.
1549   */
1550  static u8
1551  _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1552  {
1553  	int i;
1554  	u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
1555  	u8 cb_idx = 0xFF;
1556  
1557  	if (smid < ioc->hi_priority_smid) {
1558  		struct scsiio_tracker *st;
1559  
1560  		if (smid < ctl_smid) {
1561  			st = _get_st_from_smid(ioc, smid);
1562  			if (st)
1563  				cb_idx = st->cb_idx;
1564  		} else if (smid == ctl_smid)
1565  			cb_idx = ioc->ctl_cb_idx;
1566  	} else if (smid < ioc->internal_smid) {
1567  		i = smid - ioc->hi_priority_smid;
1568  		cb_idx = ioc->hpr_lookup[i].cb_idx;
1569  	} else if (smid <= ioc->hba_queue_depth) {
1570  		i = smid - ioc->internal_smid;
1571  		cb_idx = ioc->internal_lookup[i].cb_idx;
1572  	}
1573  	return cb_idx;
1574  }
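/*
 * smid ranges decoded above, from lowest to highest: driver SCSI IO smids
 * (resolved through the scsiio tracker), a reserved ctl smid at the start of
 * the internal SCSI IO slots, then the hi-priority and internal command
 * regions, each with its own cb_idx lookup table; anything unmatched keeps
 * the 0xFF sentinel.
 */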
1575  
1576  /**
1577   * mpt3sas_base_pause_mq_polling - pause polling on the mq poll queues
1578   *				when the driver is flushing out the IOs.
1579   * @ioc: per adapter object
1580   *
1581   * Pause polling on the mq poll (io_uring) queues while the driver is
1582   * flushing out the IOs. Otherwise we may hit a race condition where the
1583   * same IO is completed from two paths.
1584   *
1585   * Returns nothing.
1586   */
1587  void
1588  mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc)
1589  {
1590  	int iopoll_q_count =
1591  	    ioc->reply_queue_count - ioc->iopoll_q_start_index;
1592  	int qid;
1593  
1594  	for (qid = 0; qid < iopoll_q_count; qid++)
1595  		atomic_set(&ioc->io_uring_poll_queues[qid].pause, 1);
1596  
1597  	/*
1598  	 * wait for current poll to complete.
1599  	 */
1600  	for (qid = 0; qid < iopoll_q_count; qid++) {
1601  		while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) {
1602  			cpu_relax();
1603  			udelay(500);
1604  		}
1605  	}
1606  }
1607  
1608  /**
1609   * mpt3sas_base_resume_mq_polling - Resume polling on mq poll queues.
1610   * @ioc: per adapter object
1611   *
1612   * Returns nothing.
1613   */
1614  void
1615  mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc)
1616  {
1617  	int iopoll_q_count =
1618  	    ioc->reply_queue_count - ioc->iopoll_q_start_index;
1619  	int qid;
1620  
1621  	for (qid = 0; qid < iopoll_q_count; qid++)
1622  		atomic_set(&ioc->io_uring_poll_queues[qid].pause, 0);
1623  }
1624  
1625  /**
1626   * mpt3sas_base_mask_interrupts - disable interrupts
1627   * @ioc: per adapter object
1628   *
1629   * Disabling ResetIRQ, Reply and Doorbell Interrupts
1630   */
1631  void
1632  mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1633  {
1634  	u32 him_register;
1635  
1636  	ioc->mask_interrupts = 1;
1637  	him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1638  	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
1639  	writel(him_register, &ioc->chip->HostInterruptMask);
1640  	ioc->base_readl(&ioc->chip->HostInterruptMask);
1641  }
1642  
1643  /**
1644   * mpt3sas_base_unmask_interrupts - enable interrupts
1645   * @ioc: per adapter object
1646   *
1647   * Enabling only Reply Interrupts
1648   */
1649  void
1650  mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1651  {
1652  	u32 him_register;
1653  
1654  	him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1655  	him_register &= ~MPI2_HIM_RIM;
1656  	writel(him_register, &ioc->chip->HostInterruptMask);
1657  	ioc->mask_interrupts = 0;
1658  }
1659  
1660  union reply_descriptor {
1661  	u64 word;
1662  	struct {
1663  		u32 low;
1664  		u32 high;
1665  	} u;
1666  };
1667  
1668  static u32 base_mod64(u64 dividend, u32 divisor)
1669  {
1670  	u32 remainder;
1671  
1672  	if (!divisor)
1673  		pr_err("mpt3sas: DIVISOR is zero, in div fn\n");
1674  	remainder = do_div(dividend, divisor);
1675  	return remainder;
1676  }
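/*
 * Usage note (illustrative, not part of the driver): do_div() divides the
 * 64-bit dividend in place and returns the 32-bit remainder, which is why the
 * wrapper takes the dividend by value and only hands back the remainder.  An
 * open-coded '%' on a u64 is avoided because the kernel does not link the
 * compiler helpers that 64-bit modulo would need on 32-bit architectures.
 * For example:
 *
 *	u64 dividend = 1000003;
 *	u32 rem = base_mod64(dividend, 7);	// rem == 4, dividend unchanged
 */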
1677  
1678  /**
1679   * _base_process_reply_queue - Process reply descriptors from reply
1680   *		descriptor post queue.
1681   * @reply_q: per IRQ's reply queue object.
1682   *
1683   * Return: number of reply descriptors processed from reply
1684   *		descriptor queue.
1685   */
1686  static int
1687  _base_process_reply_queue(struct adapter_reply_queue *reply_q)
1688  {
1689  	union reply_descriptor rd;
1690  	u64 completed_cmds;
1691  	u8 request_descript_type;
1692  	u16 smid;
1693  	u8 cb_idx;
1694  	u32 reply;
1695  	u8 msix_index = reply_q->msix_index;
1696  	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1697  	Mpi2ReplyDescriptorsUnion_t *rpf;
1698  	u8 rc;
1699  
1700  	completed_cmds = 0;
1701  	if (!atomic_add_unless(&reply_q->busy, 1, 1))
1702  		return completed_cmds;
1703  
1704  	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
1705  	request_descript_type = rpf->Default.ReplyFlags
1706  	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1707  	if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
1708  		atomic_dec(&reply_q->busy);
1709  		return completed_cmds;
1710  	}
1711  
1712  	cb_idx = 0xFF;
1713  	do {
1714  		rd.word = le64_to_cpu(rpf->Words);
1715  		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
1716  			goto out;
1717  		reply = 0;
1718  		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
1719  		if (request_descript_type ==
1720  		    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
1721  		    request_descript_type ==
1722  		    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
1723  		    request_descript_type ==
1724  		    MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
1725  			cb_idx = _base_get_cb_idx(ioc, smid);
1726  			if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1727  			    (likely(mpt_callbacks[cb_idx] != NULL))) {
1728  				rc = mpt_callbacks[cb_idx](ioc, smid,
1729  				    msix_index, 0);
1730  				if (rc)
1731  					mpt3sas_base_free_smid(ioc, smid);
1732  			}
1733  		} else if (request_descript_type ==
1734  		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
1735  			reply = le32_to_cpu(
1736  			    rpf->AddressReply.ReplyFrameAddress);
1737  			if (reply > ioc->reply_dma_max_address ||
1738  			    reply < ioc->reply_dma_min_address)
1739  				reply = 0;
1740  			if (smid) {
1741  				cb_idx = _base_get_cb_idx(ioc, smid);
1742  				if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1743  				    (likely(mpt_callbacks[cb_idx] != NULL))) {
1744  					rc = mpt_callbacks[cb_idx](ioc, smid,
1745  					    msix_index, reply);
1746  					if (reply)
1747  						_base_display_reply_info(ioc,
1748  						    smid, msix_index, reply);
1749  					if (rc)
1750  						mpt3sas_base_free_smid(ioc,
1751  						    smid);
1752  				}
1753  			} else {
1754  				_base_async_event(ioc, msix_index, reply);
1755  			}
1756  
1757  			/* reply free queue handling */
1758  			if (reply) {
1759  				ioc->reply_free_host_index =
1760  				    (ioc->reply_free_host_index ==
1761  				    (ioc->reply_free_queue_depth - 1)) ?
1762  				    0 : ioc->reply_free_host_index + 1;
1763  				ioc->reply_free[ioc->reply_free_host_index] =
1764  				    cpu_to_le32(reply);
1765  				if (ioc->is_mcpu_endpoint)
1766  					_base_clone_reply_to_sys_mem(ioc,
1767  						reply,
1768  						ioc->reply_free_host_index);
1769  				writel(ioc->reply_free_host_index,
1770  				    &ioc->chip->ReplyFreeHostIndex);
1771  			}
1772  		}
1773  
1774  		rpf->Words = cpu_to_le64(ULLONG_MAX);
1775  		reply_q->reply_post_host_index =
1776  		    (reply_q->reply_post_host_index ==
1777  		    (ioc->reply_post_queue_depth - 1)) ? 0 :
1778  		    reply_q->reply_post_host_index + 1;
1779  		request_descript_type =
1780  		    reply_q->reply_post_free[reply_q->reply_post_host_index].
1781  		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1782  		completed_cmds++;
1783  		/* Update the reply post host index after continuously
1784  		 * processing the threshold number of Reply Descriptors,
1785  		 * so that the firmware can find enough free entries to post
1786  		 * new Reply Descriptors in the reply descriptor post queue.
1787  		 */
1788  		if (completed_cmds >= ioc->thresh_hold) {
1789  			if (ioc->combined_reply_queue) {
1790  				writel(reply_q->reply_post_host_index |
1791  						((msix_index  & 7) <<
1792  						 MPI2_RPHI_MSIX_INDEX_SHIFT),
1793  				    ioc->replyPostRegisterIndex[msix_index/8]);
1794  			} else {
1795  				writel(reply_q->reply_post_host_index |
1796  						(msix_index <<
1797  						 MPI2_RPHI_MSIX_INDEX_SHIFT),
1798  						&ioc->chip->ReplyPostHostIndex);
1799  			}
1800  			if (!reply_q->is_iouring_poll_q &&
1801  			    !reply_q->irq_poll_scheduled) {
1802  				reply_q->irq_poll_scheduled = true;
1803  				irq_poll_sched(&reply_q->irqpoll);
1804  			}
1805  			atomic_dec(&reply_q->busy);
1806  			return completed_cmds;
1807  		}
1808  		if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1809  			goto out;
1810  		if (!reply_q->reply_post_host_index)
1811  			rpf = reply_q->reply_post_free;
1812  		else
1813  			rpf++;
1814  	} while (1);
1815  
1816   out:
1817  
1818  	if (!completed_cmds) {
1819  		atomic_dec(&reply_q->busy);
1820  		return completed_cmds;
1821  	}
1822  
1823  	if (ioc->is_warpdrive) {
1824  		writel(reply_q->reply_post_host_index,
1825  		ioc->reply_post_host_index[msix_index]);
1826  		atomic_dec(&reply_q->busy);
1827  		return completed_cmds;
1828  	}
1829  
1830  	/* Update Reply Post Host Index.
1831  	 * For those HBA's which support combined reply queue feature
1832  	 * 1. Get the correct Supplemental Reply Post Host Index Register.
1833  	 *    i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
1834  	 *    Index Register address bank i.e replyPostRegisterIndex[],
1835  	 * 2. Then update this register with new reply host index value
1836  	 *    in ReplyPostIndex field and the MSIxIndex field with
1837  	 *    msix_index value reduced to a value between 0 and 7,
1838  	 *    using a modulo 8 operation. Since each Supplemental Reply Post
1839  	 *    Host Index Register supports 8 MSI-X vectors.
1840  	 *
1841  	 * For other HBA's just update the Reply Post Host Index register with
1842  	 * new reply host index value in ReplyPostIndex Field and msix_index
1843  	 * value in MSIxIndex field.
1844  	 */
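	/*
	 * Worked example (illustrative): with msix_index = 21 the update below
	 * goes to replyPostRegisterIndex[21 / 8], i.e. bank entry 2, and the
	 * MSIxIndex field carries 21 & 7 == 5, so the value written is
	 * reply_post_host_index | (5 << MPI2_RPHI_MSIX_INDEX_SHIFT).
	 */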
1845  	if (ioc->combined_reply_queue)
1846  		writel(reply_q->reply_post_host_index | ((msix_index  & 7) <<
1847  			MPI2_RPHI_MSIX_INDEX_SHIFT),
1848  			ioc->replyPostRegisterIndex[msix_index/8]);
1849  	else
1850  		writel(reply_q->reply_post_host_index | (msix_index <<
1851  			MPI2_RPHI_MSIX_INDEX_SHIFT),
1852  			&ioc->chip->ReplyPostHostIndex);
1853  	atomic_dec(&reply_q->busy);
1854  	return completed_cmds;
1855  }
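/*
 * Illustrative sketch (not part of the driver): stripped of the descriptor
 * decoding, the routine above is a single-consumer ring walk.  The host owns
 * an entry until it writes the all-ones sentinel back and advances its index
 * modulo the queue depth; "ring", "idx", "depth" and handle() below are
 * stand-ins for reply_post_free, reply_post_host_index,
 * reply_post_queue_depth and the callback dispatch:
 *
 *	while (ring[idx].Words != cpu_to_le64(ULLONG_MAX)) {
 *		handle(&ring[idx]);
 *		ring[idx].Words = cpu_to_le64(ULLONG_MAX);
 *		idx = (idx == depth - 1) ? 0 : idx + 1;
 *	}
 *	writel(idx, reply_post_host_index_register);
 */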
1856  
1857  /**
1858   * mpt3sas_blk_mq_poll - poll the blk mq poll queue
1859   * @shost: Scsi_Host object
1860   * @queue_num: hw ctx queue number
1861   *
1862   * Return: number of entries processed from the poll queue.
1863   */
1864  int mpt3sas_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
1865  {
1866  	struct MPT3SAS_ADAPTER *ioc =
1867  	    (struct MPT3SAS_ADAPTER *)shost->hostdata;
1868  	struct adapter_reply_queue *reply_q;
1869  	int num_entries = 0;
1870  	int qid = queue_num - ioc->iopoll_q_start_index;
1871  
1872  	if (atomic_read(&ioc->io_uring_poll_queues[qid].pause) ||
1873  	    !atomic_add_unless(&ioc->io_uring_poll_queues[qid].busy, 1, 1))
1874  		return 0;
1875  
1876  	reply_q = ioc->io_uring_poll_queues[qid].reply_q;
1877  
1878  	num_entries = _base_process_reply_queue(reply_q);
1879  	atomic_dec(&ioc->io_uring_poll_queues[qid].busy);
1880  
1881  	return num_entries;
1882  }
1883  
1884  /**
1885   * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
1886   * @irq: irq number (not used)
1887   * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
1888   *
1889   * Return: IRQ_HANDLED if processed, else IRQ_NONE.
1890   */
1891  static irqreturn_t
1892  _base_interrupt(int irq, void *bus_id)
1893  {
1894  	struct adapter_reply_queue *reply_q = bus_id;
1895  	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1896  
1897  	if (ioc->mask_interrupts)
1898  		return IRQ_NONE;
1899  	if (reply_q->irq_poll_scheduled)
1900  		return IRQ_HANDLED;
1901  	return ((_base_process_reply_queue(reply_q) > 0) ?
1902  			IRQ_HANDLED : IRQ_NONE);
1903  }
1904  
1905  /**
1906   * _base_irqpoll - IRQ poll callback handler
1907   * @irqpoll: irq_poll object
1908   * @budget: irq poll weight
1909   *
1910   * Return: number of reply descriptors processed
1911   */
1912  static int
1913  _base_irqpoll(struct irq_poll *irqpoll, int budget)
1914  {
1915  	struct adapter_reply_queue *reply_q;
1916  	int num_entries = 0;
1917  
1918  	reply_q = container_of(irqpoll, struct adapter_reply_queue,
1919  			irqpoll);
1920  	if (reply_q->irq_line_enable) {
1921  		disable_irq_nosync(reply_q->os_irq);
1922  		reply_q->irq_line_enable = false;
1923  	}
1924  	num_entries = _base_process_reply_queue(reply_q);
1925  	if (num_entries < budget) {
1926  		irq_poll_complete(irqpoll);
1927  		reply_q->irq_poll_scheduled = false;
1928  		reply_q->irq_line_enable = true;
1929  		enable_irq(reply_q->os_irq);
1930  		/*
1931  		 * Go for one more round of processing the
1932  		 * reply descriptor post queue in case the HBA
1933  		 * Firmware has posted some reply descriptors
1934  		 * while reenabling the IRQ.
1935  		 */
1936  		_base_process_reply_queue(reply_q);
1937  	}
1938  
1939  	return num_entries;
1940  }
1941  
1942  /**
1943   * _base_init_irqpolls - initialize IRQ polls
1944   * @ioc: per adapter object
1945   *
1946   * Return: nothing
1947   */
1948  static void
1949  _base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
1950  {
1951  	struct adapter_reply_queue *reply_q, *next;
1952  
1953  	if (list_empty(&ioc->reply_queue_list))
1954  		return;
1955  
1956  	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1957  		if (reply_q->is_iouring_poll_q)
1958  			continue;
1959  		irq_poll_init(&reply_q->irqpoll,
1960  			ioc->hba_queue_depth/4, _base_irqpoll);
1961  		reply_q->irq_poll_scheduled = false;
1962  		reply_q->irq_line_enable = true;
1963  		reply_q->os_irq = pci_irq_vector(ioc->pdev,
1964  		    reply_q->msix_index);
1965  	}
1966  }
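/*
 * Illustrative note (not part of the driver): the irq_poll objects set up
 * above follow a NAPI-style lifecycle.  Once a hard interrupt has processed
 * ioc->thresh_hold descriptors, _base_process_reply_queue() calls
 * irq_poll_sched(), subsequent line interrupts are short-circuited in
 * _base_interrupt(), and the irq_poll core keeps invoking _base_irqpoll()
 * with a budget until irq_poll_complete() is called:
 *
 *	irq_poll_init(&reply_q->irqpoll, budget, _base_irqpoll);  // setup
 *	irq_poll_sched(&reply_q->irqpoll);                        // from hard IRQ
 *	irq_poll_complete(&reply_q->irqpoll);                     // when done
 */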
1967  
1968  /**
1969   * _base_is_controller_msix_enabled - does the controller support multi-reply queues
1970   * @ioc: per adapter object
1971   *
1972   * Return: Whether or not MSI/X is enabled.
1973   */
1974  static inline int
1975  _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1976  {
1977  	return (ioc->facts.IOCCapabilities &
1978  	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1979  }
1980  
1981  /**
1982   * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
1983   * @ioc: per adapter object
1984   * @poll: poll over reply descriptor pools in case the interrupt for a
1985   *		timed-out SCSI command got delayed
1986   * Context: non-ISR context
1987   *
1988   * Called when a Task Management request has completed.
1989   */
1990  void
1991  mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
1992  {
1993  	struct adapter_reply_queue *reply_q;
1994  
1995  	/* If MSIX capability is turned off
1996  	 * then multi-queues are not enabled
1997  	 */
1998  	if (!_base_is_controller_msix_enabled(ioc))
1999  		return;
2000  
2001  	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
2002  		if (ioc->shost_recovery || ioc->remove_host ||
2003  				ioc->pci_error_recovery)
2004  			return;
2005  		/* TMs are on msix_index == 0 */
2006  		if (reply_q->msix_index == 0)
2007  			continue;
2008  
2009  		if (reply_q->is_iouring_poll_q) {
2010  			_base_process_reply_queue(reply_q);
2011  			continue;
2012  		}
2013  
2014  		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
2015  		if (reply_q->irq_poll_scheduled) {
2016  			/* Calling irq_poll_disable will wait for any pending
2017  			 * callbacks to have completed.
2018  			 */
2019  			irq_poll_disable(&reply_q->irqpoll);
2020  			irq_poll_enable(&reply_q->irqpoll);
2021  			/* check how the scheduled poll has ended,
2022  			 * clean up only if necessary
2023  			 */
2024  			if (reply_q->irq_poll_scheduled) {
2025  				reply_q->irq_poll_scheduled = false;
2026  				reply_q->irq_line_enable = true;
2027  				enable_irq(reply_q->os_irq);
2028  			}
2029  		}
2030  
2031  		if (poll)
2032  			_base_process_reply_queue(reply_q);
2033  	}
2034  }
2035  
2036  /**
2037   * mpt3sas_base_release_callback_handler - clear interrupt callback handler
2038   * @cb_idx: callback index
2039   */
2040  void
2041  mpt3sas_base_release_callback_handler(u8 cb_idx)
2042  {
2043  	mpt_callbacks[cb_idx] = NULL;
2044  }
2045  
2046  /**
2047   * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
2048   * @cb_func: callback function
2049   *
2050   * Return: Index of @cb_func.
2051   */
2052  u8
2053  mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
2054  {
2055  	u8 cb_idx;
2056  
2057  	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
2058  		if (mpt_callbacks[cb_idx] == NULL)
2059  			break;
2060  
2061  	mpt_callbacks[cb_idx] = cb_func;
2062  	return cb_idx;
2063  }
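/*
 * Usage sketch (illustrative): sub-modules register a completion routine once
 * at load time and pass the returned index in every request they issue;
 * _base_process_reply_queue() then dispatches replies through
 * mpt_callbacks[cb_idx].  A hypothetical client (not a real driver symbol)
 * would look like:
 *
 *	static u8 example_cb_idx;
 *
 *	static u8 example_done(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 *			       u8 msix_index, u32 reply)
 *	{
 *		return 1;	// non-zero tells the base layer to free the smid
 *	}
 *
 *	example_cb_idx = mpt3sas_base_register_callback_handler(example_done);
 */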
2064  
2065  /**
2066   * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
2067   */
2068  void
2069  mpt3sas_base_initialize_callback_handler(void)
2070  {
2071  	u8 cb_idx;
2072  
2073  	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
2074  		mpt3sas_base_release_callback_handler(cb_idx);
2075  }
2076  
2077  
2078  /**
2079   * _base_build_zero_len_sge - build zero length sg entry
2080   * @ioc: per adapter object
2081   * @paddr: virtual address for SGE
2082   *
2083   * Create a zero length scatter gather entry to ensure the IOC's hardware has
2084   * something to use if the target device goes brain dead and tries
2085   * to send data even when none is asked for.
2086   */
2087  static void
2088  _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
2089  {
2090  	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
2091  	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
2092  	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
2093  	    MPI2_SGE_FLAGS_SHIFT);
2094  	ioc->base_add_sg_single(paddr, flags_length, -1);
2095  }
2096  
2097  /**
2098   * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
2099   * @paddr: virtual address for SGE
2100   * @flags_length: SGE flags and data transfer length
2101   * @dma_addr: Physical address
2102   */
2103  static void
2104  _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
2105  {
2106  	Mpi2SGESimple32_t *sgel = paddr;
2107  
2108  	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
2109  	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
2110  	sgel->FlagsLength = cpu_to_le32(flags_length);
2111  	sgel->Address = cpu_to_le32(dma_addr);
2112  }
2113  
2114  
2115  /**
2116   * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
2117   * @paddr: virtual address for SGE
2118   * @flags_length: SGE flags and data transfer length
2119   * @dma_addr: Physical address
2120   */
2121  static void
2122  _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
2123  {
2124  	Mpi2SGESimple64_t *sgel = paddr;
2125  
2126  	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
2127  	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
2128  	sgel->FlagsLength = cpu_to_le32(flags_length);
2129  	sgel->Address = cpu_to_le64(dma_addr);
2130  }
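/*
 * Illustrative note: in both helpers above, FlagsLength packs the SGE flags
 * into the high-order bits (via MPI2_SGE_FLAGS_SHIFT) and the transfer length
 * into the low-order bits, so one le32 describes the element.  A 64-bit write
 * SGE of "len" bytes would be assembled roughly as:
 *
 *	u32 fl = len | ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
 *			 MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
 *			 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
 *			MPI2_SGE_FLAGS_SHIFT);
 *	sgel->FlagsLength = cpu_to_le32(fl);
 */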
2131  
2132  /**
2133   * _base_get_chain_buffer_tracker - obtain chain tracker
2134   * @ioc: per adapter object
2135   * @scmd: SCSI commands of the IO request
2136   *
2137   * Return: chain tracker from chain_lookup table using key as
2138   * smid and smid's chain_offset.
2139   */
2140  static struct chain_tracker *
2141  _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
2142  			       struct scsi_cmnd *scmd)
2143  {
2144  	struct chain_tracker *chain_req;
2145  	struct scsiio_tracker *st = scsi_cmd_priv(scmd);
2146  	u16 smid = st->smid;
2147  	u8 chain_offset =
2148  	   atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);
2149  
2150  	if (chain_offset == ioc->chains_needed_per_io)
2151  		return NULL;
2152  
2153  	chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
2154  	atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
2155  	return chain_req;
2156  }
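/*
 * Usage sketch (illustrative): the SG-building routines below call this
 * helper once per chain segment.  chain_offset acts as a per-smid cursor into
 * the pre-allocated chains_per_smid[] array and is reset elsewhere when the
 * request completes; NULL means this command already consumed all
 * chains_needed_per_io buffers:
 *
 *	chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
 *	if (!chain_req)
 *		return -1;	// out of chain buffers for this IO
 *	chain = chain_req->chain_buffer;
 *	chain_dma = chain_req->chain_buffer_dma;
 */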
2157  
2158  
2159  /**
2160   * _base_build_sg - build generic sg
2161   * @ioc: per adapter object
2162   * @psge: virtual address for SGE
2163   * @data_out_dma: physical address for WRITES
2164   * @data_out_sz: data xfer size for WRITES
2165   * @data_in_dma: physical address for READS
2166   * @data_in_sz: data xfer size for READS
2167   */
2168  static void
2169  _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
2170  	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2171  	size_t data_in_sz)
2172  {
2173  	u32 sgl_flags;
2174  
2175  	if (!data_out_sz && !data_in_sz) {
2176  		_base_build_zero_len_sge(ioc, psge);
2177  		return;
2178  	}
2179  
2180  	if (data_out_sz && data_in_sz) {
2181  		/* WRITE sgel first */
2182  		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2183  		    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
2184  		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2185  		ioc->base_add_sg_single(psge, sgl_flags |
2186  		    data_out_sz, data_out_dma);
2187  
2188  		/* incr sgel */
2189  		psge += ioc->sge_size;
2190  
2191  		/* READ sgel last */
2192  		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2193  		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2194  		    MPI2_SGE_FLAGS_END_OF_LIST);
2195  		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2196  		ioc->base_add_sg_single(psge, sgl_flags |
2197  		    data_in_sz, data_in_dma);
2198  	} else if (data_out_sz) /* WRITE */ {
2199  		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2200  		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2201  		    MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
2202  		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2203  		ioc->base_add_sg_single(psge, sgl_flags |
2204  		    data_out_sz, data_out_dma);
2205  	} else if (data_in_sz) /* READ */ {
2206  		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2207  		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2208  		    MPI2_SGE_FLAGS_END_OF_LIST);
2209  		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2210  		ioc->base_add_sg_single(psge, sgl_flags |
2211  		    data_in_sz, data_in_dma);
2212  	}
2213  }
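/*
 * Usage sketch (illustrative): an internal request that both writes and reads
 * gets a two-element SGL, WRITE first and READ last.  Assuming "psge" points
 * at the request's SGL field and both buffers are already DMA-mapped
 * (out_dma/out_len/in_dma/in_len are placeholders, not driver symbols):
 *
 *	_base_build_sg(ioc, psge, out_dma, out_len, in_dma, in_len);
 *
 * Passing zero for both lengths degenerates to the zero-length SGE built by
 * _base_build_zero_len_sge() above.
 */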
2214  
2215  /* IEEE format sgls */
2216  
2217  /**
2218   * _base_build_nvme_prp - This function is called for NVMe end devices to build
2219   *                        a native SGL (NVMe PRP).
2220   * @ioc: per adapter object
2221   * @smid: system request message index for getting associated SGL
2222   * @nvme_encap_request: the NVMe request msg frame pointer
2223   * @data_out_dma: physical address for WRITES
2224   * @data_out_sz: data xfer size for WRITES
2225   * @data_in_dma: physical address for READS
2226   * @data_in_sz: data xfer size for READS
2227   *
2228   * The native SGL is built starting in the first PRP
2229   * entry of the NVMe message (PRP1).  If the data buffer is small enough to be
2230   * described entirely using PRP1, then PRP2 is not used.  If needed, PRP2 is
2231   * used to describe a larger data buffer.  If the data buffer is too large to
2232   * describe using the two PRP entries inside the NVMe message, then PRP1
2233   * describes the first data memory segment, and PRP2 contains a pointer to a PRP
2234   * list located elsewhere in memory to describe the remaining data memory
2235   * segments.  The PRP list will be contiguous.
2236   *
2237   * The native SGL for NVMe devices is a Physical Region Page (PRP).  A PRP
2238   * consists of a list of PRP entries to describe a number of noncontiguous
2239   * physical memory segments as a single memory buffer, just as an SGL does.  Note
2240   * however, that this function is only used by the IOCTL call, so the memory
2241   * given will be guaranteed to be contiguous.  There is no need to translate
2242   * non-contiguous SGL into a PRP in this case.  All PRPs will describe
2243   * contiguous space that is one page size each.
2244   *
2245   * Each NVMe message contains two PRP entries.  The first (PRP1) either contains
2246   * a PRP list pointer or a PRP element, depending upon the command.  PRP2
2247   * contains the second PRP element if the memory being described fits within 2
2248   * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
2249   *
2250   * A PRP list pointer contains the address of a PRP list, structured as a linear
2251   * array of PRP entries.  Each PRP entry in this list describes a segment of
2252   * physical memory.
2253   *
2254   * Each 64-bit PRP entry comprises an address and an offset field.  The address
2255   * always points at the beginning of a 4KB physical memory page, and the offset
2256   * describes where within that 4KB page the memory segment begins.  Only the
2257   * first element in a PRP list may contain a non-zero offset, implying that all
2258   * memory segments following the first begin at the start of a 4KB page.
2259   *
2260   * Each PRP element normally describes 4KB of physical memory, with exceptions
2261   * for the first and last elements in the list.  If the memory being described
2262   * by the list begins at a non-zero offset within the first 4KB page, then the
2263   * first PRP element will contain a non-zero offset indicating where the region
2264   * begins within the 4KB page.  The last memory segment may end before the end
2265   * of the 4KB segment, depending upon the overall size of the memory being
2266   * described by the PRP list.
2267   *
2268   * Since PRP entries lack any indication of size, the overall data buffer length
2269   * is used to determine where the end of the data memory buffer is located, and
2270   * how many PRP entries are required to describe it.
2271   */
2272  static void
2273  _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2274  	Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
2275  	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2276  	size_t data_in_sz)
2277  {
2278  	int		prp_size = NVME_PRP_SIZE;
2279  	__le64		*prp_entry, *prp1_entry, *prp2_entry;
2280  	__le64		*prp_page;
2281  	dma_addr_t	prp_entry_dma, prp_page_dma, dma_addr;
2282  	u32		offset, entry_len;
2283  	u32		page_mask_result, page_mask;
2284  	size_t		length;
2285  	struct mpt3sas_nvme_cmd *nvme_cmd =
2286  		(void *)nvme_encap_request->NVMe_Command;
2287  
2288  	/*
2289  	 * Not all commands require a data transfer. If no data, just return
2290  	 * without constructing any PRP.
2291  	 */
2292  	if (!data_in_sz && !data_out_sz)
2293  		return;
2294  	prp1_entry = &nvme_cmd->prp1;
2295  	prp2_entry = &nvme_cmd->prp2;
2296  	prp_entry = prp1_entry;
2297  	/*
2298  	 * For the PRP entries, use the specially allocated buffer of
2299  	 * contiguous memory.
2300  	 */
2301  	prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
2302  	prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2303  
2304  	/*
2305  	 * Check if we are within 1 entry of a page boundary; we don't
2306  	 * want our first entry to be a PRP List entry.
2307  	 */
2308  	page_mask = ioc->page_size - 1;
2309  	page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
2310  	if (!page_mask_result) {
2311  		/* Bump up to next page boundary. */
2312  		prp_page = (__le64 *)((u8 *)prp_page + prp_size);
2313  		prp_page_dma = prp_page_dma + prp_size;
2314  	}
2315  
2316  	/*
2317  	 * Set PRP physical pointer, which initially points to the current PRP
2318  	 * DMA memory page.
2319  	 */
2320  	prp_entry_dma = prp_page_dma;
2321  
2322  	/* Get physical address and length of the data buffer. */
2323  	if (data_in_sz) {
2324  		dma_addr = data_in_dma;
2325  		length = data_in_sz;
2326  	} else {
2327  		dma_addr = data_out_dma;
2328  		length = data_out_sz;
2329  	}
2330  
2331  	/* Loop while the length is not zero. */
2332  	while (length) {
2333  		/*
2334  		 * Check if we need to put a list pointer here if we are at
2335  		 * page boundary - prp_size (8 bytes).
2336  		 */
2337  		page_mask_result = (prp_entry_dma + prp_size) & page_mask;
2338  		if (!page_mask_result) {
2339  			/*
2340  			 * This is the last entry in a PRP List, so we need to
2341  			 * put a PRP list pointer here.  What this does is:
2342  			 *   - bump the current memory pointer to the next
2343  			 *     address, which will be the next full page.
2344  			 *   - set the PRP Entry to point to that page.  This
2345  			 *     is now the PRP List pointer.
2346  			 *   - bump the PRP Entry pointer to the start of the
2347  			 *     next page.  Since all of this PRP memory is
2348  			 *     contiguous, no need to get a new page - it's
2349  			 *     just the next address.
2350  			 */
2351  			prp_entry_dma++;
2352  			*prp_entry = cpu_to_le64(prp_entry_dma);
2353  			prp_entry++;
2354  		}
2355  
2356  		/* Need to handle if entry will be part of a page. */
2357  		offset = dma_addr & page_mask;
2358  		entry_len = ioc->page_size - offset;
2359  
2360  		if (prp_entry == prp1_entry) {
2361  			/*
2362  			 * Must fill in the first PRP pointer (PRP1) before
2363  			 * moving on.
2364  			 */
2365  			*prp1_entry = cpu_to_le64(dma_addr);
2366  
2367  			/*
2368  			 * Now point to the second PRP entry within the
2369  			 * command (PRP2).
2370  			 */
2371  			prp_entry = prp2_entry;
2372  		} else if (prp_entry == prp2_entry) {
2373  			/*
2374  			 * Should the PRP2 entry be a PRP List pointer or just
2375  			 * a regular PRP pointer?  If there is more than one
2376  			 * more page of data, must use a PRP List pointer.
2377  			 */
2378  			if (length > ioc->page_size) {
2379  				/*
2380  				 * PRP2 will contain a PRP List pointer because
2381  				 * more PRP's are needed with this command. The
2382  				 * list will start at the beginning of the
2383  				 * contiguous buffer.
2384  				 */
2385  				*prp2_entry = cpu_to_le64(prp_entry_dma);
2386  
2387  				/*
2388  				 * The next PRP Entry will be the start of the
2389  				 * first PRP List.
2390  				 */
2391  				prp_entry = prp_page;
2392  			} else {
2393  				/*
2394  				 * After this, the PRP Entries are complete.
2395  				 * This command uses 2 PRP's and no PRP list.
2396  				 */
2397  				*prp2_entry = cpu_to_le64(dma_addr);
2398  			}
2399  		} else {
2400  			/*
2401  			 * Put entry in list and bump the addresses.
2402  			 *
2403  			 * After PRP1 and PRP2 are filled in, this will fill in
2404  			 * all remaining PRP entries in a PRP List, one per
2405  			 * each time through the loop.
2406  			 */
2407  			*prp_entry = cpu_to_le64(dma_addr);
2408  			prp_entry++;
2409  			prp_entry_dma++;
2410  		}
2411  
2412  		/*
2413  		 * Bump the phys address of the command's data buffer by the
2414  		 * entry_len.
2415  		 */
2416  		dma_addr += entry_len;
2417  
2418  		/* Decrement length accounting for last partial page. */
2419  		if (entry_len > length)
2420  			length = 0;
2421  		else
2422  			length -= entry_len;
2423  	}
2424  }
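/*
 * Worked example (illustrative): with ioc->page_size = 4096, a 10 KiB IOCTL
 * buffer starting at bus address 0x10000600 needs three entries:
 *
 *	PRP1:        0x10000600   covers 4096 - 0x600 = 2560 bytes
 *	list entry:  0x10001000   covers 4096 bytes
 *	list entry:  0x10002000   covers the remaining 3584 bytes
 *
 * Because more than two entries are required, PRP2 holds a pointer to the
 * PRP list (in the contiguous per-smid buffer) containing the last two
 * entries, exactly as described in the comment above.
 */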
2425  
2426  /**
2427   * base_make_prp_nvme - Prepare PRPs (Physical Region Page) -
2428   *			SGLs specific to NVMe drives only
2429   *
2430   * @ioc:		per adapter object
2431   * @scmd:		SCSI command from the mid-layer
2432   * @mpi_request:	mpi request
2433   * @smid:		msg Index
2434   * @sge_count:		scatter gather element count.
2435   *
2436   * Return:		nothing (the routine is void; callers use
2437   *			base_is_prp_possible() to decide whether to call it)
2438   */
2439  static void
2440  base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
2441  		struct scsi_cmnd *scmd,
2442  		Mpi25SCSIIORequest_t *mpi_request,
2443  		u16 smid, int sge_count)
2444  {
2445  	int sge_len, num_prp_in_chain = 0;
2446  	Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
2447  	__le64 *curr_buff;
2448  	dma_addr_t msg_dma, sge_addr, offset;
2449  	u32 page_mask, page_mask_result;
2450  	struct scatterlist *sg_scmd;
2451  	u32 first_prp_len;
2452  	int data_len = scsi_bufflen(scmd);
2453  	u32 nvme_pg_size;
2454  
2455  	nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
2456  	/*
2457  	 * NVMe has a very convoluted PRP format.  One PRP is required
2458  	 * for each page or partial page.  The driver needs to split up OS sg_list
2459  	 * entries if an entry is longer than one page or crosses a page
2460  	 * boundary.  The driver also has to insert a PRP list pointer entry as
2461  	 * the last entry in each physical page of the PRP list.
2462  	 *
2463  	 * NOTE: The first PRP "entry" is actually placed in the first
2464  	 * SGL entry in the main message as IEEE 64 format.  The 2nd
2465  	 * entry in the main message is the chain element, and the rest
2466  	 * of the PRP entries are built in the contiguous pcie buffer.
2467  	 */
2468  	page_mask = nvme_pg_size - 1;
2469  
2470  	/*
2471  	 * Native SGL is needed.
2472  	 * Put a chain element in main message frame that points to the first
2473  	 * chain buffer.
2474  	 *
2475  	 * NOTE:  The ChainOffset field must be 0 when using a chain pointer to
2476  	 *        a native SGL.
2477  	 */
2478  
2479  	/* Set main message chain element pointer */
2480  	main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2481  	/*
2482  	 * For NVMe the chain element needs to be the 2nd SG entry in the main
2483  	 * message.
2484  	 */
2485  	main_chain_element = (Mpi25IeeeSgeChain64_t *)
2486  		((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
2487  
2488  	/*
2489  	 * For the PRP entries, use the specially allocated buffer of
2490  	 * contiguous memory.  Normal chain buffers can't be used
2491  	 * because each chain buffer would need to be the size of an OS
2492  	 * page (4k).
2493  	 */
2494  	curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
2495  	msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2496  
2497  	main_chain_element->Address = cpu_to_le64(msg_dma);
2498  	main_chain_element->NextChainOffset = 0;
2499  	main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2500  			MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2501  			MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
2502  
2503  	/* Build the first PRP; the SGE need not be page aligned. */
2504  	ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2505  	sg_scmd = scsi_sglist(scmd);
2506  	sge_addr = sg_dma_address(sg_scmd);
2507  	sge_len = sg_dma_len(sg_scmd);
2508  
2509  	offset = sge_addr & page_mask;
2510  	first_prp_len = nvme_pg_size - offset;
2511  
2512  	ptr_first_sgl->Address = cpu_to_le64(sge_addr);
2513  	ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
2514  
2515  	data_len -= first_prp_len;
2516  
2517  	if (sge_len > first_prp_len) {
2518  		sge_addr += first_prp_len;
2519  		sge_len -= first_prp_len;
2520  	} else if (data_len && (sge_len == first_prp_len)) {
2521  		sg_scmd = sg_next(sg_scmd);
2522  		sge_addr = sg_dma_address(sg_scmd);
2523  		sge_len = sg_dma_len(sg_scmd);
2524  	}
2525  
2526  	for (;;) {
2527  		offset = sge_addr & page_mask;
2528  
2529  		/* Put PRP pointer due to page boundary*/
2530  		page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
2531  		if (unlikely(!page_mask_result)) {
2532  			scmd_printk(KERN_NOTICE,
2533  				scmd, "page boundary curr_buff: 0x%p\n",
2534  				curr_buff);
2535  			msg_dma += 8;
2536  			*curr_buff = cpu_to_le64(msg_dma);
2537  			curr_buff++;
2538  			num_prp_in_chain++;
2539  		}
2540  
2541  		*curr_buff = cpu_to_le64(sge_addr);
2542  		curr_buff++;
2543  		msg_dma += 8;
2544  		num_prp_in_chain++;
2545  
2546  		sge_addr += nvme_pg_size;
2547  		sge_len -= nvme_pg_size;
2548  		data_len -= nvme_pg_size;
2549  
2550  		if (data_len <= 0)
2551  			break;
2552  
2553  		if (sge_len > 0)
2554  			continue;
2555  
2556  		sg_scmd = sg_next(sg_scmd);
2557  		sge_addr = sg_dma_address(sg_scmd);
2558  		sge_len = sg_dma_len(sg_scmd);
2559  	}
2560  
2561  	main_chain_element->Length =
2562  		cpu_to_le32(num_prp_in_chain * sizeof(u64));
2563  	return;
2564  }
2565  
2566  static bool
2567  base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
2568  	struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
2569  {
2570  	u32 data_length = 0;
2571  	bool build_prp = true;
2572  
2573  	data_length = scsi_bufflen(scmd);
2574  	if (pcie_device &&
2575  	    (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) {
2576  		build_prp = false;
2577  		return build_prp;
2578  	}
2579  
2580  	/* If the data length is <= 16K and the number of SGEs is <= 2,
2581  	 * build an IEEE SGL instead.
2582  	 */
2583  	if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
2584  		build_prp = false;
2585  
2586  	return build_prp;
2587  }
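/*
 * Examples (illustrative): an 8 KiB transfer mapped into two SGEs stays on
 * the IEEE SGL path (it is <= NVME_PRP_PAGE_SIZE * 4 and uses <= 2 entries),
 * while a 64 KiB transfer, or any transfer scattered over three or more
 * SGEs, is routed to base_make_prp_nvme().  PCIe-attached SCSI devices
 * (as opposed to NVMe) never use PRPs.
 */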
2588  
2589  /**
2590   * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
2591   * determine if the driver needs to build a native SGL.  If so, that native
2592   * SGL is built in the special contiguous buffers allocated especially for
2593   * PCIe SGL creation.  If the driver will not build a native SGL, return
2594   * TRUE and a normal IEEE SGL will be built.  Currently this routine
2595   * supports NVMe.
2596   * @ioc: per adapter object
2597   * @mpi_request: mf request pointer
2598   * @smid: system request message index
2599   * @scmd: scsi command
2600   * @pcie_device: points to the PCIe device's info
2601   *
2602   * Return: 0 if native SGL was built, 1 if no SGL was built
2603   */
2604  static int
2605  _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
2606  	Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
2607  	struct _pcie_device *pcie_device)
2608  {
2609  	int sges_left;
2610  
2611  	/* Get the SG list pointer and info. */
2612  	sges_left = scsi_dma_map(scmd);
2613  	if (sges_left < 0)
2614  		return 1;
2615  
2616  	/* Check if we need to build a native SG list. */
2617  	if (!base_is_prp_possible(ioc, pcie_device,
2618  				scmd, sges_left)) {
2619  		/* PRP is not possible, so fall back to a normal IEEE SGL. */
2620  		goto out;
2621  	}
2622  
2623  	/*
2624  	 * Build native NVMe PRP.
2625  	 */
2626  	base_make_prp_nvme(ioc, scmd, mpi_request,
2627  			smid, sges_left);
2628  
2629  	return 0;
2630  out:
2631  	scsi_dma_unmap(scmd);
2632  	return 1;
2633  }
2634  
2635  /**
2636   * _base_add_sg_single_ieee - add sg element for IEEE format
2637   * @paddr: virtual address for SGE
2638   * @flags: SGE flags
2639   * @chain_offset: number of 128 byte elements from start of segment
2640   * @length: data transfer length
2641   * @dma_addr: Physical address
2642   */
2643  static void
2644  _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
2645  	dma_addr_t dma_addr)
2646  {
2647  	Mpi25IeeeSgeChain64_t *sgel = paddr;
2648  
2649  	sgel->Flags = flags;
2650  	sgel->NextChainOffset = chain_offset;
2651  	sgel->Length = cpu_to_le32(length);
2652  	sgel->Address = cpu_to_le64(dma_addr);
2653  }
2654  
2655  /**
2656   * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
2657   * @ioc: per adapter object
2658   * @paddr: virtual address for SGE
2659   *
2660   * Create a zero length scatter gather entry to ensure the IOC's hardware has
2661   * something to use if the target device goes brain dead and tries
2662   * to send data even when none is asked for.
2663   */
2664  static void
2665  _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
2666  {
2667  	u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2668  		MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2669  		MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
2670  
2671  	_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
2672  }
2673  
2674  static inline int _base_scsi_dma_map(struct scsi_cmnd *cmd)
2675  {
2676  	/*
2677  	 * Some firmware versions byte-swap the REPORT ZONES command reply from
2678  	 * ATA-ZAC devices by directly accessing in the host buffer. This does
2679  	 * not respect the default command DMA direction and causes IOMMU page
2680  	 * faults on some architectures with an IOMMU enforcing write mappings
2681  	 * (e.g. AMD hosts). Avoid such issue by making the report zones buffer
2682  	 * mapping bi-directional.
2683  	 */
2684  	if (cmd->cmnd[0] == ZBC_IN && cmd->cmnd[1] == ZI_REPORT_ZONES)
2685  		cmd->sc_data_direction = DMA_BIDIRECTIONAL;
2686  
2687  	return scsi_dma_map(cmd);
2688  }
2689  
2690  /**
2691   * _base_build_sg_scmd - main sg creation routine
2692   *		pcie_device is unused here!
2693   * @ioc: per adapter object
2694   * @scmd: scsi command
2695   * @smid: system request message index
2696   * @unused: unused pcie_device pointer
2697   * Context: none.
2698   *
2699   * The main routine that builds scatter gather table from a given
2700   * scsi request sent via the .queuecommand main handler.
2701   *
2702   * Return: 0 success, anything else error
2703   */
2704  static int
2705  _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
2706  	struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
2707  {
2708  	Mpi2SCSIIORequest_t *mpi_request;
2709  	dma_addr_t chain_dma;
2710  	struct scatterlist *sg_scmd;
2711  	void *sg_local, *chain;
2712  	u32 chain_offset;
2713  	u32 chain_length;
2714  	u32 chain_flags;
2715  	int sges_left;
2716  	u32 sges_in_segment;
2717  	u32 sgl_flags;
2718  	u32 sgl_flags_last_element;
2719  	u32 sgl_flags_end_buffer;
2720  	struct chain_tracker *chain_req;
2721  
2722  	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2723  
2724  	/* init scatter gather flags */
2725  	sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
2726  	if (scmd->sc_data_direction == DMA_TO_DEVICE)
2727  		sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
2728  	sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
2729  	    << MPI2_SGE_FLAGS_SHIFT;
2730  	sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
2731  	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
2732  	    << MPI2_SGE_FLAGS_SHIFT;
2733  	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2734  
2735  	sg_scmd = scsi_sglist(scmd);
2736  	sges_left = _base_scsi_dma_map(scmd);
2737  	if (sges_left < 0)
2738  		return -ENOMEM;
2739  
2740  	sg_local = &mpi_request->SGL;
2741  	sges_in_segment = ioc->max_sges_in_main_message;
2742  	if (sges_left <= sges_in_segment)
2743  		goto fill_in_last_segment;
2744  
2745  	mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
2746  	    (sges_in_segment * ioc->sge_size))/4;
2747  
2748  	/* fill in main message segment when there is a chain following */
2749  	while (sges_in_segment) {
2750  		if (sges_in_segment == 1)
2751  			ioc->base_add_sg_single(sg_local,
2752  			    sgl_flags_last_element | sg_dma_len(sg_scmd),
2753  			    sg_dma_address(sg_scmd));
2754  		else
2755  			ioc->base_add_sg_single(sg_local, sgl_flags |
2756  			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2757  		sg_scmd = sg_next(sg_scmd);
2758  		sg_local += ioc->sge_size;
2759  		sges_left--;
2760  		sges_in_segment--;
2761  	}
2762  
2763  	/* initializing the chain flags and pointers */
2764  	chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
2765  	chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2766  	if (!chain_req)
2767  		return -1;
2768  	chain = chain_req->chain_buffer;
2769  	chain_dma = chain_req->chain_buffer_dma;
2770  	do {
2771  		sges_in_segment = (sges_left <=
2772  		    ioc->max_sges_in_chain_message) ? sges_left :
2773  		    ioc->max_sges_in_chain_message;
2774  		chain_offset = (sges_left == sges_in_segment) ?
2775  		    0 : (sges_in_segment * ioc->sge_size)/4;
2776  		chain_length = sges_in_segment * ioc->sge_size;
2777  		if (chain_offset) {
2778  			chain_offset = chain_offset <<
2779  			    MPI2_SGE_CHAIN_OFFSET_SHIFT;
2780  			chain_length += ioc->sge_size;
2781  		}
2782  		ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
2783  		    chain_length, chain_dma);
2784  		sg_local = chain;
2785  		if (!chain_offset)
2786  			goto fill_in_last_segment;
2787  
2788  		/* fill in chain segments */
2789  		while (sges_in_segment) {
2790  			if (sges_in_segment == 1)
2791  				ioc->base_add_sg_single(sg_local,
2792  				    sgl_flags_last_element |
2793  				    sg_dma_len(sg_scmd),
2794  				    sg_dma_address(sg_scmd));
2795  			else
2796  				ioc->base_add_sg_single(sg_local, sgl_flags |
2797  				    sg_dma_len(sg_scmd),
2798  				    sg_dma_address(sg_scmd));
2799  			sg_scmd = sg_next(sg_scmd);
2800  			sg_local += ioc->sge_size;
2801  			sges_left--;
2802  			sges_in_segment--;
2803  		}
2804  
2805  		chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2806  		if (!chain_req)
2807  			return -1;
2808  		chain = chain_req->chain_buffer;
2809  		chain_dma = chain_req->chain_buffer_dma;
2810  	} while (1);
2811  
2812  
2813   fill_in_last_segment:
2814  
2815  	/* fill the last segment */
2816  	while (sges_left) {
2817  		if (sges_left == 1)
2818  			ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
2819  			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2820  		else
2821  			ioc->base_add_sg_single(sg_local, sgl_flags |
2822  			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2823  		sg_scmd = sg_next(sg_scmd);
2824  		sg_local += ioc->sge_size;
2825  		sges_left--;
2826  	}
2827  
2828  	return 0;
2829  }
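/*
 * Illustrative outline (not part of the driver): this routine and its IEEE
 * twin below share the same three-stage shape -- fill the main-frame SGEs,
 * chase zero or more chain buffers obtained from
 * _base_get_chain_buffer_tracker(), and finish with a last segment whose
 * final SGE carries the end-of-buffer/end-of-list flags.  ChainOffset tells
 * the IOC where the chain element sits inside the current frame: counted in
 * 4-byte words for MPI2 SGEs and in whole SGEs for the IEEE format.
 */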
2830  
2831  /**
2832   * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
2833   * @ioc: per adapter object
2834   * @scmd: scsi command
2835   * @smid: system request message index
2836   * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
2837   * constructed on need.
2838   * Context: none.
2839   *
2840   * The main routine that builds scatter gather table from a given
2841   * scsi request sent via the .queuecommand main handler.
2842   *
2843   * Return: 0 success, anything else error
2844   */
2845  static int
2846  _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
2847  	struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
2848  {
2849  	Mpi25SCSIIORequest_t *mpi_request;
2850  	dma_addr_t chain_dma;
2851  	struct scatterlist *sg_scmd;
2852  	void *sg_local, *chain;
2853  	u32 chain_offset;
2854  	u32 chain_length;
2855  	int sges_left;
2856  	u32 sges_in_segment;
2857  	u8 simple_sgl_flags;
2858  	u8 simple_sgl_flags_last;
2859  	u8 chain_sgl_flags;
2860  	struct chain_tracker *chain_req;
2861  
2862  	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2863  
2864  	/* init scatter gather flags */
2865  	simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2866  	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2867  	simple_sgl_flags_last = simple_sgl_flags |
2868  	    MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2869  	chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2870  	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2871  
2872  	/* Check if we need to build a native SG list. */
2873  	if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
2874  			smid, scmd, pcie_device) == 0)) {
2875  		/* We built a native SG list, just return. */
2876  		return 0;
2877  	}
2878  
2879  	sg_scmd = scsi_sglist(scmd);
2880  	sges_left = _base_scsi_dma_map(scmd);
2881  	if (sges_left < 0)
2882  		return -ENOMEM;
2883  
2884  	sg_local = &mpi_request->SGL;
2885  	sges_in_segment = (ioc->request_sz -
2886  		   offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
2887  	if (sges_left <= sges_in_segment)
2888  		goto fill_in_last_segment;
2889  
2890  	mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
2891  	    (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
2892  
2893  	/* fill in main message segment when there is a chain following */
2894  	while (sges_in_segment > 1) {
2895  		_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2896  		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2897  		sg_scmd = sg_next(sg_scmd);
2898  		sg_local += ioc->sge_size_ieee;
2899  		sges_left--;
2900  		sges_in_segment--;
2901  	}
2902  
2903  	/* initializing the pointers */
2904  	chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2905  	if (!chain_req)
2906  		return -1;
2907  	chain = chain_req->chain_buffer;
2908  	chain_dma = chain_req->chain_buffer_dma;
2909  	do {
2910  		sges_in_segment = (sges_left <=
2911  		    ioc->max_sges_in_chain_message) ? sges_left :
2912  		    ioc->max_sges_in_chain_message;
2913  		chain_offset = (sges_left == sges_in_segment) ?
2914  		    0 : sges_in_segment;
2915  		chain_length = sges_in_segment * ioc->sge_size_ieee;
2916  		if (chain_offset)
2917  			chain_length += ioc->sge_size_ieee;
2918  		_base_add_sg_single_ieee(sg_local, chain_sgl_flags,
2919  		    chain_offset, chain_length, chain_dma);
2920  
2921  		sg_local = chain;
2922  		if (!chain_offset)
2923  			goto fill_in_last_segment;
2924  
2925  		/* fill in chain segments */
2926  		while (sges_in_segment) {
2927  			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2928  			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2929  			sg_scmd = sg_next(sg_scmd);
2930  			sg_local += ioc->sge_size_ieee;
2931  			sges_left--;
2932  			sges_in_segment--;
2933  		}
2934  
2935  		chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2936  		if (!chain_req)
2937  			return -1;
2938  		chain = chain_req->chain_buffer;
2939  		chain_dma = chain_req->chain_buffer_dma;
2940  	} while (1);
2941  
2942  
2943   fill_in_last_segment:
2944  
2945  	/* fill the last segment */
2946  	while (sges_left > 0) {
2947  		if (sges_left == 1)
2948  			_base_add_sg_single_ieee(sg_local,
2949  			    simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
2950  			    sg_dma_address(sg_scmd));
2951  		else
2952  			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2953  			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2954  		sg_scmd = sg_next(sg_scmd);
2955  		sg_local += ioc->sge_size_ieee;
2956  		sges_left--;
2957  	}
2958  
2959  	return 0;
2960  }
2961  
2962  /**
2963   * _base_build_sg_ieee - build generic sg for IEEE format
2964   * @ioc: per adapter object
2965   * @psge: virtual address for SGE
2966   * @data_out_dma: physical address for WRITES
2967   * @data_out_sz: data xfer size for WRITES
2968   * @data_in_dma: physical address for READS
2969   * @data_in_sz: data xfer size for READS
2970   */
2971  static void
2972  _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
2973  	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2974  	size_t data_in_sz)
2975  {
2976  	u8 sgl_flags;
2977  
2978  	if (!data_out_sz && !data_in_sz) {
2979  		_base_build_zero_len_sge_ieee(ioc, psge);
2980  		return;
2981  	}
2982  
2983  	if (data_out_sz && data_in_sz) {
2984  		/* WRITE sgel first */
2985  		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2986  		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2987  		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2988  		    data_out_dma);
2989  
2990  		/* incr sgel */
2991  		psge += ioc->sge_size_ieee;
2992  
2993  		/* READ sgel last */
2994  		sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2995  		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2996  		    data_in_dma);
2997  	} else if (data_out_sz) /* WRITE */ {
2998  		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2999  		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
3000  		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
3001  		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
3002  		    data_out_dma);
3003  	} else if (data_in_sz) /* READ */ {
3004  		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
3005  		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
3006  		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
3007  		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
3008  		    data_in_dma);
3009  	}
3010  }
3011  
3012  #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
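/*
 * convert_to_kb() turns a page count into kilobytes: with the common
 * PAGE_SHIFT of 12 (4 KiB pages) it is x << 2, i.e. x * 4.
 */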
3013  
3014  /**
3015   * _base_config_dma_addressing - set dma addressing
3016   * @ioc: per adapter object
3017   * @pdev: PCI device struct
3018   *
3019   * Return: 0 for success, non-zero for failure.
3020   */
3021  static int
3022  _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
3023  {
3024  	struct sysinfo s;
3025  	u64 coherent_dma_mask, dma_mask;
3026  
3027  	if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4) {
3028  		ioc->dma_mask = 32;
3029  		coherent_dma_mask = dma_mask = DMA_BIT_MASK(32);
3030  	/* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
3031  	} else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) {
3032  		ioc->dma_mask = 63;
3033  		coherent_dma_mask = dma_mask = DMA_BIT_MASK(63);
3034  	} else {
3035  		ioc->dma_mask = 64;
3036  		coherent_dma_mask = dma_mask = DMA_BIT_MASK(64);
3037  	}
3038  
3039  	if (ioc->use_32bit_dma)
3040  		coherent_dma_mask = DMA_BIT_MASK(32);
3041  
3042  	if (dma_set_mask(&pdev->dev, dma_mask) ||
3043  	    dma_set_coherent_mask(&pdev->dev, coherent_dma_mask))
3044  		return -ENODEV;
3045  
3046  	if (ioc->dma_mask > 32) {
3047  		ioc->base_add_sg_single = &_base_add_sg_single_64;
3048  		ioc->sge_size = sizeof(Mpi2SGESimple64_t);
3049  	} else {
3050  		ioc->base_add_sg_single = &_base_add_sg_single_32;
3051  		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
3052  	}
3053  
3054  	si_meminfo(&s);
3055  	ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
3056  		ioc->dma_mask, convert_to_kb(s.totalram));
3057  
3058  	return 0;
3059  }
3060  
3061  /**
3062   * _base_check_enable_msix - check whether the controller is MSI-X capable.
3063   * @ioc: per adapter object
3064   *
3065   * Check to see if card is capable of MSIX, and set number
3066   * of available msix vectors
3067   */
3068  static int
3069  _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
3070  {
3071  	int base;
3072  	u16 message_control;
3073  
3074  	/* Check whether this is a SAS2008 B0 controller;
3075  	 * if so, use IO-APIC instead of MSI-X.
3076  	 */
3077  	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
3078  	    ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
3079  		return -EINVAL;
3080  	}
3081  
3082  	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
3083  	if (!base) {
3084  		dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
3085  		return -EINVAL;
3086  	}
3087  
3088  	/* get msix vector count */
3089  	/* NUMA_IO not supported for older controllers */
3090  	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
3091  	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
3092  	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
3093  	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
3094  	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
3095  	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
3096  	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
3097  		ioc->msix_vector_count = 1;
3098  	else {
3099  		pci_read_config_word(ioc->pdev, base + 2, &message_control);
3100  		ioc->msix_vector_count = (message_control & 0x3FF) + 1;
3101  	}
3102  	dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
3103  				  ioc->msix_vector_count));
3104  	return 0;
3105  }
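/*
 * Worked example (illustrative): the MSI-X Message Control word read above
 * encodes "table size minus one" in its low bits, so a value of 0x001f gives
 * (0x001f & 0x3ff) + 1 = 32 usable vectors in ioc->msix_vector_count for
 * controllers not on the SAS2 single-vector list.
 */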
3106  
3107  /**
3108   * mpt3sas_base_free_irq - free irq
3109   * @ioc: per adapter object
3110   *
3111   * Freeing respective reply_queue from the list.
3112   */
3113  void
3114  mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
3115  {
3116  	unsigned int irq;
3117  	struct adapter_reply_queue *reply_q, *next;
3118  
3119  	if (list_empty(&ioc->reply_queue_list))
3120  		return;
3121  
3122  	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
3123  		list_del(&reply_q->list);
3124  		if (reply_q->is_iouring_poll_q) {
3125  			kfree(reply_q);
3126  			continue;
3127  		}
3128  
3129  		if (ioc->smp_affinity_enable) {
3130  			irq = pci_irq_vector(ioc->pdev, reply_q->msix_index);
3131  			irq_update_affinity_hint(irq, NULL);
3132  		}
3133  		free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
3134  			 reply_q);
3135  		kfree(reply_q);
3136  	}
3137  }
3138  
3139  /**
3140   * _base_request_irq - request irq
3141   * @ioc: per adapter object
3142   * @index: msix index into vector table
3143   *
3144   * Request the irq and insert the respective reply_queue into the list.
3145   */
3146  static int
3147  _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
3148  {
3149  	struct pci_dev *pdev = ioc->pdev;
3150  	struct adapter_reply_queue *reply_q;
3151  	int r, qid;
3152  
3153  	reply_q =  kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
3154  	if (!reply_q) {
3155  		ioc_err(ioc, "unable to allocate memory %zu!\n",
3156  			sizeof(struct adapter_reply_queue));
3157  		return -ENOMEM;
3158  	}
3159  	reply_q->ioc = ioc;
3160  	reply_q->msix_index = index;
3161  
3162  	atomic_set(&reply_q->busy, 0);
3163  
3164  	if (index >= ioc->iopoll_q_start_index) {
3165  		qid = index - ioc->iopoll_q_start_index;
3166  		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-mq-poll%d",
3167  		    ioc->driver_name, ioc->id, qid);
3168  		reply_q->is_iouring_poll_q = 1;
3169  		ioc->io_uring_poll_queues[qid].reply_q = reply_q;
3170  		goto out;
3171  	}
3172  
3173  
3174  	if (ioc->msix_enable)
3175  		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
3176  		    ioc->driver_name, ioc->id, index);
3177  	else
3178  		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
3179  		    ioc->driver_name, ioc->id);
3180  	r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
3181  			IRQF_SHARED, reply_q->name, reply_q);
3182  	if (r) {
3183  		pr_err("%s: unable to allocate interrupt %d!\n",
3184  		       reply_q->name, pci_irq_vector(pdev, index));
3185  		kfree(reply_q);
3186  		return -EBUSY;
3187  	}
3188  out:
3189  	INIT_LIST_HEAD(&reply_q->list);
3190  	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
3191  	return 0;
3192  }
3193  
3194  /**
3195   * _base_assign_reply_queues - assign an msix index to each cpu
3196   * @ioc: per adapter object
3197   *
3198   * The end user would need to set the affinity via /proc/irq/#/smp_affinity
3199   */
3200  static void
3201  _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
3202  {
3203  	unsigned int cpu, nr_cpus, nr_msix, index = 0, irq;
3204  	struct adapter_reply_queue *reply_q;
3205  	int iopoll_q_count = ioc->reply_queue_count -
3206  	    ioc->iopoll_q_start_index;
3207  	const struct cpumask *mask;
3208  
3209  	if (!_base_is_controller_msix_enabled(ioc))
3210  		return;
3211  
3212  	if (ioc->msix_load_balance)
3213  		return;
3214  
3215  	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
3216  
3217  	nr_cpus = num_online_cpus();
3218  	nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
3219  					       ioc->facts.MaxMSIxVectors);
3220  	if (!nr_msix)
3221  		return;
3222  
3223  	if (ioc->smp_affinity_enable) {
3224  
3225  		/*
3226  		 * set irq affinity to local numa node for those irqs
3227  		 * corresponding to high iops queues.
3228  		 */
3229  		if (ioc->high_iops_queues) {
3230  			mask = cpumask_of_node(dev_to_node(&ioc->pdev->dev));
3231  			for (index = 0; index < ioc->high_iops_queues;
3232  			    index++) {
3233  				irq = pci_irq_vector(ioc->pdev, index);
3234  				irq_set_affinity_and_hint(irq, mask);
3235  			}
3236  		}
3237  
3238  		list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3239  			const cpumask_t *mask;
3240  
3241  			if (reply_q->msix_index < ioc->high_iops_queues ||
3242  			    reply_q->msix_index >= ioc->iopoll_q_start_index)
3243  				continue;
3244  
3245  			mask = pci_irq_get_affinity(ioc->pdev,
3246  			    reply_q->msix_index);
3247  			if (!mask) {
3248  				ioc_warn(ioc, "no affinity for msi %x\n",
3249  					 reply_q->msix_index);
3250  				goto fall_back;
3251  			}
3252  
3253  			for_each_cpu_and(cpu, mask, cpu_online_mask) {
3254  				if (cpu >= ioc->cpu_msix_table_sz)
3255  					break;
3256  				ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3257  			}
3258  		}
3259  		return;
3260  	}
3261  
3262  fall_back:
3263  	cpu = cpumask_first(cpu_online_mask);
3264  	nr_msix -= (ioc->high_iops_queues - iopoll_q_count);
3265  	index = 0;
3266  
3267  	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3268  		unsigned int i, group = nr_cpus / nr_msix;
3269  
3270  		if (reply_q->msix_index < ioc->high_iops_queues ||
3271  		    reply_q->msix_index >= ioc->iopoll_q_start_index)
3272  			continue;
3273  
3274  		if (cpu >= nr_cpus)
3275  			break;
3276  
3277  		if (index < nr_cpus % nr_msix)
3278  			group++;
3279  
3280  		for (i = 0 ; i < group ; i++) {
3281  			ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3282  			cpu = cpumask_next(cpu, cpu_online_mask);
3283  		}
3284  		index++;
3285  	}
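	/*
	 * Worked example (illustrative only): with nr_cpus = 8 and
	 * nr_msix = 3, group starts at 8 / 3 = 2 and the remainder
	 * 8 % 3 = 2 bumps the first two reply queues up by one, so the
	 * CPUs are split 3/3/2 across the three reply queues.
	 */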
3286  }
3287  
3288  /**
3289   * _base_check_and_enable_high_iops_queues - enable high iops mode
3290   * @ioc: per adapter object
3291   * @hba_msix_vector_count: msix vectors supported by HBA
3292   *
3293   * Enable high iops queues only if
3294   *  - HBA is a SEA/AERO controller and
3295   *  - the MSI-X vector count supported by the HBA is 128 and
3296   *  - total CPU count in the system >=16 and
3297   *  - loaded driver with default max_msix_vectors module parameter and
3298   *  - system booted in non kdump mode
3299   *
3300   * Return: nothing.
3301   */
3302  static void
3303  _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
3304  		int hba_msix_vector_count)
3305  {
3306  	u16 lnksta, speed;
3307  
3308  	/*
3309  	 * Disable high iops queues if perf_mode is iops/latency or io uring poll queues are enabled.
3310  	 */
3311  	if (perf_mode == MPT_PERF_MODE_IOPS ||
3312  	    perf_mode == MPT_PERF_MODE_LATENCY ||
3313  	    ioc->io_uring_poll_queues) {
3314  		ioc->high_iops_queues = 0;
3315  		return;
3316  	}
3317  
3318  	if (perf_mode == MPT_PERF_MODE_DEFAULT) {
3319  
3320  		pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
3321  		speed = lnksta & PCI_EXP_LNKSTA_CLS;
3322  
3323  		if (speed < 0x4) {
3324  			ioc->high_iops_queues = 0;
3325  			return;
3326  		}
3327  	}
3328  
3329  	if (!reset_devices && ioc->is_aero_ioc &&
3330  	    hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES &&
3331  	    num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES &&
3332  	    max_msix_vectors == -1)
3333  		ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES;
3334  	else
3335  		ioc->high_iops_queues = 0;
3336  }
3337  
3338  /**
3339   * mpt3sas_base_disable_msix - disables msix
3340   * @ioc: per adapter object
3341   *
3342   */
3343  void
3344  mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
3345  {
3346  	if (!ioc->msix_enable)
3347  		return;
3348  	pci_free_irq_vectors(ioc->pdev);
3349  	ioc->msix_enable = 0;
3350  	kfree(ioc->io_uring_poll_queues);
3351  }
3352  
3353  /**
3354   * _base_alloc_irq_vectors - allocate msix vectors
3355   * @ioc: per adapter object
3356   *
3357   */
3358  static int
3359  _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
3360  {
3361  	int i, irq_flags = PCI_IRQ_MSIX;
3362  	struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
3363  	struct irq_affinity *descp = &desc;
3364  	/*
3365  	 * Don't allocate msix vectors for poll_queues.
3366  	 * msix_vectors is always within the range of FW-supported reply queues.
3367  	 */
3368  	int nr_msix_vectors = ioc->iopoll_q_start_index;
3369  
3370  
3371  	if (ioc->smp_affinity_enable)
3372  		irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
3373  	else
3374  		descp = NULL;
3375  
3376  	ioc_info(ioc, " %d %d %d\n", ioc->high_iops_queues,
3377  	    ioc->reply_queue_count, nr_msix_vectors);
3378  
3379  	i = pci_alloc_irq_vectors_affinity(ioc->pdev,
3380  	    ioc->high_iops_queues,
3381  	    nr_msix_vectors, irq_flags, descp);
3382  
3383  	return i;
3384  }
3385  
3386  /**
3387   * _base_enable_msix - enables msix, falls back to io_apic
3388   * @ioc: per adapter object
3389   *
3390   */
3391  static int
3392  _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
3393  {
3394  	int r;
3395  	int i, local_max_msix_vectors;
3396  	u8 try_msix = 0;
3397  	int iopoll_q_count = 0;
3398  
3399  	ioc->msix_load_balance = false;
3400  
3401  	if (msix_disable == -1 || msix_disable == 0)
3402  		try_msix = 1;
3403  
3404  	if (!try_msix)
3405  		goto try_ioapic;
3406  
3407  	if (_base_check_enable_msix(ioc) != 0)
3408  		goto try_ioapic;
3409  
3410  	ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
3411  	pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
3412  		ioc->cpu_count, max_msix_vectors);
3413  
3414  	ioc->reply_queue_count =
3415  		min_t(int, ioc->cpu_count, ioc->msix_vector_count);
3416  
3417  	if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
3418  		local_max_msix_vectors = (reset_devices) ? 1 : 8;
3419  	else
3420  		local_max_msix_vectors = max_msix_vectors;
3421  
3422  	if (local_max_msix_vectors == 0)
3423  		goto try_ioapic;
3424  
3425  	/*
3426  	 * Enable msix_load_balance only if combined reply queue mode is
3427  	 * disabled on SAS3 & above generation HBA devices.
3428  	 */
3429  	if (!ioc->combined_reply_queue &&
3430  	    ioc->hba_mpi_version_belonged != MPI2_VERSION) {
3431  		ioc_info(ioc,
3432  		    "combined ReplyQueue is off, Enabling msix load balance\n");
3433  		ioc->msix_load_balance = true;
3434  	}
3435  
3436  	/*
3437  	 * smp affinity setting is not needed when msix load balance
3438  	 * is enabled.
3439  	 */
3440  	if (ioc->msix_load_balance)
3441  		ioc->smp_affinity_enable = 0;
3442  
3443  	if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1)
3444  		ioc->shost->host_tagset = 0;
3445  
3446  	/*
3447  	 * Enable io uring poll queues only if host_tagset is enabled.
3448  	 */
3449  	if (ioc->shost->host_tagset)
3450  		iopoll_q_count = poll_queues;
3451  
3452  	if (iopoll_q_count) {
3453  		ioc->io_uring_poll_queues = kcalloc(iopoll_q_count,
3454  		    sizeof(struct io_uring_poll_queue), GFP_KERNEL);
3455  		if (!ioc->io_uring_poll_queues)
3456  			iopoll_q_count = 0;
3457  	}
3458  
3459  	if (ioc->is_aero_ioc)
3460  		_base_check_and_enable_high_iops_queues(ioc,
3461  		    ioc->msix_vector_count);
3462  
3463  	/*
3464  	 * Add high iops queues count to reply queue count if high iops queues
3465  	 * are enabled.
3466  	 */
3467  	ioc->reply_queue_count = min_t(int,
3468  	    ioc->reply_queue_count + ioc->high_iops_queues,
3469  	    ioc->msix_vector_count);
3470  
3471  	/*
3472  	 * Adjust the reply queue count in case the reply queue count
3473  	 * exceeds the user provided MSI-X vector count.
3474  	 */
3475  	if (local_max_msix_vectors > 0)
3476  		ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
3477  		    ioc->reply_queue_count);
3478  	/*
3479  	 * Add io uring poll queues count to reply queues count
3480  	 * if io uring is enabled in driver.
3481  	 */
3482  	if (iopoll_q_count) {
3483  		if (ioc->reply_queue_count < (iopoll_q_count + MPT3_MIN_IRQS))
3484  			iopoll_q_count = 0;
3485  		ioc->reply_queue_count = min_t(int,
3486  		    ioc->reply_queue_count + iopoll_q_count,
3487  		    ioc->msix_vector_count);
3488  	}
3489  
3490  	/*
3491  	 * Starting index of io uring poll queues in reply queue list.
3492  	 */
3493  	ioc->iopoll_q_start_index =
3494  	    ioc->reply_queue_count - iopoll_q_count;
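	/*
	 * Example (illustrative only): with reply_queue_count = 10 and
	 * iopoll_q_count = 2, msix indices 8 and 9 become io uring poll
	 * queues while indices 0-7 remain interrupt driven reply queues.
	 */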
3495  
3496  	r = _base_alloc_irq_vectors(ioc);
3497  	if (r < 0) {
3498  		ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
3499  		goto try_ioapic;
3500  	}
3501  
3502  	/*
3503  	 * Adjust the reply queue count if the number of allocated
3504  	 * MSI-X vectors is less than the requested number
3505  	 * of MSI-X vectors.
3506  	 */
3507  	if (r < ioc->iopoll_q_start_index) {
3508  		ioc->reply_queue_count = r + iopoll_q_count;
3509  		ioc->iopoll_q_start_index =
3510  		    ioc->reply_queue_count - iopoll_q_count;
3511  	}
3512  
3513  	ioc->msix_enable = 1;
3514  	for (i = 0; i < ioc->reply_queue_count; i++) {
3515  		r = _base_request_irq(ioc, i);
3516  		if (r) {
3517  			mpt3sas_base_free_irq(ioc);
3518  			mpt3sas_base_disable_msix(ioc);
3519  			goto try_ioapic;
3520  		}
3521  	}
3522  
3523  	ioc_info(ioc, "High IOPs queues : %s\n",
3524  			ioc->high_iops_queues ? "enabled" : "disabled");
3525  
3526  	return 0;
3527  
3528  /* fall back to io_apic interrupt routing */
3529   try_ioapic:
3530  	ioc->high_iops_queues = 0;
3531  	ioc_info(ioc, "High IOPs queues : disabled\n");
3532  	ioc->reply_queue_count = 1;
3533  	ioc->iopoll_q_start_index = ioc->reply_queue_count - 0;
3534  	r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_INTX);
3535  	if (r < 0) {
3536  		dfailprintk(ioc,
3537  			    ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
3538  				     r));
3539  	} else
3540  		r = _base_request_irq(ioc, 0);
3541  
3542  	return r;
3543  }
3544  
3545  /**
3546   * mpt3sas_base_unmap_resources - free controller resources
3547   * @ioc: per adapter object
3548   */
3549  static void
3550  mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
3551  {
3552  	struct pci_dev *pdev = ioc->pdev;
3553  
3554  	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3555  
3556  	mpt3sas_base_free_irq(ioc);
3557  	mpt3sas_base_disable_msix(ioc);
3558  
3559  	kfree(ioc->replyPostRegisterIndex);
3560  	ioc->replyPostRegisterIndex = NULL;
3561  
3562  
3563  	if (ioc->chip_phys) {
3564  		iounmap(ioc->chip);
3565  		ioc->chip_phys = 0;
3566  	}
3567  
3568  	if (pci_is_enabled(pdev)) {
3569  		pci_release_selected_regions(ioc->pdev, ioc->bars);
3570  		pci_disable_device(pdev);
3571  	}
3572  }
3573  
3574  static int
3575  _base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
3576  
3577  /**
3578   * mpt3sas_base_check_for_fault_and_issue_reset - check if IOC is in fault state
3579   *     and, if it is, issue a diag reset.
3580   * @ioc: per adapter object
3581   *
3582   * Return: 0 for success, non-zero for failure.
3583   */
3584  int
3585  mpt3sas_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
3586  {
3587  	u32 ioc_state;
3588  	int rc = -EFAULT;
3589  
3590  	dinitprintk(ioc, pr_info("%s\n", __func__));
3591  	if (ioc->pci_error_recovery)
3592  		return 0;
3593  	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3594  	dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
3595  
3596  	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3597  		mpt3sas_print_fault_code(ioc, ioc_state &
3598  		    MPI2_DOORBELL_DATA_MASK);
3599  		mpt3sas_base_mask_interrupts(ioc);
3600  		rc = _base_diag_reset(ioc);
3601  	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
3602  	    MPI2_IOC_STATE_COREDUMP) {
3603  		mpt3sas_print_coredump_info(ioc, ioc_state &
3604  		     MPI2_DOORBELL_DATA_MASK);
3605  		mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
3606  		mpt3sas_base_mask_interrupts(ioc);
3607  		rc = _base_diag_reset(ioc);
3608  	}
3609  
3610  	return rc;
3611  }
3612  
3613  /**
3614   * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
3615   * @ioc: per adapter object
3616   *
3617   * Return: 0 for success, non-zero for failure.
3618   */
3619  int
3620  mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
3621  {
3622  	struct pci_dev *pdev = ioc->pdev;
3623  	u32 memap_sz;
3624  	u32 pio_sz;
3625  	int i, r = 0, rc;
3626  	u64 pio_chip = 0;
3627  	phys_addr_t chip_phys = 0;
3628  	struct adapter_reply_queue *reply_q;
3629  	int iopoll_q_count = 0;
3630  
3631  	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3632  
3633  	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3634  	if (pci_enable_device_mem(pdev)) {
3635  		ioc_warn(ioc, "pci_enable_device_mem: failed\n");
3636  		ioc->bars = 0;
3637  		return -ENODEV;
3638  	}
3639  
3640  
3641  	if (pci_request_selected_regions(pdev, ioc->bars,
3642  	    ioc->driver_name)) {
3643  		ioc_warn(ioc, "pci_request_selected_regions: failed\n");
3644  		ioc->bars = 0;
3645  		r = -ENODEV;
3646  		goto out_fail;
3647  	}
3648  
3649  	pci_set_master(pdev);
3650  
3651  
3652  	if (_base_config_dma_addressing(ioc, pdev) != 0) {
3653  		ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
3654  		r = -ENODEV;
3655  		goto out_fail;
3656  	}
3657  
3658  	for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
3659  	     (!memap_sz || !pio_sz); i++) {
3660  		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
3661  			if (pio_sz)
3662  				continue;
3663  			pio_chip = (u64)pci_resource_start(pdev, i);
3664  			pio_sz = pci_resource_len(pdev, i);
3665  		} else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3666  			if (memap_sz)
3667  				continue;
3668  			ioc->chip_phys = pci_resource_start(pdev, i);
3669  			chip_phys = ioc->chip_phys;
3670  			memap_sz = pci_resource_len(pdev, i);
3671  			ioc->chip = ioremap(ioc->chip_phys, memap_sz);
3672  		}
3673  	}
3674  
3675  	if (ioc->chip == NULL) {
3676  		ioc_err(ioc,
3677  		    "unable to map adapter memory! or resource not found\n");
3678  		r = -EINVAL;
3679  		goto out_fail;
3680  	}
3681  
3682  	mpt3sas_base_mask_interrupts(ioc);
3683  
3684  	r = _base_get_ioc_facts(ioc);
3685  	if (r) {
3686  		rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
3687  		if (rc || (_base_get_ioc_facts(ioc)))
3688  			goto out_fail;
3689  	}
3690  
3691  	if (!ioc->rdpq_array_enable_assigned) {
3692  		ioc->rdpq_array_enable = ioc->rdpq_array_capable;
3693  		ioc->rdpq_array_enable_assigned = 1;
3694  	}
3695  
3696  	r = _base_enable_msix(ioc);
3697  	if (r)
3698  		goto out_fail;
3699  
3700  	iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index;
3701  	for (i = 0; i < iopoll_q_count; i++) {
3702  		atomic_set(&ioc->io_uring_poll_queues[i].busy, 0);
3703  		atomic_set(&ioc->io_uring_poll_queues[i].pause, 0);
3704  	}
3705  
3706  	if (!ioc->is_driver_loading)
3707  		_base_init_irqpolls(ioc);
3708  	/* Use the Combined reply queue feature only for SAS3 C0 & higher
3709  	 * revision HBAs and also only when reply queue count is greater than 8
3710  	 */
3711  	if (ioc->combined_reply_queue) {
3712  		/* Determine the Supplemental Reply Post Host Index Registers
3713  		 * Address. The Supplemental Reply Post Host Index Registers
3714  		 * start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
3715  		 * each register is MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET
3716  		 * bytes from the previous one.
3717  		 */
3718  		ioc->replyPostRegisterIndex = kcalloc(
3719  		     ioc->combined_reply_index_count,
3720  		     sizeof(resource_size_t *), GFP_KERNEL);
3721  		if (!ioc->replyPostRegisterIndex) {
3722  			ioc_err(ioc,
3723  			    "allocation for replyPostRegisterIndex failed!\n");
3724  			r = -ENOMEM;
3725  			goto out_fail;
3726  		}
3727  
3728  		for (i = 0; i < ioc->combined_reply_index_count; i++) {
3729  			ioc->replyPostRegisterIndex[i] =
3730  				(resource_size_t __iomem *)
3731  				((u8 __force *)&ioc->chip->Doorbell +
3732  				 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
3733  				 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
3734  		}
3735  	}
3736  
3737  	if (ioc->is_warpdrive) {
3738  		ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
3739  		    &ioc->chip->ReplyPostHostIndex;
3740  
3741  		for (i = 1; i < ioc->cpu_msix_table_sz; i++)
3742  			ioc->reply_post_host_index[i] =
3743  			(resource_size_t __iomem *)
3744  			((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
3745  			* 4)));
3746  	}
3747  
3748  	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3749  		if (reply_q->msix_index >= ioc->iopoll_q_start_index) {
3750  			pr_info("%s: enabled: index: %d\n",
3751  			    reply_q->name, reply_q->msix_index);
3752  			continue;
3753  		}
3754  
3755  		pr_info("%s: %s enabled: IRQ %d\n",
3756  			reply_q->name,
3757  			ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
3758  			pci_irq_vector(ioc->pdev, reply_q->msix_index));
3759  	}
3760  
3761  	ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
3762  		 &chip_phys, ioc->chip, memap_sz);
3763  	ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
3764  		 (unsigned long long)pio_chip, pio_sz);
3765  
3766  	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
3767  	pci_save_state(pdev);
3768  	return 0;
3769  
3770   out_fail:
3771  	mpt3sas_base_unmap_resources(ioc);
3772  	return r;
3773  }
3774  
3775  /**
3776   * mpt3sas_base_get_msg_frame - obtain request mf pointer
3777   * @ioc: per adapter object
3778   * @smid: system request message index(smid zero is invalid)
3779   *
3780   * Return: virt pointer to message frame.
3781   */
3782  void *
3783  mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3784  {
3785  	return (void *)(ioc->request + (smid * ioc->request_sz));
3786  }
3787  
3788  /**
3789   * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
3790   * @ioc: per adapter object
3791   * @smid: system request message index
3792   *
3793   * Return: virt pointer to sense buffer.
3794   */
3795  void *
3796  mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3797  {
3798  	return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
3799  }
3800  
3801  /**
3802   * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
3803   * @ioc: per adapter object
3804   * @smid: system request message index
3805   *
3806   * Return: phys pointer to the low 32bit address of the sense buffer.
3807   */
3808  __le32
3809  mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3810  {
3811  	return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
3812  	    SCSI_SENSE_BUFFERSIZE));
3813  }
3814  
3815  /**
3816   * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
3817   * @ioc: per adapter object
3818   * @smid: system request message index
3819   *
3820   * Return: virt pointer to a PCIe SGL.
3821   */
3822  void *
3823  mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3824  {
3825  	return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
3826  }
3827  
3828  /**
3829   * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
3830   * @ioc: per adapter object
3831   * @smid: system request message index
3832   *
3833   * Return: phys pointer to the address of the PCIe buffer.
3834   */
3835  dma_addr_t
3836  mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3837  {
3838  	return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
3839  }
3840  
3841  /**
3842   * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
3843   * @ioc: per adapter object
3844   * @phys_addr: lower 32 physical addr of the reply
3845   *
3846   * Converts 32bit lower physical addr into a virt address.
3847   */
3848  void *
3849  mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
3850  {
3851  	if (!phys_addr)
3852  		return NULL;
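	/*
	 * Illustrative note: the firmware reports only the low 32 bits of the
	 * reply frame's DMA address, so the offset into the reply pool is
	 * recovered by subtracting the low 32 bits of reply_dma before adding
	 * it to the pool's virtual base address.
	 */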
3853  	return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
3854  }
3855  
3856  /**
3857   * _base_get_msix_index - get the msix index
3858   * @ioc: per adapter object
3859   * @scmd: scsi_cmnd object
3860   *
3861   * Return: msix index of general reply queues,
3862   * i.e. reply queue on which IO request's reply
3863   * should be posted by the HBA firmware.
3864   */
3865  static inline u8
3866  _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
3867  	struct scsi_cmnd *scmd)
3868  {
3869  	/* Enables reply_queue load balancing */
3870  	if (ioc->msix_load_balance)
3871  		return ioc->reply_queue_count ?
3872  		    base_mod64(atomic64_add_return(1,
3873  		    &ioc->total_io_cnt), ioc->reply_queue_count) : 0;
3874  
3875  	if (scmd && ioc->shost->nr_hw_queues > 1) {
3876  		u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
3877  
3878  		return blk_mq_unique_tag_to_hwq(tag) +
3879  			ioc->high_iops_queues;
3880  	}
3881  
3882  	return ioc->cpu_msix_table[raw_smp_processor_id()];
3883  }
3884  
3885  /**
3886   * _base_get_high_iops_msix_index - get the msix index of
3887   *				high iops queues
3888   * @ioc: per adapter object
3889   * @scmd: scsi_cmnd object
3890   *
3891   * Return: msix index of high iops reply queues.
3892   * i.e. high iops reply queue on which IO request's
3893   * reply should be posted by the HBA firmware.
3894   */
3895  static inline u8
3896  _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
3897  	struct scsi_cmnd *scmd)
3898  {
3899  	/*
3900  	 * Round robin the IO interrupts among the high iops
3901  	 * reply queues in terms of batch count 16 when the number of
3902  	 * outstanding IOs on the target device is >= 8.
3903  	 */
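	/*
	 * Illustrative note: dividing the running counter by the batch count
	 * before the modulo steers consecutive IOs to the same high iops
	 * reply queue in runs of 16, then moves on to the next queue,
	 * wrapping after MPT3SAS_HIGH_IOPS_REPLY_QUEUES queues.
	 */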
3904  
3905  	if (scsi_device_busy(scmd->device) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
3906  		return base_mod64((
3907  		    atomic64_add_return(1, &ioc->high_iops_outstanding) /
3908  		    MPT3SAS_HIGH_IOPS_BATCH_COUNT),
3909  		    MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
3910  
3911  	return _base_get_msix_index(ioc, scmd);
3912  }
3913  
3914  /**
3915   * mpt3sas_base_get_smid - obtain a free smid from internal queue
3916   * @ioc: per adapter object
3917   * @cb_idx: callback index
3918   *
3919   * Return: smid (zero is invalid)
3920   */
3921  u16
3922  mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3923  {
3924  	unsigned long flags;
3925  	struct request_tracker *request;
3926  	u16 smid;
3927  
3928  	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3929  	if (list_empty(&ioc->internal_free_list)) {
3930  		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3931  		ioc_err(ioc, "%s: smid not available\n", __func__);
3932  		return 0;
3933  	}
3934  
3935  	request = list_entry(ioc->internal_free_list.next,
3936  	    struct request_tracker, tracker_list);
3937  	request->cb_idx = cb_idx;
3938  	smid = request->smid;
3939  	list_del(&request->tracker_list);
3940  	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3941  	return smid;
3942  }
3943  
3944  /**
3945   * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
3946   * @ioc: per adapter object
3947   * @cb_idx: callback index
3948   * @scmd: pointer to scsi command object
3949   *
3950   * Return: smid (zero is invalid)
3951   */
3952  u16
3953  mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
3954  	struct scsi_cmnd *scmd)
3955  {
3956  	struct scsiio_tracker *request = scsi_cmd_priv(scmd);
3957  	u16 smid;
3958  	u32 tag, unique_tag;
3959  
3960  	unique_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
3961  	tag = blk_mq_unique_tag_to_tag(unique_tag);
3962  
3963  	/*
3964  	 * Store hw queue number corresponding to the tag.
3965  	 * This hw queue number is used later to determine
3966  	 * the unique_tag using the logic below. This unique_tag
3967  	 * is used to retrieve the scmd pointer corresponding
3968  	 * to tag using scsi_host_find_tag() API.
3969  	 *
3970  	 * tag = smid - 1;
3971  	 * unique_tag = ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
3972  	 */
3973  	ioc->io_queue_num[tag] = blk_mq_unique_tag_to_hwq(unique_tag);
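	/*
	 * Example (illustrative, assuming BLK_MQ_UNIQUE_TAG_BITS is 16): for
	 * hw queue 1 and tag 5, unique_tag is (1 << 16) | 5, io_queue_num[5]
	 * stores 1, and the smid returned below is tag + 1 = 6.
	 */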
3974  
3975  	smid = tag + 1;
3976  	request->cb_idx = cb_idx;
3977  	request->smid = smid;
3978  	request->scmd = scmd;
3979  	INIT_LIST_HEAD(&request->chain_list);
3980  	return smid;
3981  }
3982  
3983  /**
3984   * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
3985   * @ioc: per adapter object
3986   * @cb_idx: callback index
3987   *
3988   * Return: smid (zero is invalid)
3989   */
3990  u16
3991  mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3992  {
3993  	unsigned long flags;
3994  	struct request_tracker *request;
3995  	u16 smid;
3996  
3997  	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3998  	if (list_empty(&ioc->hpr_free_list)) {
3999  		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4000  		return 0;
4001  	}
4002  
4003  	request = list_entry(ioc->hpr_free_list.next,
4004  	    struct request_tracker, tracker_list);
4005  	request->cb_idx = cb_idx;
4006  	smid = request->smid;
4007  	list_del(&request->tracker_list);
4008  	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4009  	return smid;
4010  }
4011  
4012  static void
4013  _base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
4014  {
4015  	/*
4016  	 * See _wait_for_commands_to_complete() call with regards to this code.
4017  	 */
4018  	if (ioc->shost_recovery && ioc->pending_io_count) {
4019  		ioc->pending_io_count = scsi_host_busy(ioc->shost);
4020  		if (ioc->pending_io_count == 0)
4021  			wake_up(&ioc->reset_wq);
4022  	}
4023  }
4024  
4025  void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
4026  			   struct scsiio_tracker *st)
4027  {
4028  	if (WARN_ON(st->smid == 0))
4029  		return;
4030  	st->cb_idx = 0xFF;
4031  	st->direct_io = 0;
4032  	st->scmd = NULL;
4033  	atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
4034  	st->smid = 0;
4035  }
4036  
4037  /**
4038   * mpt3sas_base_free_smid - put smid back on free_list
4039   * @ioc: per adapter object
4040   * @smid: system request message index
4041   */
4042  void
4043  mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4044  {
4045  	unsigned long flags;
4046  	int i;
4047  
4048  	if (smid < ioc->hi_priority_smid) {
4049  		struct scsiio_tracker *st;
4050  		void *request;
4051  
4052  		st = _get_st_from_smid(ioc, smid);
4053  		if (!st) {
4054  			_base_recovery_check(ioc);
4055  			return;
4056  		}
4057  
4058  		/* Clear MPI request frame */
4059  		request = mpt3sas_base_get_msg_frame(ioc, smid);
4060  		memset(request, 0, ioc->request_sz);
4061  
4062  		mpt3sas_base_clear_st(ioc, st);
4063  		_base_recovery_check(ioc);
4064  		ioc->io_queue_num[smid - 1] = 0;
4065  		return;
4066  	}
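	/*
	 * Illustrative note: smids are carved from one pool -
	 * [1, hi_priority_smid) are SCSI IO smids handled above,
	 * [hi_priority_smid, internal_smid) are hi-priority smids and
	 * [internal_smid, hba_queue_depth] are internal smids; the latter
	 * two are returned to their free lists below.
	 */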
4067  
4068  	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4069  	if (smid < ioc->internal_smid) {
4070  		/* hi-priority */
4071  		i = smid - ioc->hi_priority_smid;
4072  		ioc->hpr_lookup[i].cb_idx = 0xFF;
4073  		list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
4074  	} else if (smid <= ioc->hba_queue_depth) {
4075  		/* internal queue */
4076  		i = smid - ioc->internal_smid;
4077  		ioc->internal_lookup[i].cb_idx = 0xFF;
4078  		list_add(&ioc->internal_lookup[i].tracker_list,
4079  		    &ioc->internal_free_list);
4080  	}
4081  	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4082  }
4083  
4084  /**
4085   * _base_mpi_ep_writeq - 32 bit write to MMIO
4086   * @b: data payload
4087   * @addr: address in MMIO space
4088   * @writeq_lock: spin lock
4089   *
4090   * This is special handling for the MPI EP to take care of 32 bit
4091   * environments where it is not guaranteed to send the entire word
4092   * in one transfer.
4093   */
4094  static inline void
4095  _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
4096  					spinlock_t *writeq_lock)
4097  {
4098  	unsigned long flags;
4099  
4100  	spin_lock_irqsave(writeq_lock, flags);
4101  	__raw_writel((u32)(b), addr);
4102  	__raw_writel((u32)(b >> 32), (addr + 4));
4103  	spin_unlock_irqrestore(writeq_lock, flags);
4104  }
4105  
4106  /**
4107   * _base_writeq - 64 bit write to MMIO
4108   * @b: data payload
4109   * @addr: address in MMIO space
4110   * @writeq_lock: spin lock
4111   *
4112   * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
4113   * care of 32 bit environments where it is not guaranteed to send the entire word
4114   * in one transfer.
4115   */
4116  #if defined(writeq) && defined(CONFIG_64BIT)
4117  static inline void
4118  _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
4119  {
4120  	wmb();
4121  	__raw_writeq(b, addr);
4122  	barrier();
4123  }
4124  #else
4125  static inline void
4126  _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
4127  {
4128  	_base_mpi_ep_writeq(b, addr, writeq_lock);
4129  }
4130  #endif
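/*
 * Illustrative note on the two _base_writeq() variants above: when the
 * architecture provides a native 64-bit writeq(), the request descriptor is
 * posted with a single MMIO write; otherwise it is posted as two 32-bit
 * writes under writeq_lock so that descriptors posted from different CPUs
 * cannot interleave their low and high halves.
 */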
4131  
4132  /**
4133   * _base_set_and_get_msix_index - get the msix index and assign to msix_io
4134   *                                variable of scsi tracker
4135   * @ioc: per adapter object
4136   * @smid: system request message index
4137   *
4138   * Return: msix index.
4139   */
4140  static u8
4141  _base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4142  {
4143  	struct scsiio_tracker *st = NULL;
4144  
4145  	if (smid < ioc->hi_priority_smid)
4146  		st = _get_st_from_smid(ioc, smid);
4147  
4148  	if (st == NULL)
4149  		return  _base_get_msix_index(ioc, NULL);
4150  
4151  	st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
4152  	return st->msix_io;
4153  }
4154  
4155  /**
4156   * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
4157   * @ioc: per adapter object
4158   * @smid: system request message index
4159   * @handle: device handle
4160   */
4161  static void
4162  _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc,
4163  	u16 smid, u16 handle)
4164  {
4165  	Mpi2RequestDescriptorUnion_t descriptor;
4166  	u64 *request = (u64 *)&descriptor;
4167  	void *mpi_req_iomem;
4168  	__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
4169  
4170  	_clone_sg_entries(ioc, (void *) mfp, smid);
4171  	mpi_req_iomem = (void __force *)ioc->chip +
4172  			MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
4173  	_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
4174  					ioc->request_sz);
4175  	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
4176  	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4177  	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
4178  	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
4179  	descriptor.SCSIIO.LMID = 0;
4180  	_base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4181  	    &ioc->scsi_lookup_lock);
4182  }
4183  
4184  /**
4185   * _base_put_smid_scsi_io - send SCSI_IO request to firmware
4186   * @ioc: per adapter object
4187   * @smid: system request message index
4188   * @handle: device handle
4189   */
4190  static void
4191  _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
4192  {
4193  	Mpi2RequestDescriptorUnion_t descriptor;
4194  	u64 *request = (u64 *)&descriptor;
4195  
4196  
4197  	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
4198  	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4199  	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
4200  	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
4201  	descriptor.SCSIIO.LMID = 0;
4202  	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4203  	    &ioc->scsi_lookup_lock);
4204  }
4205  
4206  /**
4207   * _base_put_smid_fast_path - send fast path request to firmware
4208   * @ioc: per adapter object
4209   * @smid: system request message index
4210   * @handle: device handle
4211   */
4212  static void
4213  _base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4214  	u16 handle)
4215  {
4216  	Mpi2RequestDescriptorUnion_t descriptor;
4217  	u64 *request = (u64 *)&descriptor;
4218  
4219  	descriptor.SCSIIO.RequestFlags =
4220  	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
4221  	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4222  	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
4223  	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
4224  	descriptor.SCSIIO.LMID = 0;
4225  	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4226  	    &ioc->scsi_lookup_lock);
4227  }
4228  
4229  /**
4230   * _base_put_smid_hi_priority - send Task Management request to firmware
4231   * @ioc: per adapter object
4232   * @smid: system request message index
4233   * @msix_task: msix_task will be the same as the msix of the IO in case of task abort, else 0
4234   */
4235  static void
4236  _base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4237  	u16 msix_task)
4238  {
4239  	Mpi2RequestDescriptorUnion_t descriptor;
4240  	void *mpi_req_iomem;
4241  	u64 *request;
4242  
4243  	if (ioc->is_mcpu_endpoint) {
4244  		__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
4245  
4246  		/* TBD 256 is offset within sys register. */
4247  		mpi_req_iomem = (void __force *)ioc->chip
4248  					+ MPI_FRAME_START_OFFSET
4249  					+ (smid * ioc->request_sz);
4250  		_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
4251  							ioc->request_sz);
4252  	}
4253  
4254  	request = (u64 *)&descriptor;
4255  
4256  	descriptor.HighPriority.RequestFlags =
4257  	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
4258  	descriptor.HighPriority.MSIxIndex =  msix_task;
4259  	descriptor.HighPriority.SMID = cpu_to_le16(smid);
4260  	descriptor.HighPriority.LMID = 0;
4261  	descriptor.HighPriority.Reserved1 = 0;
4262  	if (ioc->is_mcpu_endpoint)
4263  		_base_mpi_ep_writeq(*request,
4264  				&ioc->chip->RequestDescriptorPostLow,
4265  				&ioc->scsi_lookup_lock);
4266  	else
4267  		_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4268  		    &ioc->scsi_lookup_lock);
4269  }
4270  
4271  /**
4272   * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
4273   *  firmware
4274   * @ioc: per adapter object
4275   * @smid: system request message index
4276   */
4277  void
4278  mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4279  {
4280  	Mpi2RequestDescriptorUnion_t descriptor;
4281  	u64 *request = (u64 *)&descriptor;
4282  
4283  	descriptor.Default.RequestFlags =
4284  		MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
4285  	descriptor.Default.MSIxIndex =  _base_set_and_get_msix_index(ioc, smid);
4286  	descriptor.Default.SMID = cpu_to_le16(smid);
4287  	descriptor.Default.LMID = 0;
4288  	descriptor.Default.DescriptorTypeDependent = 0;
4289  	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4290  	    &ioc->scsi_lookup_lock);
4291  }
4292  
4293  /**
4294   * _base_put_smid_default - Default, primarily used for config pages
4295   * @ioc: per adapter object
4296   * @smid: system request message index
4297   */
4298  static void
4299  _base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4300  {
4301  	Mpi2RequestDescriptorUnion_t descriptor;
4302  	void *mpi_req_iomem;
4303  	u64 *request;
4304  
4305  	if (ioc->is_mcpu_endpoint) {
4306  		__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
4307  
4308  		_clone_sg_entries(ioc, (void *) mfp, smid);
4309  		/* TBD 256 is offset within sys register */
4310  		mpi_req_iomem = (void __force *)ioc->chip +
4311  			MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
4312  		_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
4313  							ioc->request_sz);
4314  	}
4315  	request = (u64 *)&descriptor;
4316  	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
4317  	descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4318  	descriptor.Default.SMID = cpu_to_le16(smid);
4319  	descriptor.Default.LMID = 0;
4320  	descriptor.Default.DescriptorTypeDependent = 0;
4321  	if (ioc->is_mcpu_endpoint)
4322  		_base_mpi_ep_writeq(*request,
4323  				&ioc->chip->RequestDescriptorPostLow,
4324  				&ioc->scsi_lookup_lock);
4325  	else
4326  		_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4327  				&ioc->scsi_lookup_lock);
4328  }
4329  
4330  /**
4331   * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
4332   *   Atomic Request Descriptor
4333   * @ioc: per adapter object
4334   * @smid: system request message index
4335   * @handle: device handle, unused in this function, for function type match
4336   *
4337   * Return: nothing.
4338   */
4339  static void
4340  _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4341  	u16 handle)
4342  {
4343  	Mpi26AtomicRequestDescriptor_t descriptor;
4344  	u32 *request = (u32 *)&descriptor;
4345  
4346  	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
4347  	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4348  	descriptor.SMID = cpu_to_le16(smid);
4349  
4350  	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4351  }
4352  
4353  /**
4354   * _base_put_smid_fast_path_atomic - send fast path request to firmware
4355   * using Atomic Request Descriptor
4356   * @ioc: per adapter object
4357   * @smid: system request message index
4358   * @handle: device handle, unused in this function, for function type match
4359   * Return: nothing
4360   */
4361  static void
4362  _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4363  	u16 handle)
4364  {
4365  	Mpi26AtomicRequestDescriptor_t descriptor;
4366  	u32 *request = (u32 *)&descriptor;
4367  
4368  	descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
4369  	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4370  	descriptor.SMID = cpu_to_le16(smid);
4371  
4372  	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4373  }
4374  
4375  /**
4376   * _base_put_smid_hi_priority_atomic - send Task Management request to
4377   * firmware using Atomic Request Descriptor
4378   * @ioc: per adapter object
4379   * @smid: system request message index
4380   * @msix_task: msix_task will be the same as the msix of the IO in case of task abort, else 0
4381   *
4382   * Return: nothing.
4383   */
4384  static void
4385  _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4386  	u16 msix_task)
4387  {
4388  	Mpi26AtomicRequestDescriptor_t descriptor;
4389  	u32 *request = (u32 *)&descriptor;
4390  
4391  	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
4392  	descriptor.MSIxIndex = msix_task;
4393  	descriptor.SMID = cpu_to_le16(smid);
4394  
4395  	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4396  }
4397  
4398  /**
4399   * _base_put_smid_default_atomic - Default, primarily used for config pages
4400   * use Atomic Request Descriptor
4401   * @ioc: per adapter object
4402   * @smid: system request message index
4403   *
4404   * Return: nothing.
4405   */
4406  static void
4407  _base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4408  {
4409  	Mpi26AtomicRequestDescriptor_t descriptor;
4410  	u32 *request = (u32 *)&descriptor;
4411  
4412  	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
4413  	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4414  	descriptor.SMID = cpu_to_le16(smid);
4415  
4416  	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4417  }
4418  
4419  /**
4420   * _base_display_OEMs_branding - Display branding string
4421   * @ioc: per adapter object
4422   */
4423  static void
4424  _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
4425  {
4426  	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
4427  		return;
4428  
4429  	switch (ioc->pdev->subsystem_vendor) {
4430  	case PCI_VENDOR_ID_INTEL:
4431  		switch (ioc->pdev->device) {
4432  		case MPI2_MFGPAGE_DEVID_SAS2008:
4433  			switch (ioc->pdev->subsystem_device) {
4434  			case MPT2SAS_INTEL_RMS2LL080_SSDID:
4435  				ioc_info(ioc, "%s\n",
4436  					 MPT2SAS_INTEL_RMS2LL080_BRANDING);
4437  				break;
4438  			case MPT2SAS_INTEL_RMS2LL040_SSDID:
4439  				ioc_info(ioc, "%s\n",
4440  					 MPT2SAS_INTEL_RMS2LL040_BRANDING);
4441  				break;
4442  			case MPT2SAS_INTEL_SSD910_SSDID:
4443  				ioc_info(ioc, "%s\n",
4444  					 MPT2SAS_INTEL_SSD910_BRANDING);
4445  				break;
4446  			default:
4447  				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4448  					 ioc->pdev->subsystem_device);
4449  				break;
4450  			}
4451  			break;
4452  		case MPI2_MFGPAGE_DEVID_SAS2308_2:
4453  			switch (ioc->pdev->subsystem_device) {
4454  			case MPT2SAS_INTEL_RS25GB008_SSDID:
4455  				ioc_info(ioc, "%s\n",
4456  					 MPT2SAS_INTEL_RS25GB008_BRANDING);
4457  				break;
4458  			case MPT2SAS_INTEL_RMS25JB080_SSDID:
4459  				ioc_info(ioc, "%s\n",
4460  					 MPT2SAS_INTEL_RMS25JB080_BRANDING);
4461  				break;
4462  			case MPT2SAS_INTEL_RMS25JB040_SSDID:
4463  				ioc_info(ioc, "%s\n",
4464  					 MPT2SAS_INTEL_RMS25JB040_BRANDING);
4465  				break;
4466  			case MPT2SAS_INTEL_RMS25KB080_SSDID:
4467  				ioc_info(ioc, "%s\n",
4468  					 MPT2SAS_INTEL_RMS25KB080_BRANDING);
4469  				break;
4470  			case MPT2SAS_INTEL_RMS25KB040_SSDID:
4471  				ioc_info(ioc, "%s\n",
4472  					 MPT2SAS_INTEL_RMS25KB040_BRANDING);
4473  				break;
4474  			case MPT2SAS_INTEL_RMS25LB040_SSDID:
4475  				ioc_info(ioc, "%s\n",
4476  					 MPT2SAS_INTEL_RMS25LB040_BRANDING);
4477  				break;
4478  			case MPT2SAS_INTEL_RMS25LB080_SSDID:
4479  				ioc_info(ioc, "%s\n",
4480  					 MPT2SAS_INTEL_RMS25LB080_BRANDING);
4481  				break;
4482  			default:
4483  				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4484  					 ioc->pdev->subsystem_device);
4485  				break;
4486  			}
4487  			break;
4488  		case MPI25_MFGPAGE_DEVID_SAS3008:
4489  			switch (ioc->pdev->subsystem_device) {
4490  			case MPT3SAS_INTEL_RMS3JC080_SSDID:
4491  				ioc_info(ioc, "%s\n",
4492  					 MPT3SAS_INTEL_RMS3JC080_BRANDING);
4493  				break;
4494  
4495  			case MPT3SAS_INTEL_RS3GC008_SSDID:
4496  				ioc_info(ioc, "%s\n",
4497  					 MPT3SAS_INTEL_RS3GC008_BRANDING);
4498  				break;
4499  			case MPT3SAS_INTEL_RS3FC044_SSDID:
4500  				ioc_info(ioc, "%s\n",
4501  					 MPT3SAS_INTEL_RS3FC044_BRANDING);
4502  				break;
4503  			case MPT3SAS_INTEL_RS3UC080_SSDID:
4504  				ioc_info(ioc, "%s\n",
4505  					 MPT3SAS_INTEL_RS3UC080_BRANDING);
4506  				break;
4507  			default:
4508  				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4509  					 ioc->pdev->subsystem_device);
4510  				break;
4511  			}
4512  			break;
4513  		default:
4514  			ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4515  				 ioc->pdev->subsystem_device);
4516  			break;
4517  		}
4518  		break;
4519  	case PCI_VENDOR_ID_DELL:
4520  		switch (ioc->pdev->device) {
4521  		case MPI2_MFGPAGE_DEVID_SAS2008:
4522  			switch (ioc->pdev->subsystem_device) {
4523  			case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
4524  				ioc_info(ioc, "%s\n",
4525  					 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
4526  				break;
4527  			case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
4528  				ioc_info(ioc, "%s\n",
4529  					 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
4530  				break;
4531  			case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
4532  				ioc_info(ioc, "%s\n",
4533  					 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
4534  				break;
4535  			case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
4536  				ioc_info(ioc, "%s\n",
4537  					 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
4538  				break;
4539  			case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
4540  				ioc_info(ioc, "%s\n",
4541  					 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
4542  				break;
4543  			case MPT2SAS_DELL_PERC_H200_SSDID:
4544  				ioc_info(ioc, "%s\n",
4545  					 MPT2SAS_DELL_PERC_H200_BRANDING);
4546  				break;
4547  			case MPT2SAS_DELL_6GBPS_SAS_SSDID:
4548  				ioc_info(ioc, "%s\n",
4549  					 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
4550  				break;
4551  			default:
4552  				ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
4553  					 ioc->pdev->subsystem_device);
4554  				break;
4555  			}
4556  			break;
4557  		case MPI25_MFGPAGE_DEVID_SAS3008:
4558  			switch (ioc->pdev->subsystem_device) {
4559  			case MPT3SAS_DELL_12G_HBA_SSDID:
4560  				ioc_info(ioc, "%s\n",
4561  					 MPT3SAS_DELL_12G_HBA_BRANDING);
4562  				break;
4563  			default:
4564  				ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
4565  					 ioc->pdev->subsystem_device);
4566  				break;
4567  			}
4568  			break;
4569  		default:
4570  			ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
4571  				 ioc->pdev->subsystem_device);
4572  			break;
4573  		}
4574  		break;
4575  	case PCI_VENDOR_ID_CISCO:
4576  		switch (ioc->pdev->device) {
4577  		case MPI25_MFGPAGE_DEVID_SAS3008:
4578  			switch (ioc->pdev->subsystem_device) {
4579  			case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
4580  				ioc_info(ioc, "%s\n",
4581  					 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
4582  				break;
4583  			case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
4584  				ioc_info(ioc, "%s\n",
4585  					 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
4586  				break;
4587  			case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4588  				ioc_info(ioc, "%s\n",
4589  					 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4590  				break;
4591  			default:
4592  				ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4593  					 ioc->pdev->subsystem_device);
4594  				break;
4595  			}
4596  			break;
4597  		case MPI25_MFGPAGE_DEVID_SAS3108_1:
4598  			switch (ioc->pdev->subsystem_device) {
4599  			case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4600  				ioc_info(ioc, "%s\n",
4601  					 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4602  				break;
4603  			case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
4604  				ioc_info(ioc, "%s\n",
4605  					 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
4606  				break;
4607  			default:
4608  				ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4609  					 ioc->pdev->subsystem_device);
4610  				break;
4611  			}
4612  			break;
4613  		default:
4614  			ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
4615  				 ioc->pdev->subsystem_device);
4616  			break;
4617  		}
4618  		break;
4619  	case MPT2SAS_HP_3PAR_SSVID:
4620  		switch (ioc->pdev->device) {
4621  		case MPI2_MFGPAGE_DEVID_SAS2004:
4622  			switch (ioc->pdev->subsystem_device) {
4623  			case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
4624  				ioc_info(ioc, "%s\n",
4625  					 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
4626  				break;
4627  			default:
4628  				ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4629  					 ioc->pdev->subsystem_device);
4630  				break;
4631  			}
4632  			break;
4633  		case MPI2_MFGPAGE_DEVID_SAS2308_2:
4634  			switch (ioc->pdev->subsystem_device) {
4635  			case MPT2SAS_HP_2_4_INTERNAL_SSDID:
4636  				ioc_info(ioc, "%s\n",
4637  					 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
4638  				break;
4639  			case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
4640  				ioc_info(ioc, "%s\n",
4641  					 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
4642  				break;
4643  			case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
4644  				ioc_info(ioc, "%s\n",
4645  					 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
4646  				break;
4647  			case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
4648  				ioc_info(ioc, "%s\n",
4649  					 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
4650  				break;
4651  			default:
4652  				ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4653  					 ioc->pdev->subsystem_device);
4654  				break;
4655  			}
4656  			break;
4657  		default:
4658  			ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
4659  				 ioc->pdev->subsystem_device);
4660  			break;
4661  		}
4662  		break;
4663  	default:
4664  		break;
4665  	}
4666  }
4667  
4668  /**
4669   * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
4670   *				version from FW Image Header.
4671   * @ioc: per adapter object
4672   *
4673   * Return: 0 for success, non-zero for failure.
4674   */
4675  static int
4676  _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
4677  {
4678  	Mpi2FWImageHeader_t *fw_img_hdr;
4679  	Mpi26ComponentImageHeader_t *cmp_img_hdr;
4680  	Mpi25FWUploadRequest_t *mpi_request;
4681  	Mpi2FWUploadReply_t mpi_reply;
4682  	int r = 0, issue_diag_reset = 0;
4683  	u32  package_version = 0;
4684  	void *fwpkg_data = NULL;
4685  	dma_addr_t fwpkg_data_dma;
4686  	u16 smid, ioc_status;
4687  	size_t data_length;
4688  
4689  	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4690  
4691  	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
4692  		ioc_err(ioc, "%s: internal command already in use\n", __func__);
4693  		return -EAGAIN;
4694  	}
4695  
4696  	data_length = sizeof(Mpi2FWImageHeader_t);
4697  	fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
4698  			&fwpkg_data_dma, GFP_KERNEL);
4699  	if (!fwpkg_data) {
4700  		ioc_err(ioc,
4701  		    "Memory allocation for fwpkg data failed at %s:%d/%s()!\n",
4702  			__FILE__, __LINE__, __func__);
4703  		return -ENOMEM;
4704  	}
4705  
4706  	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4707  	if (!smid) {
4708  		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
4709  		r = -EAGAIN;
4710  		goto out;
4711  	}
4712  
4713  	ioc->base_cmds.status = MPT3_CMD_PENDING;
4714  	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4715  	ioc->base_cmds.smid = smid;
4716  	memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
4717  	mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
4718  	mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
4719  	mpi_request->ImageSize = cpu_to_le32(data_length);
4720  	ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
4721  			data_length);
4722  	init_completion(&ioc->base_cmds.done);
4723  	ioc->put_smid_default(ioc, smid);
4724  	/* Wait for 15 seconds */
4725  	wait_for_completion_timeout(&ioc->base_cmds.done,
4726  			FW_IMG_HDR_READ_TIMEOUT*HZ);
4727  	ioc_info(ioc, "%s: complete\n", __func__);
4728  	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4729  		ioc_err(ioc, "%s: timeout\n", __func__);
4730  		_debug_dump_mf(mpi_request,
4731  				sizeof(Mpi25FWUploadRequest_t)/4);
4732  		issue_diag_reset = 1;
4733  	} else {
4734  		memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
4735  		if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
4736  			memcpy(&mpi_reply, ioc->base_cmds.reply,
4737  					sizeof(Mpi2FWUploadReply_t));
4738  			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4739  						MPI2_IOCSTATUS_MASK;
4740  			if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
4741  				fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data;
4742  				if (le32_to_cpu(fw_img_hdr->Signature) ==
4743  				    MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) {
4744  					cmp_img_hdr =
4745  					    (Mpi26ComponentImageHeader_t *)
4746  					    (fwpkg_data);
4747  					package_version =
4748  					    le32_to_cpu(
4749  					    cmp_img_hdr->ApplicationSpecific);
4750  				} else
4751  					package_version =
4752  					    le32_to_cpu(
4753  					    fw_img_hdr->PackageVersion.Word);
4754  				if (package_version)
4755  					ioc_info(ioc,
4756  					"FW Package Ver(%02d.%02d.%02d.%02d)\n",
4757  					((package_version) & 0xFF000000) >> 24,
4758  					((package_version) & 0x00FF0000) >> 16,
4759  					((package_version) & 0x0000FF00) >> 8,
4760  					(package_version) & 0x000000FF);
4761  			} else {
4762  				_debug_dump_mf(&mpi_reply,
4763  						sizeof(Mpi2FWUploadReply_t)/4);
4764  			}
4765  		}
4766  	}
4767  	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4768  out:
4769  	if (fwpkg_data)
4770  		dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
4771  				fwpkg_data_dma);
4772  	if (issue_diag_reset) {
4773  		if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
4774  			return -EFAULT;
4775  		if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
4776  			return -EFAULT;
4777  		r = -EAGAIN;
4778  	}
4779  	return r;
4780  }
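
/*
 * Worked example (illustrative, hypothetical value): a raw package_version of
 * 0x0F000205 is decoded byte-wise above as
 *
 *	(0x0F000205 & 0xFF000000) >> 24 = 15
 *	(0x0F000205 & 0x00FF0000) >> 16 =  0
 *	(0x0F000205 & 0x0000FF00) >>  8 =  2
 *	 0x0F000205 & 0x000000FF        =  5
 *
 * and logged as "FW Package Ver(15.00.02.05)".
 */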
4781  
4782  /**
4783   * _base_display_ioc_capabilities - Display IOC's capabilities.
4784   * @ioc: per adapter object
4785   */
4786  static void
4787  _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
4788  {
4789  	int i = 0;
4790  	char desc[17] = {0};
4791  	u32 iounit_pg1_flags;
4792  
4793  	memtostr(desc, ioc->manu_pg0.ChipName);
4794  	ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x)\n",
4795  		 desc,
4796  		 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
4797  		 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
4798  		 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
4799  		 ioc->facts.FWVersion.Word & 0x000000FF,
4800  		 ioc->pdev->revision);
4801  
4802  	_base_display_OEMs_branding(ioc);
4803  
4804  	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
4805  		pr_info("%sNVMe", i ? "," : "");
4806  		i++;
4807  	}
4808  
4809  	ioc_info(ioc, "Protocol=(");
4810  
4811  	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
4812  		pr_cont("Initiator");
4813  		i++;
4814  	}
4815  
4816  	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
4817  		pr_cont("%sTarget", i ? "," : "");
4818  		i++;
4819  	}
4820  
4821  	i = 0;
4822  	pr_cont("), Capabilities=(");
4823  
4824  	if (!ioc->hide_ir_msg) {
4825  		if (ioc->facts.IOCCapabilities &
4826  		    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
4827  			pr_cont("Raid");
4828  			i++;
4829  		}
4830  	}
4831  
4832  	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
4833  		pr_cont("%sTLR", i ? "," : "");
4834  		i++;
4835  	}
4836  
4837  	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
4838  		pr_cont("%sMulticast", i ? "," : "");
4839  		i++;
4840  	}
4841  
4842  	if (ioc->facts.IOCCapabilities &
4843  	    MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
4844  		pr_cont("%sBIDI Target", i ? "," : "");
4845  		i++;
4846  	}
4847  
4848  	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
4849  		pr_cont("%sEEDP", i ? "," : "");
4850  		i++;
4851  	}
4852  
4853  	if (ioc->facts.IOCCapabilities &
4854  	    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
4855  		pr_cont("%sSnapshot Buffer", i ? "," : "");
4856  		i++;
4857  	}
4858  
4859  	if (ioc->facts.IOCCapabilities &
4860  	    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
4861  		pr_cont("%sDiag Trace Buffer", i ? "," : "");
4862  		i++;
4863  	}
4864  
4865  	if (ioc->facts.IOCCapabilities &
4866  	    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
4867  		pr_cont("%sDiag Extended Buffer", i ? "," : "");
4868  		i++;
4869  	}
4870  
4871  	if (ioc->facts.IOCCapabilities &
4872  	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
4873  		pr_cont("%sTask Set Full", i ? "," : "");
4874  		i++;
4875  	}
4876  
4877  	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4878  	if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
4879  		pr_cont("%sNCQ", i ? "," : "");
4880  		i++;
4881  	}
4882  
4883  	pr_cont(")\n");
4884  }
4885  
4886  /**
4887   * mpt3sas_base_update_missing_delay - change the missing delay timers
4888   * @ioc: per adapter object
4889   * @device_missing_delay: amount of time till device is reported missing
4890   * @io_missing_delay: interval IO is returned when there is a missing device
4891   *
4892   * Using values passed on the command line, this function modifies the device
4893   * missing delay, as well as the io missing delay. This should be called at
4894   * driver load time.
4895   */
4896  void
4897  mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
4898  	u16 device_missing_delay, u8 io_missing_delay)
4899  {
4900  	u16 dmd, dmd_new, dmd_original;
4901  	u8 io_missing_delay_original;
4902  	u16 sz;
4903  	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
4904  	Mpi2ConfigReply_t mpi_reply;
4905  	u8 num_phys = 0;
4906  	u16 ioc_status;
4907  
4908  	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
4909  	if (!num_phys)
4910  		return;
4911  
4912  	sz = struct_size(sas_iounit_pg1, PhyData, num_phys);
4913  	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
4914  	if (!sas_iounit_pg1) {
4915  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
4916  			__FILE__, __LINE__, __func__);
4917  		goto out;
4918  	}
4919  	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
4920  	    sas_iounit_pg1, sz))) {
4921  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
4922  			__FILE__, __LINE__, __func__);
4923  		goto out;
4924  	}
4925  	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4926  	    MPI2_IOCSTATUS_MASK;
4927  	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4928  		ioc_err(ioc, "failure at %s:%d/%s()!\n",
4929  			__FILE__, __LINE__, __func__);
4930  		goto out;
4931  	}
4932  
4933  	/* device missing delay */
4934  	dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
4935  	if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4936  		dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4937  	else
4938  		dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4939  	dmd_original = dmd;
4940  	if (device_missing_delay > 0x7F) {
4941  		dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
4942  		    device_missing_delay;
4943  		dmd = dmd / 16;
4944  		dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
4945  	} else
4946  		dmd = device_missing_delay;
4947  	sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
4948  
4949  	/* io missing delay */
4950  	io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
4951  	sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
4952  
4953  	if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
4954  	    sz)) {
4955  		if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4956  			dmd_new = (dmd &
4957  			    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4958  		else
4959  			dmd_new =
4960  		    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4961  		ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
4962  			 dmd_original, dmd_new);
4963  		ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
4964  			 io_missing_delay_original,
4965  			 io_missing_delay);
4966  		ioc->device_missing_delay = dmd_new;
4967  		ioc->io_missing_delay = io_missing_delay;
4968  	}
4969  
4970  out:
4971  	kfree(sas_iounit_pg1);
4972  }
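
/*
 * Worked example (illustrative, hypothetical value): a requested
 * device_missing_delay of 300 seconds exceeds the 7-bit limit (0x7F = 127),
 * so it is stored in units of 16 seconds:
 *
 *	dmd = 300 / 16 = 18, with MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 set
 *
 * and reads back as 18 * 16 = 288 seconds, i.e. the value is rounded down to
 * a multiple of 16.  Requests above 0x7F0 (2032) seconds are clamped.
 */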
4973  
4974  /**
4975   * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields
4976   *    according to performance mode.
4977   * @ioc : per adapter object
4978   *
4979   * Return: zero on success; otherwise return EAGAIN error code asking the
4980   * caller to retry.
4981   */
4982  static int
4983  _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
4984  {
4985  	Mpi2IOCPage1_t ioc_pg1;
4986  	Mpi2ConfigReply_t mpi_reply;
4987  	int rc;
4988  
4989  	rc = mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
4990  	if (rc)
4991  		return rc;
4992  	memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));
4993  
4994  	switch (perf_mode) {
4995  	case MPT_PERF_MODE_DEFAULT:
4996  	case MPT_PERF_MODE_BALANCED:
4997  		if (ioc->high_iops_queues) {
4998  			ioc_info(ioc,
4999  				"Enable interrupt coalescing only for first"
5000  				" %d reply queues\n",
5001  				MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
5002  			/*
5003  			 * If bit 31 is zero, interrupt coalescing is
5004  			 * enabled for all reply descriptor post queues.
5005  			 * If bit 31 is set, interrupt coalescing can be
5006  			 * enabled/disabled per group of 8 reply descriptor
5007  			 * post queues. So, to enable interrupt coalescing
5008  			 * only on the first reply descriptor post queue
5009  			 * group, bit 31 and bit 0 are set.
5010  			 */
5011  			ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
5012  			    ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
5013  			rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
5014  			if (rc)
5015  				return rc;
5016  			ioc_info(ioc, "performance mode: balanced\n");
5017  			return 0;
5018  		}
5019  		fallthrough;
5020  	case MPT_PERF_MODE_LATENCY:
5021  		/*
5022  		 * Enable interrupt coalescing on all reply queues
5023  		 * with timeout value 0xA
5024  		 */
5025  		ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
5026  		ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
5027  		ioc_pg1.ProductSpecific = 0;
5028  		rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
5029  		if (rc)
5030  			return rc;
5031  		ioc_info(ioc, "performance mode: latency\n");
5032  		break;
5033  	case MPT_PERF_MODE_IOPS:
5034  		/*
5035  		 * Enable interrupt coalescing on all reply queues.
5036  		 */
5037  		ioc_info(ioc,
5038  		    "performance mode: iops with coalescing timeout: 0x%x\n",
5039  		    le32_to_cpu(ioc_pg1.CoalescingTimeout));
5040  		ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
5041  		ioc_pg1.ProductSpecific = 0;
5042  		rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
5043  		if (rc)
5044  			return rc;
5045  		break;
5046  	}
5047  	return 0;
5048  }
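
/*
 * Worked example (illustrative; assumes MPT3SAS_HIGH_IOPS_REPLY_QUEUES is 8):
 * the balanced-mode value written above is
 *
 *	0x80000000 | ((1 << 8/8) - 1) = 0x80000000 | 0x1 = 0x80000001
 *
 * i.e. bit 31 selects per-group control and bit 0 enables coalescing only for
 * the first group of 8 reply descriptor post queues.
 */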
5049  
5050  /**
5051   * _base_get_event_diag_triggers - get event diag trigger values from
5052   *				persistent pages
5053   * @ioc : per adapter object
5054   *
5055   * Return: 0 on success; otherwise return failure status.
5056   */
5057  static int
5058  _base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5059  {
5060  	Mpi26DriverTriggerPage2_t trigger_pg2;
5061  	struct SL_WH_EVENT_TRIGGER_T *event_tg;
5062  	MPI26_DRIVER_MPI_EVENT_TRIGGER_ENTRY *mpi_event_tg;
5063  	Mpi2ConfigReply_t mpi_reply;
5064  	int r = 0, i = 0;
5065  	u16 count = 0;
5066  	u16 ioc_status;
5067  
5068  	r = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply,
5069  	    &trigger_pg2);
5070  	if (r)
5071  		return r;
5072  
5073  	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5074  	    MPI2_IOCSTATUS_MASK;
5075  	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5076  		dinitprintk(ioc,
5077  		    ioc_err(ioc,
5078  		    "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n",
5079  		   __func__, ioc_status));
5080  		return 0;
5081  	}
5082  
5083  	if (le16_to_cpu(trigger_pg2.NumMPIEventTrigger)) {
5084  		count = le16_to_cpu(trigger_pg2.NumMPIEventTrigger);
5085  		count = min_t(u16, NUM_VALID_ENTRIES, count);
5086  		ioc->diag_trigger_event.ValidEntries = count;
5087  
5088  		event_tg = &ioc->diag_trigger_event.EventTriggerEntry[0];
5089  		mpi_event_tg = &trigger_pg2.MPIEventTriggers[0];
5090  		for (i = 0; i < count; i++) {
5091  			event_tg->EventValue = le16_to_cpu(
5092  			    mpi_event_tg->MPIEventCode);
5093  			event_tg->LogEntryQualifier = le16_to_cpu(
5094  			    mpi_event_tg->MPIEventCodeSpecific);
5095  			event_tg++;
5096  			mpi_event_tg++;
5097  		}
5098  	}
5099  	return 0;
5100  }
5101  
5102  /**
5103   * _base_get_scsi_diag_triggers - get scsi diag trigger values from
5104   *				persistent pages
5105   * @ioc : per adapter object
5106   *
5107   * Return: 0 on success; otherwise return failure status.
5108   */
5109  static int
5110  _base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5111  {
5112  	Mpi26DriverTriggerPage3_t trigger_pg3;
5113  	struct SL_WH_SCSI_TRIGGER_T *scsi_tg;
5114  	MPI26_DRIVER_SCSI_SENSE_TRIGGER_ENTRY *mpi_scsi_tg;
5115  	Mpi2ConfigReply_t mpi_reply;
5116  	int r = 0, i = 0;
5117  	u16 count = 0;
5118  	u16 ioc_status;
5119  
5120  	r = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply,
5121  	    &trigger_pg3);
5122  	if (r)
5123  		return r;
5124  
5125  	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5126  	    MPI2_IOCSTATUS_MASK;
5127  	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5128  		dinitprintk(ioc,
5129  		    ioc_err(ioc,
5130  		    "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n",
5131  		    __func__, ioc_status));
5132  		return 0;
5133  	}
5134  
5135  	if (le16_to_cpu(trigger_pg3.NumSCSISenseTrigger)) {
5136  		count = le16_to_cpu(trigger_pg3.NumSCSISenseTrigger);
5137  		count = min_t(u16, NUM_VALID_ENTRIES, count);
5138  		ioc->diag_trigger_scsi.ValidEntries = count;
5139  
5140  		scsi_tg = &ioc->diag_trigger_scsi.SCSITriggerEntry[0];
5141  		mpi_scsi_tg = &trigger_pg3.SCSISenseTriggers[0];
5142  		for (i = 0; i < count; i++) {
5143  			scsi_tg->ASCQ = mpi_scsi_tg->ASCQ;
5144  			scsi_tg->ASC = mpi_scsi_tg->ASC;
5145  			scsi_tg->SenseKey = mpi_scsi_tg->SenseKey;
5146  
5147  			scsi_tg++;
5148  			mpi_scsi_tg++;
5149  		}
5150  	}
5151  	return 0;
5152  }
5153  
5154  /**
5155   * _base_get_mpi_diag_triggers - get mpi diag trigger values from
5156   *				persistent pages
5157   * @ioc : per adapter object
5158   *
5159   * Return: 0 on success; otherwise return failure status.
5160   */
5161  static int
5162  _base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5163  {
5164  	Mpi26DriverTriggerPage4_t trigger_pg4;
5165  	struct SL_WH_MPI_TRIGGER_T *status_tg;
5166  	MPI26_DRIVER_IOCSTATUS_LOGINFO_TRIGGER_ENTRY *mpi_status_tg;
5167  	Mpi2ConfigReply_t mpi_reply;
5168  	int r = 0, i = 0;
5169  	u16 count = 0;
5170  	u16 ioc_status;
5171  
5172  	r = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply,
5173  	    &trigger_pg4);
5174  	if (r)
5175  		return r;
5176  
5177  	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5178  	    MPI2_IOCSTATUS_MASK;
5179  	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5180  		dinitprintk(ioc,
5181  		    ioc_err(ioc,
5182  		    "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n",
5183  		    __func__, ioc_status));
5184  		return 0;
5185  	}
5186  
5187  	if (le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger)) {
5188  		count = le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger);
5189  		count = min_t(u16, NUM_VALID_ENTRIES, count);
5190  		ioc->diag_trigger_mpi.ValidEntries = count;
5191  
5192  		status_tg = &ioc->diag_trigger_mpi.MPITriggerEntry[0];
5193  		mpi_status_tg = &trigger_pg4.IOCStatusLoginfoTriggers[0];
5194  
5195  		for (i = 0; i < count; i++) {
5196  			status_tg->IOCStatus = le16_to_cpu(
5197  			    mpi_status_tg->IOCStatus);
5198  			status_tg->IocLogInfo = le32_to_cpu(
5199  			    mpi_status_tg->LogInfo);
5200  
5201  			status_tg++;
5202  			mpi_status_tg++;
5203  		}
5204  	}
5205  	return 0;
5206  }
5207  
5208  /**
5209   * _base_get_master_diag_triggers - get master diag trigger values from
5210   *				persistent pages
5211   * @ioc : per adapter object
5212   *
5213   * Return: 0 on success; otherwise return failure status.
5214   */
5215  static int
5216  _base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5217  {
5218  	Mpi26DriverTriggerPage1_t trigger_pg1;
5219  	Mpi2ConfigReply_t mpi_reply;
5220  	int r;
5221  	u16 ioc_status;
5222  
5223  	r = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply,
5224  	    &trigger_pg1);
5225  	if (r)
5226  		return r;
5227  
5228  	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5229  	    MPI2_IOCSTATUS_MASK;
5230  	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5231  		dinitprintk(ioc,
5232  		    ioc_err(ioc,
5233  		    "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n",
5234  		   __func__, ioc_status));
5235  		return 0;
5236  	}
5237  
5238  	if (le16_to_cpu(trigger_pg1.NumMasterTrigger))
5239  		ioc->diag_trigger_master.MasterData |=
5240  		    le32_to_cpu(
5241  		    trigger_pg1.MasterTriggers[0].MasterTriggerFlags);
5242  	return 0;
5243  }
5244  
5245  /**
5246   * _base_check_for_trigger_pages_support - checks whether HBA FW supports
5247   *					driver trigger pages or not
5248   * @ioc : per adapter object
5249   * @trigger_flags : address where trigger page0's TriggerFlags value is copied
5250   *
5251   * Return: 0 if HBA FW supports driver trigger pages; trigger page0's
5252   * TriggerFlags value is copied to @trigger_flags. Returns %-EFAULT if
5253   * driver trigger pages are not supported by FW, or %-EAGAIN if a diag reset
5254   * occurred due to an FW fault, asking the caller to retry the command.
5255   *
5256   */
5257  static int
5258  _base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc, u32 *trigger_flags)
5259  {
5260  	Mpi26DriverTriggerPage0_t trigger_pg0;
5261  	int r = 0;
5262  	Mpi2ConfigReply_t mpi_reply;
5263  	u16 ioc_status;
5264  
5265  	r = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply,
5266  	    &trigger_pg0);
5267  	if (r)
5268  		return r;
5269  
5270  	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5271  	    MPI2_IOCSTATUS_MASK;
5272  	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5273  		return -EFAULT;
5274  
5275  	*trigger_flags = le16_to_cpu(trigger_pg0.TriggerFlags);
5276  	return 0;
5277  }
5278  
5279  /**
5280   * _base_get_diag_triggers - Retrieve diag trigger values from
5281   *				persistent pages.
5282   * @ioc : per adapter object
5283   *
5284   * Return: zero on success; otherwise return EAGAIN error codes
5285   * asking the caller to retry.
5286   */
5287  static int
5288  _base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5289  {
5290  	u32 trigger_flags;
5291  	int r;
5292  
5293  	/*
5294  	 * Default setting of master trigger.
5295  	 */
5296  	ioc->diag_trigger_master.MasterData =
5297  	    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
5298  
5299  	r = _base_check_for_trigger_pages_support(ioc, &trigger_flags);
5300  	if (r) {
5301  		if (r == -EAGAIN)
5302  			return r;
5303  		/*
5304  		 * Don't go for error handling when FW doesn't support
5305  		 * driver trigger pages.
5306  		 */
5307  		return 0;
5308  	}
5309  
5310  	ioc->supports_trigger_pages = 1;
5311  
5312  	/*
5313  	 * Retrieve master diag trigger values from driver trigger pg1
5314  	 * if master trigger bit enabled in TriggerFlags.
5315  	 */
5316  	if ((u16)trigger_flags &
5317  	    MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID) {
5318  		r = _base_get_master_diag_triggers(ioc);
5319  		if (r)
5320  			return r;
5321  	}
5322  
5323  	/*
5324  	 * Retrieve event diag trigger values from driver trigger pg2
5325  	 * if event trigger bit enabled in TriggerFlags.
5326  	 */
5327  	if ((u16)trigger_flags &
5328  	    MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID) {
5329  		r = _base_get_event_diag_triggers(ioc);
5330  		if (r)
5331  			return r;
5332  	}
5333  
5334  	/*
5335  	 * Retrieve scsi diag trigger values from driver trigger pg3
5336  	 * if scsi trigger bit enabled in TriggerFlags.
5337  	 */
5338  	if ((u16)trigger_flags &
5339  	    MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID) {
5340  		r = _base_get_scsi_diag_triggers(ioc);
5341  		if (r)
5342  			return r;
5343  	}
5344  	/*
5345  	 * Retrieve mpi error diag trigger values from driver trigger pg4
5346  	 * if loginfo trigger bit enabled in TriggerFlags.
5347  	 */
5348  	if ((u16)trigger_flags &
5349  	    MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID) {
5350  		r = _base_get_mpi_diag_triggers(ioc);
5351  		if (r)
5352  			return r;
5353  	}
5354  	return 0;
5355  }
5356  
5357  /**
5358   * _base_update_diag_trigger_pages - Update the driver trigger pages after
5359   *			online FW update, in case updated FW supports driver
5360   *			trigger pages.
5361   * @ioc : per adapter object
5362   *
5363   * Return: nothing.
5364   */
5365  static void
5366  _base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
5367  {
5368  
5369  	if (ioc->diag_trigger_master.MasterData)
5370  		mpt3sas_config_update_driver_trigger_pg1(ioc,
5371  		    &ioc->diag_trigger_master, 1);
5372  
5373  	if (ioc->diag_trigger_event.ValidEntries)
5374  		mpt3sas_config_update_driver_trigger_pg2(ioc,
5375  		    &ioc->diag_trigger_event, 1);
5376  
5377  	if (ioc->diag_trigger_scsi.ValidEntries)
5378  		mpt3sas_config_update_driver_trigger_pg3(ioc,
5379  		    &ioc->diag_trigger_scsi, 1);
5380  
5381  	if (ioc->diag_trigger_mpi.ValidEntries)
5382  		mpt3sas_config_update_driver_trigger_pg4(ioc,
5383  		    &ioc->diag_trigger_mpi, 1);
5384  }
5385  
5386  /**
5387   * _base_assign_fw_reported_qd	- Get FW reported QD for SAS/SATA devices.
5388   *				- On failure set default QD values.
5389   * @ioc : per adapter object
5390   *
5391   * Returns 0 for success, non-zero for failure.
5392   *
5393   */
5394  static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
5395  {
5396  	Mpi2ConfigReply_t mpi_reply;
5397  	Mpi2SasIOUnitPage1_t sas_iounit_pg1;
5398  	Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1;
5399  	u16 depth;
5400  	int rc = 0;
5401  
5402  	ioc->max_wideport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
5403  	ioc->max_narrowport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
5404  	ioc->max_sata_qd = MPT3SAS_SATA_QUEUE_DEPTH;
5405  	ioc->max_nvme_qd = MPT3SAS_NVME_QUEUE_DEPTH;
5406  	if (!ioc->is_gen35_ioc)
5407  		goto out;
5408  	/* sas iounit page 1 */
5409  	rc = mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
5410  	    &sas_iounit_pg1, sizeof(Mpi2SasIOUnitPage1_t));
5411  	if (rc) {
5412  		pr_err("%s: failure at %s:%d/%s()!\n",
5413  		    ioc->name, __FILE__, __LINE__, __func__);
5414  		goto out;
5415  	}
5416  
5417  	depth = le16_to_cpu(sas_iounit_pg1.SASWideMaxQueueDepth);
5418  	ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
5419  
5420  	depth = le16_to_cpu(sas_iounit_pg1.SASNarrowMaxQueueDepth);
5421  	ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
5422  
5423  	depth = sas_iounit_pg1.SATAMaxQDepth;
5424  	ioc->max_sata_qd = (depth ? depth : MPT3SAS_SATA_QUEUE_DEPTH);
5425  
5426  	/* pcie iounit page 1 */
5427  	rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply,
5428  	    &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t));
5429  	if (rc) {
5430  		pr_err("%s: failure at %s:%d/%s()!\n",
5431  		    ioc->name, __FILE__, __LINE__, __func__);
5432  		goto out;
5433  	}
5434  	ioc->max_nvme_qd = (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) ?
5435  	    (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) :
5436  	    MPT3SAS_NVME_QUEUE_DEPTH;
5437  out:
5438  	dinitprintk(ioc, pr_err(
5439  	    "MaxWidePortQD: 0x%x MaxNarrowPortQD: 0x%x MaxSataQD: 0x%x MaxNvmeQD: 0x%x\n",
5440  	    ioc->max_wideport_qd, ioc->max_narrowport_qd,
5441  	    ioc->max_sata_qd, ioc->max_nvme_qd));
5442  	return rc;
5443  }
5444  
5445  /**
5446   * mpt3sas_atto_validate_nvram - validate the ATTO nvram read from mfg pg1
5447   *
5448   * @ioc : per adapter object
5449   * @n   : ptr to the ATTO nvram structure
5450   * Return: 0 for success, non-zero for failure.
5451   */
5452  static int
5453  mpt3sas_atto_validate_nvram(struct MPT3SAS_ADAPTER *ioc,
5454  			    struct ATTO_SAS_NVRAM *n)
5455  {
5456  	int r = -EINVAL;
5457  	union ATTO_SAS_ADDRESS *s1;
5458  	u32 len;
5459  	u8 *pb;
5460  	u8 ckSum;
5461  
5462  	/* validate nvram checksum */
5463  	pb = (u8 *) n;
5464  	ckSum = ATTO_SASNVR_CKSUM_SEED;
5465  	len = sizeof(struct ATTO_SAS_NVRAM);
5466  
5467  	while (len--)
5468  		ckSum = ckSum + pb[len];
5469  
5470  	if (ckSum) {
5471  		ioc_err(ioc, "Invalid ATTO NVRAM checksum\n");
5472  		return r;
5473  	}
5474  
5475  	s1 = (union ATTO_SAS_ADDRESS *) n->SasAddr;
5476  
5477  	if (n->Signature[0] != 'E'
5478  	|| n->Signature[1] != 'S'
5479  	|| n->Signature[2] != 'A'
5480  	|| n->Signature[3] != 'S')
5481  		ioc_err(ioc, "Invalid ATTO NVRAM signature\n");
5482  	else if (n->Version > ATTO_SASNVR_VERSION)
5483  		ioc_info(ioc, "Invalid ATTO NVRAM version\n");
5484  	else if ((n->SasAddr[7] & (ATTO_SAS_ADDR_ALIGN - 1))
5485  			|| s1->b[0] != 0x50
5486  			|| s1->b[1] != 0x01
5487  			|| s1->b[2] != 0x08
5488  			|| (s1->b[3] & 0xF0) != 0x60
5489  			|| ((s1->b[3] & 0x0F) | le32_to_cpu(s1->d[1])) == 0) {
5490  		ioc_err(ioc, "Invalid ATTO SAS address\n");
5491  	} else
5492  		r = 0;
5493  	return r;
5494  }
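
/*
 * Checksum sketch (illustrative): the image validates when the seed plus
 * every byte of the structure sums to zero modulo 256, so a writer stores in
 * whichever byte serves as the checksum the value
 *
 *	cksum = (u8)(0 - (ATTO_SASNVR_CKSUM_SEED + sum_of_remaining_bytes));
 *
 * which makes the running ckSum above end at 0 for an intact image.
 */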
5495  
5496  /**
5497   * mpt3sas_atto_get_sas_addr - get the ATTO SAS address from mfg page 1
5498   *
5499   * @ioc : per adapter object
5500   * @sas_addr : return sas address
5501   * Return: 0 for success, non-zero for failure.
5502   */
5503  static int
5504  mpt3sas_atto_get_sas_addr(struct MPT3SAS_ADAPTER *ioc, union ATTO_SAS_ADDRESS *sas_addr)
5505  {
5506  	Mpi2ManufacturingPage1_t mfg_pg1;
5507  	Mpi2ConfigReply_t mpi_reply;
5508  	struct ATTO_SAS_NVRAM *nvram;
5509  	int r;
5510  	__be64 addr;
5511  
5512  	r = mpt3sas_config_get_manufacturing_pg1(ioc, &mpi_reply, &mfg_pg1);
5513  	if (r) {
5514  		ioc_err(ioc, "Failed to read manufacturing page 1\n");
5515  		return r;
5516  	}
5517  
5518  	/* validate nvram */
5519  	nvram = (struct ATTO_SAS_NVRAM *) mfg_pg1.VPD;
5520  	r = mpt3sas_atto_validate_nvram(ioc, nvram);
5521  	if (r)
5522  		return r;
5523  
5524  	addr = *((__be64 *) nvram->SasAddr);
5525  	sas_addr->q = cpu_to_le64(be64_to_cpu(addr));
5526  	return r;
5527  }
5528  
5529  /**
5530   * mpt3sas_atto_init - perform initialization for ATTO branded
5531   *					adapter.
5532   * @ioc : per adapter object
5533   *
5534   * Return: 0 for success, non-zero for failure.
5535   */
5536  static int
5537  mpt3sas_atto_init(struct MPT3SAS_ADAPTER *ioc)
5538  {
5539  	int sz = 0;
5540  	Mpi2BiosPage4_t *bios_pg4 = NULL;
5541  	Mpi2ConfigReply_t mpi_reply;
5542  	int r;
5543  	int ix;
5544  	union ATTO_SAS_ADDRESS sas_addr;
5545  	union ATTO_SAS_ADDRESS temp;
5546  	union ATTO_SAS_ADDRESS bias;
5547  
5548  	r = mpt3sas_atto_get_sas_addr(ioc, &sas_addr);
5549  	if (r)
5550  		return r;
5551  
5552  	/* get header first to get size */
5553  	r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, NULL, 0);
5554  	if (r) {
5555  		ioc_err(ioc, "Failed to read ATTO bios page 4 header.\n");
5556  		return r;
5557  	}
5558  
5559  	sz = mpi_reply.Header.PageLength * sizeof(u32);
5560  	bios_pg4 = kzalloc(sz, GFP_KERNEL);
5561  	if (!bios_pg4) {
5562  		ioc_err(ioc, "Failed to allocate memory for ATTO bios page.\n");
5563  		return -ENOMEM;
5564  	}
5565  
5566  	/* read bios page 4 */
5567  	r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, bios_pg4, sz);
5568  	if (r) {
5569  		ioc_err(ioc, "Failed to read ATTO bios page 4\n");
5570  		goto out;
5571  	}
5572  
5573  	/* Update bios page 4 with the ATTO WWID */
5574  	bias.q = sas_addr.q;
5575  	bias.b[7] += ATTO_SAS_ADDR_DEVNAME_BIAS;
5576  
5577  	for (ix = 0; ix < bios_pg4->NumPhys; ix++) {
5578  		temp.q = sas_addr.q;
5579  		temp.b[7] += ix;
5580  		bios_pg4->Phy[ix].ReassignmentWWID = temp.q;
5581  		bios_pg4->Phy[ix].ReassignmentDeviceName = bias.q;
5582  	}
5583  	r = mpt3sas_config_set_bios_pg4(ioc, &mpi_reply, bios_pg4, sz);
5584  
5585  out:
5586  	kfree(bios_pg4);
5587  	return r;
5588  }
5589  
5590  /**
5591   * _base_static_config_pages - static start of day config pages
5592   * @ioc: per adapter object
 *
 * Return: 0 on success, anything else error.
5593   */
5594  static int
5595  _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
5596  {
5597  	Mpi2IOUnitPage8_t iounit_pg8;
5598  	Mpi2ConfigReply_t mpi_reply;
5599  	u32 iounit_pg1_flags;
5600  	u32 tg_flags = 0;
5601  	int rc;
5602  	ioc->nvme_abort_timeout = 30;
5603  
5604  	rc = mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply,
5605  	    &ioc->manu_pg0);
5606  	if (rc)
5607  		return rc;
5608  	if (ioc->ir_firmware) {
5609  		rc = mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
5610  		    &ioc->manu_pg10);
5611  		if (rc)
5612  			return rc;
5613  	}
5614  
5615  	if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) {
5616  		rc = mpt3sas_atto_init(ioc);
5617  		if (rc)
5618  			return rc;
5619  	}
5620  
5621  	/*
5622  	 * Ensure correct T10 PI operation if vendor left EEDPTagMode
5623  	 * flag unset in NVDATA.
5624  	 */
5625  	rc = mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply,
5626  	    &ioc->manu_pg11);
5627  	if (rc)
5628  		return rc;
5629  	if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
5630  		pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
5631  		    ioc->name);
5632  		ioc->manu_pg11.EEDPTagMode &= ~0x3;
5633  		ioc->manu_pg11.EEDPTagMode |= 0x1;
5634  		mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
5635  		    &ioc->manu_pg11);
5636  	}
5637  	if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
5638  		ioc->tm_custom_handling = 1;
5639  	else {
5640  		ioc->tm_custom_handling = 0;
5641  		if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
5642  			ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
5643  		else if (ioc->manu_pg11.NVMeAbortTO >
5644  					NVME_TASK_ABORT_MAX_TIMEOUT)
5645  			ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
5646  		else
5647  			ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
5648  	}
5649  	ioc->time_sync_interval =
5650  	    ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_MASK;
5651  	if (ioc->time_sync_interval) {
5652  		if (ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_UNIT_MASK)
5653  			ioc->time_sync_interval =
5654  			    ioc->time_sync_interval * SECONDS_PER_HOUR;
5655  		else
5656  			ioc->time_sync_interval =
5657  			    ioc->time_sync_interval * SECONDS_PER_MIN;
5658  		dinitprintk(ioc, ioc_info(ioc,
5659  		    "Driver-FW TimeSync interval is %d seconds. ManuPg11 TimeSync Unit is in %s\n",
5660  		    ioc->time_sync_interval, (ioc->manu_pg11.TimeSyncInterval &
5661  		    MPT3SAS_TIMESYNC_UNIT_MASK) ? "Hour" : "Minute"));
5662  	} else {
5663  		if (ioc->is_gen35_ioc)
5664  			ioc_warn(ioc,
5665  			    "TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n");
5666  	}
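	/*
	 * Worked example (illustrative; assumes MPT3SAS_TIMESYNC_MASK is 0x7F
	 * and MPT3SAS_TIMESYNC_UNIT_MASK is 0x80): a ManuPg11 TimeSyncInterval
	 * of 0x82 decodes to an interval of 2 with the unit bit set, giving a
	 * driver-FW time sync every 2 * SECONDS_PER_HOUR = 7200 seconds; 0x02
	 * with the unit bit clear would mean 2 * SECONDS_PER_MIN = 120 seconds.
	 */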
5667  	rc = _base_assign_fw_reported_qd(ioc);
5668  	if (rc)
5669  		return rc;
5670  
5671  	/*
5672  	 * ATTO doesn't use bios page 2 and 3 for bios settings.
5673  	 */
5674  	if (ioc->pdev->vendor ==  MPI2_MFGPAGE_VENDORID_ATTO)
5675  		ioc->bios_pg3.BiosVersion = 0;
5676  	else {
5677  		rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
5678  		if (rc)
5679  			return rc;
5680  		rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
5681  		if (rc)
5682  			return rc;
5683  	}
5684  
5685  	rc = mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
5686  	if (rc)
5687  		return rc;
5688  	rc = mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
5689  	if (rc)
5690  		return rc;
5691  	rc = mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
5692  	if (rc)
5693  		return rc;
5694  	rc = mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &iounit_pg8);
5695  	if (rc)
5696  		return rc;
5697  	_base_display_ioc_capabilities(ioc);
5698  
5699  	/*
5700  	 * Enable task_set_full handling in iounit_pg1 when the
5701  	 * facts capabilities indicate that it is supported.
5702  	 */
5703  	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
5704  	if ((ioc->facts.IOCCapabilities &
5705  	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
5706  		iounit_pg1_flags &=
5707  		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
5708  	else
5709  		iounit_pg1_flags |=
5710  		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
5711  	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
5712  	rc = mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
5713  	if (rc)
5714  		return rc;
5715  
5716  	if (iounit_pg8.NumSensors)
5717  		ioc->temp_sensors_count = iounit_pg8.NumSensors;
5718  	if (ioc->is_aero_ioc) {
5719  		rc = _base_update_ioc_page1_inlinewith_perf_mode(ioc);
5720  		if (rc)
5721  			return rc;
5722  	}
5723  	if (ioc->is_gen35_ioc) {
5724  		if (ioc->is_driver_loading) {
5725  			rc = _base_get_diag_triggers(ioc);
5726  			if (rc)
5727  				return rc;
5728  		} else {
5729  			/*
5730  			 * In case of online HBA FW update operation,
5731  			 * check whether updated FW supports the driver trigger
5732  			 * pages or not.
5733  			 * - If previous FW has not supported driver trigger
5734  			 *   pages and newer FW supports them then update these
5735  			 *   pages with current diag trigger values.
5736  			 * - If previous FW has supported driver trigger pages
5737  			 *   and new FW doesn't support them then disable
5738  			 *   support_trigger_pages flag.
5739  			 */
5740  			_base_check_for_trigger_pages_support(ioc, &tg_flags);
5741  			if (!ioc->supports_trigger_pages && tg_flags != -EFAULT)
5742  				_base_update_diag_trigger_pages(ioc);
5743  			else if (ioc->supports_trigger_pages &&
5744  			    tg_flags == -EFAULT)
5745  				ioc->supports_trigger_pages = 0;
5746  		}
5747  	}
5748  	return 0;
5749  }
5750  
5751  /**
5752   * mpt3sas_free_enclosure_list - release memory
5753   * @ioc: per adapter object
5754   *
5755   * Free memory allocated during enclosure add.
5756   */
5757  void
5758  mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
5759  {
5760  	struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
5761  
5762  	/* Free enclosure list */
5763  	list_for_each_entry_safe(enclosure_dev,
5764  			enclosure_dev_next, &ioc->enclosure_list, list) {
5765  		list_del(&enclosure_dev->list);
5766  		kfree(enclosure_dev);
5767  	}
5768  }
5769  
5770  /**
5771   * _base_release_memory_pools - release memory
5772   * @ioc: per adapter object
5773   *
5774   * Free memory allocated from _base_allocate_memory_pools.
5775   */
5776  static void
5777  _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
5778  {
5779  	int i = 0;
5780  	int j = 0;
5781  	int dma_alloc_count = 0;
5782  	struct chain_tracker *ct;
5783  	int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
5784  
5785  	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5786  
5787  	if (ioc->request) {
5788  		dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
5789  		    ioc->request,  ioc->request_dma);
5790  		dexitprintk(ioc,
5791  			    ioc_info(ioc, "request_pool(0x%p): free\n",
5792  				     ioc->request));
5793  		ioc->request = NULL;
5794  	}
5795  
5796  	if (ioc->sense) {
5797  		dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
5798  		dma_pool_destroy(ioc->sense_dma_pool);
5799  		dexitprintk(ioc,
5800  			    ioc_info(ioc, "sense_pool(0x%p): free\n",
5801  				     ioc->sense));
5802  		ioc->sense = NULL;
5803  	}
5804  
5805  	if (ioc->reply) {
5806  		dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
5807  		dma_pool_destroy(ioc->reply_dma_pool);
5808  		dexitprintk(ioc,
5809  			    ioc_info(ioc, "reply_pool(0x%p): free\n",
5810  				     ioc->reply));
5811  		ioc->reply = NULL;
5812  	}
5813  
5814  	if (ioc->reply_free) {
5815  		dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
5816  		    ioc->reply_free_dma);
5817  		dma_pool_destroy(ioc->reply_free_dma_pool);
5818  		dexitprintk(ioc,
5819  			    ioc_info(ioc, "reply_free_pool(0x%p): free\n",
5820  				     ioc->reply_free));
5821  		ioc->reply_free = NULL;
5822  	}
5823  
5824  	if (ioc->reply_post) {
5825  		dma_alloc_count = DIV_ROUND_UP(count,
5826  				RDPQ_MAX_INDEX_IN_ONE_CHUNK);
5827  		for (i = 0; i < count; i++) {
5828  			if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
5829  			    && dma_alloc_count) {
5830  				if (ioc->reply_post[i].reply_post_free) {
5831  					dma_pool_free(
5832  					    ioc->reply_post_free_dma_pool,
5833  					    ioc->reply_post[i].reply_post_free,
5834  					ioc->reply_post[i].reply_post_free_dma);
5835  					dexitprintk(ioc, ioc_info(ioc,
5836  					   "reply_post_free_pool(0x%p): free\n",
5837  					   ioc->reply_post[i].reply_post_free));
5838  					ioc->reply_post[i].reply_post_free =
5839  									NULL;
5840  				}
5841  				--dma_alloc_count;
5842  			}
5843  		}
5844  		dma_pool_destroy(ioc->reply_post_free_dma_pool);
5845  		if (ioc->reply_post_free_array &&
5846  			ioc->rdpq_array_enable) {
5847  			dma_pool_free(ioc->reply_post_free_array_dma_pool,
5848  			    ioc->reply_post_free_array,
5849  			    ioc->reply_post_free_array_dma);
5850  			ioc->reply_post_free_array = NULL;
5851  		}
5852  		dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
5853  		kfree(ioc->reply_post);
5854  	}
5855  
5856  	if (ioc->pcie_sgl_dma_pool) {
5857  		for (i = 0; i < ioc->scsiio_depth; i++) {
5858  			dma_pool_free(ioc->pcie_sgl_dma_pool,
5859  					ioc->pcie_sg_lookup[i].pcie_sgl,
5860  					ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5861  			ioc->pcie_sg_lookup[i].pcie_sgl = NULL;
5862  		}
5863  		dma_pool_destroy(ioc->pcie_sgl_dma_pool);
5864  	}
5865  	kfree(ioc->pcie_sg_lookup);
5866  	ioc->pcie_sg_lookup = NULL;
5867  
5868  	if (ioc->config_page) {
5869  		dexitprintk(ioc,
5870  			    ioc_info(ioc, "config_page(0x%p): free\n",
5871  				     ioc->config_page));
5872  		dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
5873  		    ioc->config_page, ioc->config_page_dma);
5874  	}
5875  
5876  	kfree(ioc->hpr_lookup);
5877  	ioc->hpr_lookup = NULL;
5878  	kfree(ioc->internal_lookup);
5879  	ioc->internal_lookup = NULL;
5880  	if (ioc->chain_lookup) {
5881  		for (i = 0; i < ioc->scsiio_depth; i++) {
5882  			for (j = ioc->chains_per_prp_buffer;
5883  			    j < ioc->chains_needed_per_io; j++) {
5884  				ct = &ioc->chain_lookup[i].chains_per_smid[j];
5885  				if (ct && ct->chain_buffer)
5886  					dma_pool_free(ioc->chain_dma_pool,
5887  						ct->chain_buffer,
5888  						ct->chain_buffer_dma);
5889  			}
5890  			kfree(ioc->chain_lookup[i].chains_per_smid);
5891  		}
5892  		dma_pool_destroy(ioc->chain_dma_pool);
5893  		kfree(ioc->chain_lookup);
5894  		ioc->chain_lookup = NULL;
5895  	}
5896  
5897  	kfree(ioc->io_queue_num);
5898  	ioc->io_queue_num = NULL;
5899  }
5900  
5901  /**
5902   * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set
5903   *	have the same upper 32 bits in their base memory address.
5904   * @start_address: Base address of a reply queue set
5905   * @pool_sz: Size of single Reply Descriptor Post Queues pool size
5906   *
5907   * Return: 1 if reply queues in a set have the same upper 32 bits in their
5908   * base memory address, else 0.
5909   */
5910  static int
5911  mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz)
5912  {
5913  	dma_addr_t end_address;
5914  
5915  	end_address = start_address + pool_sz - 1;
5916  
5917  	if (upper_32_bits(start_address) == upper_32_bits(end_address))
5918  		return 1;
5919  	else
5920  		return 0;
5921  }
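
/*
 * Worked example (illustrative, hypothetical addresses): a pool starting at
 * dma address 0x1FFFFF000 with pool_sz 0x2000 ends at 0x200000FFF; the upper
 * 32 bits differ (0x1 vs 0x2), so the pool straddles a 4GB boundary and the
 * function returns 0.  The same pool starting at 0x1FFFF0000 ends at
 * 0x1FFFF1FFF and returns 1.
 */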
5922  
5923  /**
5924   * _base_reduce_hba_queue_depth- Retry with reduced queue depth
5925   * @ioc: Adapter object
5926   *
5927   * Return: 0 for success, non-zero for failure.
5928   **/
5929  static inline int
5930  _base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc)
5931  {
5932  	int reduce_sz = 64;
5933  
5934  	if ((ioc->hba_queue_depth - reduce_sz) >
5935  	    (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
5936  		ioc->hba_queue_depth -= reduce_sz;
5937  		return 0;
5938  	} else
5939  		return -ENOMEM;
5940  }
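
/*
 * Usage sketch (illustrative): allocation paths that run out of memory can
 * shrink the queue and try again, for instance
 *
 *	if (_base_reduce_hba_queue_depth(ioc) == 0)
 *		goto retry_allocation;
 *	return -ENOMEM;
 *
 * where retry_allocation is a label at the start of the allocation sequence,
 * as in _base_allocate_memory_pools() below.
 */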
5941  
5942  /**
5943   * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory
5944   *			for pcie sgl pools.
5945   * @ioc: Adapter object
5946   * @sz: DMA Pool size
5947   *
5948   * Return: 0 for success, non-zero for failure.
5949   */
5950  
5951  static int
5952  _base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
5953  {
5954  	int i = 0, j = 0;
5955  	struct chain_tracker *ct;
5956  
5957  	ioc->pcie_sgl_dma_pool =
5958  	    dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz,
5959  	    ioc->page_size, 0);
5960  	if (!ioc->pcie_sgl_dma_pool) {
5961  		ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
5962  		return -ENOMEM;
5963  	}
5964  
5965  	ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
5966  	ioc->chains_per_prp_buffer =
5967  	    min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io);
5968  	for (i = 0; i < ioc->scsiio_depth; i++) {
5969  		ioc->pcie_sg_lookup[i].pcie_sgl =
5970  		    dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL,
5971  		    &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5972  		if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
5973  			ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
5974  			return -EAGAIN;
5975  		}
5976  
5977  		if (!mpt3sas_check_same_4gb_region(
5978  		    ioc->pcie_sg_lookup[i].pcie_sgl_dma, sz)) {
5979  			ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
5980  			    ioc->pcie_sg_lookup[i].pcie_sgl,
5981  			    (unsigned long long)
5982  			    ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5983  			ioc->use_32bit_dma = true;
5984  			return -EAGAIN;
5985  		}
5986  
5987  		for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
5988  			ct = &ioc->chain_lookup[i].chains_per_smid[j];
5989  			ct->chain_buffer =
5990  			    ioc->pcie_sg_lookup[i].pcie_sgl +
5991  			    (j * ioc->chain_segment_sz);
5992  			ct->chain_buffer_dma =
5993  			    ioc->pcie_sg_lookup[i].pcie_sgl_dma +
5994  			    (j * ioc->chain_segment_sz);
5995  		}
5996  	}
5997  	dinitprintk(ioc, ioc_info(ioc,
5998  	    "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
5999  	    ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
6000  	dinitprintk(ioc, ioc_info(ioc,
6001  	    "Number of chains can fit in a PRP page(%d)\n",
6002  	    ioc->chains_per_prp_buffer));
6003  	return 0;
6004  }
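
/*
 * Worked example (illustrative, hypothetical sizes): with a PCIe SGL element
 * size sz of 4096 bytes and a chain_segment_sz of 128 bytes, every PRP buffer
 * can also host 4096 / 128 = 32 chain segments; that count is then clamped to
 * chains_needed_per_io so no more chain buffers are carved out of the PRP
 * page than a single IO can ever use.
 */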
6005  
6006  /**
6007   * _base_allocate_chain_dma_pool - Allocating DMA'able memory
6008   *			for chain dma pool.
6009   * @ioc: Adapter object
6010   * @sz: DMA Pool size
6011   *
6012   * Return: 0 for success, non-zero for failure.
6013   */
6014  static int
6015  _base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
6016  {
6017  	int i = 0, j = 0;
6018  	struct chain_tracker *ctr;
6019  
6020  	ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
6021  	    ioc->chain_segment_sz, 16, 0);
6022  	if (!ioc->chain_dma_pool)
6023  		return -ENOMEM;
6024  
6025  	for (i = 0; i < ioc->scsiio_depth; i++) {
6026  		for (j = ioc->chains_per_prp_buffer;
6027  		    j < ioc->chains_needed_per_io; j++) {
6028  			ctr = &ioc->chain_lookup[i].chains_per_smid[j];
6029  			ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool,
6030  			    GFP_KERNEL, &ctr->chain_buffer_dma);
6031  			if (!ctr->chain_buffer)
6032  				return -EAGAIN;
6033  			if (!mpt3sas_check_same_4gb_region(
6034  			    ctr->chain_buffer_dma, ioc->chain_segment_sz)) {
6035  				ioc_err(ioc,
6036  				    "Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n",
6037  				    ctr->chain_buffer,
6038  				    (unsigned long long)ctr->chain_buffer_dma);
6039  				ioc->use_32bit_dma = true;
6040  				return -EAGAIN;
6041  			}
6042  		}
6043  	}
6044  	dinitprintk(ioc, ioc_info(ioc,
6045  	    "chain_lookup depth (%d), frame_size(%d), pool_size(%d kB)\n",
6046  	    ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth *
6047  	    (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) *
6048  	    ioc->chain_segment_sz))/1024));
6049  	return 0;
6050  }
6051  
6052  /**
6053   * _base_allocate_sense_dma_pool - Allocating DMA'able memory
6054   *			for sense dma pool.
6055   * @ioc: Adapter object
6056   * @sz: DMA Pool size
6057   * Return: 0 for success, non-zero for failure.
6058   */
6059  static int
6060  _base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
6061  {
6062  	ioc->sense_dma_pool =
6063  	    dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0);
6064  	if (!ioc->sense_dma_pool)
6065  		return -ENOMEM;
6066  	ioc->sense = dma_pool_alloc(ioc->sense_dma_pool,
6067  	    GFP_KERNEL, &ioc->sense_dma);
6068  	if (!ioc->sense)
6069  		return -EAGAIN;
6070  	if (!mpt3sas_check_same_4gb_region(ioc->sense_dma, sz)) {
6071  		dinitprintk(ioc, pr_err(
6072  		    "Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n",
6073  		    ioc->sense, (unsigned long long) ioc->sense_dma));
6074  		ioc->use_32bit_dma = true;
6075  		return -EAGAIN;
6076  	}
6077  	ioc_info(ioc,
6078  	    "sense pool(0x%p) - dma(0x%llx): depth(%d), element_size(%d), pool_size (%d kB)\n",
6079  	    ioc->sense, (unsigned long long)ioc->sense_dma,
6080  	    ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz/1024);
6081  	return 0;
6082  }
6083  
6084  /**
6085   * _base_allocate_reply_pool - Allocating DMA'able memory
6086   *			for reply pool.
6087   * @ioc: Adapter object
6088   * @sz: DMA Pool size
6089   * Return: 0 for success, non-zero for failure.
6090   */
6091  static int
6092  _base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
6093  {
6094  	/* reply pool, 4 byte align */
6095  	ioc->reply_dma_pool = dma_pool_create("reply pool",
6096  	    &ioc->pdev->dev, sz, 4, 0);
6097  	if (!ioc->reply_dma_pool)
6098  		return -ENOMEM;
6099  	ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
6100  	    &ioc->reply_dma);
6101  	if (!ioc->reply)
6102  		return -EAGAIN;
6103  	if (!mpt3sas_check_same_4gb_region(ioc->reply_dma, sz)) {
6104  		dinitprintk(ioc, pr_err(
6105  		    "Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n",
6106  		    ioc->reply, (unsigned long long) ioc->reply_dma));
6107  		ioc->use_32bit_dma = true;
6108  		return -EAGAIN;
6109  	}
6110  	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
6111  	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
6112  	ioc_info(ioc,
6113  	    "reply pool(0x%p) - dma(0x%llx): depth(%d), frame_size(%d), pool_size(%d kB)\n",
6114  	    ioc->reply, (unsigned long long)ioc->reply_dma,
6115  	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024);
6116  	return 0;
6117  }
6118  
6119  /**
6120   * _base_allocate_reply_free_dma_pool - Allocating DMA'able memory
6121   *			for reply free dma pool.
6122   * @ioc: Adapter object
6123   * @sz: DMA Pool size
6124   * Return: 0 for success, non-zero for failure.
6125   */
6126  static int
6127  _base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
6128  {
6129  	/* reply free queue, 16 byte align */
6130  	ioc->reply_free_dma_pool = dma_pool_create(
6131  	    "reply_free pool", &ioc->pdev->dev, sz, 16, 0);
6132  	if (!ioc->reply_free_dma_pool)
6133  		return -ENOMEM;
6134  	ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool,
6135  	    GFP_KERNEL, &ioc->reply_free_dma);
6136  	if (!ioc->reply_free)
6137  		return -EAGAIN;
6138  	if (!mpt3sas_check_same_4gb_region(ioc->reply_free_dma, sz)) {
6139  		dinitprintk(ioc,
6140  		    pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
6141  		    ioc->reply_free, (unsigned long long) ioc->reply_free_dma));
6142  		ioc->use_32bit_dma = true;
6143  		return -EAGAIN;
6144  	}
6145  	memset(ioc->reply_free, 0, sz);
6146  	dinitprintk(ioc, ioc_info(ioc,
6147  	    "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
6148  	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
6149  	dinitprintk(ioc, ioc_info(ioc,
6150  	    "reply_free_dma (0x%llx)\n",
6151  	    (unsigned long long)ioc->reply_free_dma));
6152  	return 0;
6153  }
6154  
6155  /**
6156   * _base_allocate_reply_post_free_array - Allocating DMA'able memory
6157   *			for reply post free array.
6158   * @ioc: Adapter object
6159   * @reply_post_free_array_sz: DMA Pool size
6160   * Return: 0 for success, non-zero for failure.
6161   */
6162  
6163  static int
6164  _base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc,
6165  	u32 reply_post_free_array_sz)
6166  {
6167  	ioc->reply_post_free_array_dma_pool =
6168  	    dma_pool_create("reply_post_free_array pool",
6169  	    &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
6170  	if (!ioc->reply_post_free_array_dma_pool)
6171  		return -ENOMEM;
6172  	ioc->reply_post_free_array =
6173  	    dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
6174  	    GFP_KERNEL, &ioc->reply_post_free_array_dma);
6175  	if (!ioc->reply_post_free_array)
6176  		return -EAGAIN;
6177  	if (!mpt3sas_check_same_4gb_region(ioc->reply_post_free_array_dma,
6178  	    reply_post_free_array_sz)) {
6179  		dinitprintk(ioc, pr_err(
6180  		    "Bad Reply Post Free Array! Array (0x%p) Array dma = (0x%llx)\n",
6181  		    ioc->reply_post_free_array,
6182  		    (unsigned long long) ioc->reply_post_free_array_dma));
6183  		ioc->use_32bit_dma = true;
6184  		return -EAGAIN;
6185  	}
6186  	return 0;
6187  }
6188  /**
6189   * base_alloc_rdpq_dma_pool - Allocating DMA'able memory
6190   *                     for reply queues.
6191   * @ioc: per adapter object
6192   * @sz: DMA Pool size
6193   * Return: 0 for success, non-zero for failure.
6194   */
6195  static int
6196  base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
6197  {
6198  	int i = 0;
6199  	u32 dma_alloc_count = 0;
6200  	int reply_post_free_sz = ioc->reply_post_queue_depth *
6201  		sizeof(Mpi2DefaultReplyDescriptor_t);
6202  	int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
6203  
6204  	ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
6205  			GFP_KERNEL);
6206  	if (!ioc->reply_post)
6207  		return -ENOMEM;
6208  	/*
6209  	 *  For INVADER_SERIES each set of 8 reply queues (0-7, 8-15, ..) and for
6210  	 *  VENTURA_SERIES each set of 16 reply queues (0-15, 16-31, ..) should
6211  	 *  be within a 4GB boundary, i.e. reply queues in a set must have the
6212  	 *  same upper 32 bits in their memory address. So the driver allocates
6213  	 *  the DMA'able memory for reply queues accordingly.
6214  	 *  The VENTURA_SERIES limitation is used to manage
6215  	 *  INVADER_SERIES as well.
6216  	 */
6217  	dma_alloc_count = DIV_ROUND_UP(count,
6218  				RDPQ_MAX_INDEX_IN_ONE_CHUNK);
6219  	ioc->reply_post_free_dma_pool =
6220  		dma_pool_create("reply_post_free pool",
6221  		    &ioc->pdev->dev, sz, 16, 0);
6222  	if (!ioc->reply_post_free_dma_pool)
6223  		return -ENOMEM;
6224  	for (i = 0; i < count; i++) {
6225  		if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
6226  			ioc->reply_post[i].reply_post_free =
6227  			    dma_pool_zalloc(ioc->reply_post_free_dma_pool,
6228  				GFP_KERNEL,
6229  				&ioc->reply_post[i].reply_post_free_dma);
6230  			if (!ioc->reply_post[i].reply_post_free)
6231  				return -ENOMEM;
6232  			/*
6233  			 * Each set of RDPQ pool must satisfy 4gb boundary
6234  			 * restriction.
6235  			 * 1) Check if allocated resources for RDPQ pool are in
6236  			 *	the same 4GB range.
6237  			 * 2) If #1 is true, continue with 64 bit DMA.
6238  			 * 3) If #1 is false, return -EAGAIN, which means free
6239  			 * all the resources, set the DMA mask to 32 bit and allocate again.
6240  			 */
6241  			if (!mpt3sas_check_same_4gb_region(
6242  				ioc->reply_post[i].reply_post_free_dma, sz)) {
6243  				dinitprintk(ioc,
6244  				    ioc_err(ioc, "bad Replypost free pool(0x%p) "
6245  				    "reply_post_free_dma = (0x%llx)\n",
6246  				    ioc->reply_post[i].reply_post_free,
6247  				    (unsigned long long)
6248  				    ioc->reply_post[i].reply_post_free_dma));
6249  				return -EAGAIN;
6250  			}
6251  			dma_alloc_count--;
6252  
6253  		} else {
6254  			ioc->reply_post[i].reply_post_free =
6255  			    (Mpi2ReplyDescriptorsUnion_t *)
6256  			    ((long)ioc->reply_post[i-1].reply_post_free
6257  			    + reply_post_free_sz);
6258  			ioc->reply_post[i].reply_post_free_dma =
6259  			    (dma_addr_t)
6260  			    (ioc->reply_post[i-1].reply_post_free_dma +
6261  			    reply_post_free_sz);
6262  		}
6263  	}
6264  	return 0;
6265  }
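
/*
 * Worked example (illustrative; assumes RDPQ_MAX_INDEX_IN_ONE_CHUNK is 16):
 * with 24 reply queues, DIV_ROUND_UP(24, 16) = 2 chunks are taken from the
 * reply_post_free pool.  Queues 0 and 16 get fresh dma_pool_zalloc() chunks,
 * while queues 1-15 and 17-23 are carved out of the preceding queue's chunk
 * at successive reply_post_free_sz offsets, so each group of 16 queues shares
 * the same upper 32 address bits.
 */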
6266  
6267  /**
6268   * _base_allocate_memory_pools - allocate start of day memory pools
6269   * @ioc: per adapter object
6270   *
6271   * Return: 0 success, anything else error.
6272   */
6273  static int
6274  _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
6275  {
6276  	struct mpt3sas_facts *facts;
6277  	u16 max_sge_elements;
6278  	u16 chains_needed_per_io;
6279  	u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
6280  	u32 retry_sz;
6281  	u32 rdpq_sz = 0, sense_sz = 0;
6282  	u16 max_request_credit, nvme_blocks_needed;
6283  	unsigned short sg_tablesize;
6284  	u16 sge_size;
6285  	int i;
6286  	int ret = 0, rc = 0;
6287  
6288  	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6289  
6290  
6291  	retry_sz = 0;
6292  	facts = &ioc->facts;
6293  
6294  	/* command line tunables for max sgl entries */
6295  	if (max_sgl_entries != -1)
6296  		sg_tablesize = max_sgl_entries;
6297  	else {
6298  		if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
6299  			sg_tablesize = MPT2SAS_SG_DEPTH;
6300  		else
6301  			sg_tablesize = MPT3SAS_SG_DEPTH;
6302  	}
6303  
6304  	/* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
6305  	if (reset_devices)
6306  		sg_tablesize = min_t(unsigned short, sg_tablesize,
6307  		   MPT_KDUMP_MIN_PHYS_SEGMENTS);
6308  
6309  	if (ioc->is_mcpu_endpoint)
6310  		ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
6311  	else {
6312  		if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
6313  			sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
6314  		else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
6315  			sg_tablesize = min_t(unsigned short, sg_tablesize,
6316  					SG_MAX_SEGMENTS);
6317  			ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
6318  				 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
6319  		}
6320  		ioc->shost->sg_tablesize = sg_tablesize;
6321  	}
6322  
6323  	ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
6324  		(facts->RequestCredit / 4));
6325  	if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
6326  		if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
6327  				INTERNAL_SCSIIO_CMDS_COUNT)) {
6328  			ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
6329  				facts->RequestCredit);
6330  			return -ENOMEM;
6331  		}
6332  		ioc->internal_depth = 10;
6333  	}
6334  
6335  	ioc->hi_priority_depth = ioc->internal_depth - (5);
6336  	/* command line tunables  for max controller queue depth */
6337  	if (max_queue_depth != -1 && max_queue_depth != 0) {
6338  		max_request_credit = min_t(u16, max_queue_depth +
6339  			ioc->internal_depth, facts->RequestCredit);
6340  		if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
6341  			max_request_credit =  MAX_HBA_QUEUE_DEPTH;
6342  	} else if (reset_devices)
6343  		max_request_credit = min_t(u16, facts->RequestCredit,
6344  		    (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
6345  	else
6346  		max_request_credit = min_t(u16, facts->RequestCredit,
6347  		    MAX_HBA_QUEUE_DEPTH);
6348  
6349  	/* Firmware maintains additional facts->HighPriorityCredit number of
6350  	 * credits for HiPriprity Request messages, so hba queue depth will be
6351  	 * sum of max_request_credit and high priority queue depth.
6352  	 */
6353  	ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
6354  
6355  	/* request frame size */
6356  	ioc->request_sz = facts->IOCRequestFrameSize * 4;
6357  
6358  	/* reply frame size */
6359  	ioc->reply_sz = facts->ReplyFrameSize * 4;
6360  
6361  	/* chain segment size */
6362  	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
6363  		if (facts->IOCMaxChainSegmentSize)
6364  			ioc->chain_segment_sz =
6365  					facts->IOCMaxChainSegmentSize *
6366  					MAX_CHAIN_ELEMT_SZ;
6367  		else
6368  			/* use 128-byte chain segments if IOCMaxChainSegmentSize is zero */
6369  			ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
6370  						    MAX_CHAIN_ELEMT_SZ;
6371  	} else
6372  		ioc->chain_segment_sz = ioc->request_sz;
6373  
6374  	/* calculate the max scatter element size */
6375  	sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
6376  
6377   retry_allocation:
6378  	total_sz = 0;
6379  	/* calculate number of sg elements left over in the 1st frame */
6380  	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
6381  	    sizeof(Mpi2SGEIOUnion_t)) + sge_size);
6382  	ioc->max_sges_in_main_message = max_sge_elements/sge_size;
6383  
6384  	/* now do the same for a chain buffer */
6385  	max_sge_elements = ioc->chain_segment_sz - sge_size;
6386  	ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
6387  
6388  	/*
6389  	 *  MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
6390  	 */
6391  	chains_needed_per_io = ((ioc->shost->sg_tablesize -
6392  	   ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
6393  	    + 1;
6394  	if (chains_needed_per_io > facts->MaxChainDepth) {
6395  		chains_needed_per_io = facts->MaxChainDepth;
6396  		ioc->shost->sg_tablesize = min_t(u16,
6397  		ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
6398  		* chains_needed_per_io), ioc->shost->sg_tablesize);
6399  	}
6400  	ioc->chains_needed_per_io = chains_needed_per_io;
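	/*
	 * Worked example with assumed sizes (illustration only): if 40 bytes
	 * of the main frame are left for SGEs and sge_size = 16, then
	 * max_sges_in_main_message = 40 / 16 = 2; with chain_segment_sz = 128,
	 * max_sges_in_chain_message = (128 - 16) / 16 = 7; a sg_tablesize of
	 * 30 then needs chains_needed_per_io = ((30 - 2) / 7) + 1 = 5.
	 */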
6401  
6402  	/* reply free queue sizing - taking into account for 64 FW events */
6403  	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
6404  
6405  	/* mCPU manages a single counter for simplicity */
6406  	if (ioc->is_mcpu_endpoint)
6407  		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
6408  	else {
6409  		/* calculate reply descriptor post queue depth */
6410  		ioc->reply_post_queue_depth = ioc->hba_queue_depth +
6411  			ioc->reply_free_queue_depth +  1;
6412  		/* align the reply post queue on the next 16 count boundary */
6413  		if (ioc->reply_post_queue_depth % 16)
6414  			ioc->reply_post_queue_depth += 16 -
6415  				(ioc->reply_post_queue_depth % 16);
6416  	}
6417  
6418  	if (ioc->reply_post_queue_depth >
6419  	    facts->MaxReplyDescriptorPostQueueDepth) {
6420  		ioc->reply_post_queue_depth =
6421  				facts->MaxReplyDescriptorPostQueueDepth -
6422  		    (facts->MaxReplyDescriptorPostQueueDepth % 16);
6423  		ioc->hba_queue_depth =
6424  				((ioc->reply_post_queue_depth - 64) / 2) - 1;
6425  		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
6426  	}
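	/*
	 * Worked example of the sizing above (assumed numbers, illustration
	 * only): with hba_queue_depth = 1000, reply_free_queue_depth =
	 * 1000 + 64 = 1064; for non-mCPU parts reply_post_queue_depth =
	 * 1000 + 1064 + 1 = 2065, rounded up to the next multiple of 16,
	 * i.e. 2080.  If that exceeds MaxReplyDescriptorPostQueueDepth, the
	 * depths are recomputed from the firmware limit instead.
	 */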
6427  
6428  	ioc_info(ioc,
6429  	    "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), "
6430  	    "sge_per_io(%d), chains_per_io(%d)\n",
6431  	    ioc->max_sges_in_main_message,
6432  	    ioc->max_sges_in_chain_message,
6433  	    ioc->shost->sg_tablesize,
6434  	    ioc->chains_needed_per_io);
6435  
6436  	/* reply post queue, 16 byte align */
6437  	reply_post_free_sz = ioc->reply_post_queue_depth *
6438  	    sizeof(Mpi2DefaultReplyDescriptor_t);
6439  	rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
6440  	if ((_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
6441  	    || (ioc->reply_queue_count < RDPQ_MAX_INDEX_IN_ONE_CHUNK))
6442  		rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
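	/*
	 * Illustrative sizing (assuming an 8-byte reply descriptor and
	 * RDPQ_MAX_INDEX_IN_ONE_CHUNK = 16): with reply_post_queue_depth =
	 * 2080, reply_post_free_sz = 2080 * 8 = 16640 bytes, so one RDPQ
	 * chunk serving 16 reply queues needs rdpq_sz = 16 * 16640 bytes
	 * (about 260 kB) of DMA-coherent memory.
	 */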
6443  	ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
6444  	if (ret == -EAGAIN) {
6445  		/*
6446  		 * Free allocated bad RDPQ memory pools.
6447  		 * Change dma coherent mask to 32 bit and reallocate RDPQ
6448  		 */
6449  		_base_release_memory_pools(ioc);
6450  		ioc->use_32bit_dma = true;
6451  		if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
6452  			ioc_err(ioc,
6453  			    "32-bit DMA mask failed %s\n", pci_name(ioc->pdev));
6454  			return -ENODEV;
6455  		}
6456  		if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz))
6457  			return -ENOMEM;
6458  	} else if (ret == -ENOMEM)
6459  		return -ENOMEM;
6460  	total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 :
6461  	    DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK));
6462  	ioc->scsiio_depth = ioc->hba_queue_depth -
6463  	    ioc->hi_priority_depth - ioc->internal_depth;
6464  
6465  	/* set the scsi host can_queue depth,
6466  	 * accounting for internal commands that could be outstanding
6467  	 */
6468  	ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
6469  	dinitprintk(ioc,
6470  		    ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
6471  			     ioc->shost->can_queue));
6472  
6473  	/* contiguous pool for request and chains, 16 byte align, one extra
6474  	 * frame for smid=0
6475  	 */
6476  	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
6477  	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
6478  
6479  	/* hi-priority queue */
6480  	sz += (ioc->hi_priority_depth * ioc->request_sz);
6481  
6482  	/* internal queue */
6483  	sz += (ioc->internal_depth * ioc->request_sz);
6484  
6485  	ioc->request_dma_sz = sz;
6486  	ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
6487  			&ioc->request_dma, GFP_KERNEL);
6488  	if (!ioc->request) {
6489  		ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
6490  			ioc->hba_queue_depth, ioc->chains_needed_per_io,
6491  			ioc->request_sz, sz / 1024);
6492  		if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
6493  			goto out;
6494  		retry_sz = 64;
6495  		ioc->hba_queue_depth -= retry_sz;
6496  		_base_release_memory_pools(ioc);
6497  		goto retry_allocation;
6498  	}
6499  
6500  	if (retry_sz)
6501  		ioc_err(ioc, "request pool: dma_alloc_coherent succeeded: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
6502  			ioc->hba_queue_depth, ioc->chains_needed_per_io,
6503  			ioc->request_sz, sz / 1024);
6504  
6505  	/* hi-priority queue */
6506  	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
6507  	    ioc->request_sz);
6508  	ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
6509  	    ioc->request_sz);
6510  
6511  	/* internal queue */
6512  	ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
6513  	    ioc->request_sz);
6514  	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
6515  	    ioc->request_sz);
6516  
6517  	ioc_info(ioc,
6518  	    "request pool(0x%p) - dma(0x%llx): "
6519  	    "depth(%d), frame_size(%d), pool_size(%d kB)\n",
6520  	    ioc->request, (unsigned long long) ioc->request_dma,
6521  	    ioc->hba_queue_depth, ioc->request_sz,
6522  	    (ioc->hba_queue_depth * ioc->request_sz) / 1024);
6523  
6524  	total_sz += sz;
6525  
6526  	dinitprintk(ioc,
6527  		    ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
6528  			     ioc->request, ioc->scsiio_depth));
6529  
6530  	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
6531  	sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
6532  	ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
6533  	if (!ioc->chain_lookup) {
6534  		ioc_err(ioc, "chain_lookup: kzalloc failed\n");
6535  		goto out;
6536  	}
6537  
6538  	sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
6539  	for (i = 0; i < ioc->scsiio_depth; i++) {
6540  		ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
6541  		if (!ioc->chain_lookup[i].chains_per_smid) {
6542  			ioc_err(ioc, "chain_lookup: kzalloc failed\n");
6543  			goto out;
6544  		}
6545  	}
6546  
6547  	/* initialize hi-priority queue smid's */
6548  	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
6549  	    sizeof(struct request_tracker), GFP_KERNEL);
6550  	if (!ioc->hpr_lookup) {
6551  		ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
6552  		goto out;
6553  	}
6554  	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
6555  	dinitprintk(ioc,
6556  		    ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
6557  			     ioc->hi_priority,
6558  			     ioc->hi_priority_depth, ioc->hi_priority_smid));
6559  
6560  	/* initialize internal queue smid's */
6561  	ioc->internal_lookup = kcalloc(ioc->internal_depth,
6562  	    sizeof(struct request_tracker), GFP_KERNEL);
6563  	if (!ioc->internal_lookup) {
6564  		ioc_err(ioc, "internal_lookup: kcalloc failed\n");
6565  		goto out;
6566  	}
6567  	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
6568  	dinitprintk(ioc,
6569  		    ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
6570  			     ioc->internal,
6571  			     ioc->internal_depth, ioc->internal_smid));
6572  
6573  	ioc->io_queue_num = kcalloc(ioc->scsiio_depth,
6574  	    sizeof(u16), GFP_KERNEL);
6575  	if (!ioc->io_queue_num)
6576  		goto out;
6577  	/*
6578  	 * The number of NVMe page sized blocks needed is:
6579  	 *     (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
6580  	 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
6581  	 * that is placed in the main message frame.  8 is the size of each PRP
6582  	 * entry or PRP list pointer entry.  8 is subtracted from page_size
6583  	 * because of the PRP list pointer entry at the end of a page, so this
6584  	 * is not counted as a PRP entry.  The final +1 rounds the result up.
6585  	 *
6586  	 * To avoid allocation failures due to the amount of memory that could
6587  	 * be required for NVMe PRP's, only each set of NVMe blocks will be
6588  	 * contiguous, so a new set is allocated for each possible I/O.
6589  	 */
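	/*
	 * Worked example of the formula above (assumed values, illustration
	 * only): with sg_tablesize = 128, 8-byte PRP entries and a 4096-byte
	 * NVMe page, nvme_blocks_needed = (((128 * 8) - 1) / (4096 - 8)) + 1
	 * = (1023 / 4088) + 1 = 1, i.e. one page-sized block per I/O.
	 */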
6590  
6591  	ioc->chains_per_prp_buffer = 0;
6592  	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
6593  		nvme_blocks_needed =
6594  			(ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
6595  		nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
6596  		nvme_blocks_needed++;
6597  
6598  		sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
6599  		ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
6600  		if (!ioc->pcie_sg_lookup) {
6601  			ioc_err(ioc, "PCIe SGL lookup: kzalloc failed\n");
6602  			goto out;
6603  		}
6604  		sz = nvme_blocks_needed * ioc->page_size;
6605  		rc = _base_allocate_pcie_sgl_pool(ioc, sz);
6606  		if (rc == -ENOMEM)
6607  			return -ENOMEM;
6608  		else if (rc == -EAGAIN)
6609  			goto try_32bit_dma;
6610  		total_sz += sz * ioc->scsiio_depth;
6611  	}
6612  
6613  	rc = _base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz);
6614  	if (rc == -ENOMEM)
6615  		return -ENOMEM;
6616  	else if (rc == -EAGAIN)
6617  		goto try_32bit_dma;
6618  	total_sz += ioc->chain_segment_sz * ((ioc->chains_needed_per_io -
6619  		ioc->chains_per_prp_buffer) * ioc->scsiio_depth);
6620  	dinitprintk(ioc,
6621  	    ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
6622  	    ioc->chain_depth, ioc->chain_segment_sz,
6623  	    (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
6624  	/* sense buffers, 4 byte align */
6625  	sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
6626  	rc = _base_allocate_sense_dma_pool(ioc, sense_sz);
6627  	if (rc  == -ENOMEM)
6628  		return -ENOMEM;
6629  	else if (rc == -EAGAIN)
6630  		goto try_32bit_dma;
6631  	total_sz += sense_sz;
6632  	/* reply pool, 4 byte align */
6633  	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
6634  	rc = _base_allocate_reply_pool(ioc, sz);
6635  	if (rc == -ENOMEM)
6636  		return -ENOMEM;
6637  	else if (rc == -EAGAIN)
6638  		goto try_32bit_dma;
6639  	total_sz += sz;
6640  
6641  	/* reply free queue, 16 byte align */
6642  	sz = ioc->reply_free_queue_depth * 4;
6643  	rc = _base_allocate_reply_free_dma_pool(ioc, sz);
6644  	if (rc  == -ENOMEM)
6645  		return -ENOMEM;
6646  	else if (rc == -EAGAIN)
6647  		goto try_32bit_dma;
6648  	dinitprintk(ioc,
6649  		    ioc_info(ioc, "reply_free_dma (0x%llx)\n",
6650  			     (unsigned long long)ioc->reply_free_dma));
6651  	total_sz += sz;
6652  	if (ioc->rdpq_array_enable) {
6653  		reply_post_free_array_sz = ioc->reply_queue_count *
6654  		    sizeof(Mpi2IOCInitRDPQArrayEntry);
6655  		rc = _base_allocate_reply_post_free_array(ioc,
6656  		    reply_post_free_array_sz);
6657  		if (rc == -ENOMEM)
6658  			return -ENOMEM;
6659  		else if (rc == -EAGAIN)
6660  			goto try_32bit_dma;
6661  	}
6662  	ioc->config_page_sz = 512;
6663  	ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
6664  			ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
6665  	if (!ioc->config_page) {
6666  		ioc_err(ioc, "config page: dma_pool_alloc failed\n");
6667  		goto out;
6668  	}
6669  
6670  	ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n",
6671  	    ioc->config_page, (unsigned long long)ioc->config_page_dma,
6672  	    ioc->config_page_sz);
6673  	total_sz += ioc->config_page_sz;
6674  
6675  	ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
6676  		 total_sz / 1024);
6677  	ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
6678  		 ioc->shost->can_queue, facts->RequestCredit);
6679  	ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
6680  		 ioc->shost->sg_tablesize);
6681  	return 0;
6682  
6683  try_32bit_dma:
6684  	_base_release_memory_pools(ioc);
6685  	if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) {
6686  		/* Change dma coherent mask to 32 bit and reallocate */
6687  		if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
6688  			pr_err("Setting 32 bit coherent DMA mask Failed %s\n",
6689  			    pci_name(ioc->pdev));
6690  			return -ENODEV;
6691  		}
6692  	} else if (_base_reduce_hba_queue_depth(ioc) != 0)
6693  		return -ENOMEM;
6694  	goto retry_allocation;
6695  
6696   out:
6697  	return -ENOMEM;
6698  }
6699  
6700  /**
6701   * mpt3sas_base_get_iocstate - Get the current state of an MPT adapter.
6702   * @ioc: Pointer to MPT_ADAPTER structure
6703   * @cooked: Request raw or cooked IOC state
6704   *
6705   * Return: all IOC Doorbell register bits if cooked==0, else just the
6706   * Doorbell bits in MPI_IOC_STATE_MASK.
6707   */
6708  u32
6709  mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
6710  {
6711  	u32 s, sc;
6712  
6713  	s = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
6714  	sc = s & MPI2_IOC_STATE_MASK;
6715  	return cooked ? sc : s;
6716  }
6717  
6718  /**
6719   * _base_wait_on_iocstate - waiting on a particular ioc state
6720   * @ioc: per adapter object
6721   * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
6722   * @timeout: timeout in seconds
6723   *
6724   * Return: 0 for success, non-zero for failure.
6725   */
6726  static int
6727  _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
6728  {
6729  	u32 count, cntdn;
6730  	u32 current_state;
6731  
6732  	count = 0;
6733  	cntdn = 1000 * timeout;
6734  	do {
6735  		current_state = mpt3sas_base_get_iocstate(ioc, 1);
6736  		if (current_state == ioc_state)
6737  			return 0;
6738  		if (count && current_state == MPI2_IOC_STATE_FAULT)
6739  			break;
6740  		if (count && current_state == MPI2_IOC_STATE_COREDUMP)
6741  			break;
6742  
6743  		usleep_range(1000, 1500);
6744  		count++;
6745  	} while (--cntdn);
6746  
6747  	return current_state;
6748  }
6749  
6750  /**
6751   * _base_dump_reg_set - print a hexdump of the system register set.
6752   * @ioc: per adapter object
6753   *
6754   * Return: nothing.
6755   */
6756  static inline void
6757  _base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc)
6758  {
6759  	unsigned int i, sz = 256;
6760  	u32 __iomem *reg = (u32 __iomem *)ioc->chip;
6761  
6762  	ioc_info(ioc, "System Register set:\n");
6763  	for (i = 0; i < (sz / sizeof(u32)); i++)
6764  		pr_info("%08x: %08x\n", (i * 4), readl(&reg[i]));
6765  }
6766  
6767  /**
6768   * _base_wait_for_doorbell_int - waiting for controller interrupt (generated by
6769   * a write to the doorbell)
6770   * @ioc: per adapter object
6771   * @timeout: timeout in seconds
6772   *
6773   * Return: 0 for success, non-zero for failure.
6774   *
6775   * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
6776   */
6777  
6778  static int
6779  _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
6780  {
6781  	u32 cntdn, count;
6782  	u32 int_status;
6783  
6784  	count = 0;
6785  	cntdn = 1000 * timeout;
6786  	do {
6787  		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
6788  		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
6789  			dhsprintk(ioc,
6790  				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6791  					   __func__, count, timeout));
6792  			return 0;
6793  		}
6794  
6795  		usleep_range(1000, 1500);
6796  		count++;
6797  	} while (--cntdn);
6798  
6799  	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
6800  		__func__, count, int_status);
6801  	return -EFAULT;
6802  }
6803  
6804  static int
6805  _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
6806  {
6807  	u32 cntdn, count;
6808  	u32 int_status;
6809  
6810  	count = 0;
6811  	cntdn = 2000 * timeout;
6812  	do {
6813  		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
6814  		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
6815  			dhsprintk(ioc,
6816  				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6817  					   __func__, count, timeout));
6818  			return 0;
6819  		}
6820  
6821  		udelay(500);
6822  		count++;
6823  	} while (--cntdn);
6824  
6825  	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
6826  		__func__, count, int_status);
6827  	return -EFAULT;
6828  
6829  }
6830  
6831  /**
6832   * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
6833   * @ioc: per adapter object
6834   * @timeout: timeout in seconds
6835   *
6836   * Return: 0 for success, non-zero for failure.
6837   *
6838   * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
6839   * doorbell.
6840   */
6841  static int
6842  _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
6843  {
6844  	u32 cntdn, count;
6845  	u32 int_status;
6846  	u32 doorbell;
6847  
6848  	count = 0;
6849  	cntdn = 1000 * timeout;
6850  	do {
6851  		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
6852  		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
6853  			dhsprintk(ioc,
6854  				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6855  					   __func__, count, timeout));
6856  			return 0;
6857  		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
6858  			doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
6859  			if ((doorbell & MPI2_IOC_STATE_MASK) ==
6860  			    MPI2_IOC_STATE_FAULT) {
6861  				mpt3sas_print_fault_code(ioc, doorbell);
6862  				return -EFAULT;
6863  			}
6864  			if ((doorbell & MPI2_IOC_STATE_MASK) ==
6865  			    MPI2_IOC_STATE_COREDUMP) {
6866  				mpt3sas_print_coredump_info(ioc, doorbell);
6867  				return -EFAULT;
6868  			}
6869  		} else if (int_status == 0xFFFFFFFF)
6870  			goto out;
6871  
6872  		usleep_range(1000, 1500);
6873  		count++;
6874  	} while (--cntdn);
6875  
6876   out:
6877  	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
6878  		__func__, count, int_status);
6879  	return -EFAULT;
6880  }
6881  
6882  /**
6883   * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
6884   * @ioc: per adapter object
6885   * @timeout: timeout in seconds
6886   *
6887   * Return: 0 for success, non-zero for failure.
6888   */
6889  static int
6890  _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
6891  {
6892  	u32 cntdn, count;
6893  	u32 doorbell_reg;
6894  
6895  	count = 0;
6896  	cntdn = 1000 * timeout;
6897  	do {
6898  		doorbell_reg = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
6899  		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
6900  			dhsprintk(ioc,
6901  				  ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6902  					   __func__, count, timeout));
6903  			return 0;
6904  		}
6905  
6906  		usleep_range(1000, 1500);
6907  		count++;
6908  	} while (--cntdn);
6909  
6910  	ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
6911  		__func__, count, doorbell_reg);
6912  	return -EFAULT;
6913  }
6914  
6915  /**
6916   * _base_send_ioc_reset - send doorbell reset
6917   * @ioc: per adapter object
6918   * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
6919   * @timeout: timeout in seconds
6920   *
6921   * Return: 0 for success, non-zero for failure.
6922   */
6923  static int
6924  _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
6925  {
6926  	u32 ioc_state;
6927  	int r = 0;
6928  	unsigned long flags;
6929  
6930  	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
6931  		ioc_err(ioc, "%s: unknown reset_type\n", __func__);
6932  		return -EFAULT;
6933  	}
6934  
6935  	if (!(ioc->facts.IOCCapabilities &
6936  	   MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
6937  		return -EFAULT;
6938  
6939  	ioc_info(ioc, "sending message unit reset !!\n");
6940  
6941  	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
6942  	    &ioc->chip->Doorbell);
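	/*
	 * Note (descriptive only): the message unit reset is requested with
	 * this single doorbell write; the reset function code is shifted
	 * into the doorbell's function field and no request frame or
	 * payload follows, unlike the full handshake protocol.
	 */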
6943  	if ((_base_wait_for_doorbell_ack(ioc, 15))) {
6944  		r = -EFAULT;
6945  		goto out;
6946  	}
6947  
6948  	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
6949  	if (ioc_state) {
6950  		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6951  			__func__, ioc_state);
6952  		r = -EFAULT;
6953  		goto out;
6954  	}
6955   out:
6956  	if (r != 0) {
6957  		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6958  		spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
6959  		/*
6960  		 * Wait for IOC state CoreDump to clear only during
6961  		 * HBA initialization & release time.
6962  		 */
6963  		if ((ioc_state & MPI2_IOC_STATE_MASK) ==
6964  		    MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 ||
6965  		    ioc->fault_reset_work_q == NULL)) {
6966  			spin_unlock_irqrestore(
6967  			    &ioc->ioc_reset_in_progress_lock, flags);
6968  			mpt3sas_print_coredump_info(ioc, ioc_state);
6969  			mpt3sas_base_wait_for_coredump_completion(ioc,
6970  			    __func__);
6971  			spin_lock_irqsave(
6972  			    &ioc->ioc_reset_in_progress_lock, flags);
6973  		}
6974  		spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
6975  	}
6976  	ioc_info(ioc, "message unit reset: %s\n",
6977  		 r == 0 ? "SUCCESS" : "FAILED");
6978  	return r;
6979  }
6980  
6981  /**
6982   * mpt3sas_wait_for_ioc - wait for the IOC to become operational.
6983   * @ioc: per adapter object
6984   * @timeout: timeout in seconds
6985   *
6986   * Waits up to timeout seconds for the IOC to become operational.
6987   *
6988   * Return: 0 if operational, %-ETIME if still loading, else %-EFAULT.
6989   */
6990  
6991  int
6992  mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
6993  {
6994  	int wait_state_count = 0;
6995  	u32 ioc_state;
6996  
6997  	do {
6998  		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
6999  		if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
7000  			break;
7001  
7002  		/*
7003  		 * Watchdog thread will be started after IOC Initialization, so
7004  		 * no need to wait here for IOC state to become operational
7005  		 * when IOC Initialization is on. Instead the driver will
7006  		 * return ETIME status, so that calling function can issue
7007  		 * diag reset operation and retry the command.
7008  		 */
7009  		if (ioc->is_driver_loading)
7010  			return -ETIME;
7011  
7012  		ssleep(1);
7013  		ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
7014  				__func__, ++wait_state_count);
7015  	} while (--timeout);
7016  	if (!timeout) {
7017  		ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
7018  		return -EFAULT;
7019  	}
7020  	if (wait_state_count)
7021  		ioc_info(ioc, "ioc is operational\n");
7022  	return 0;
7023  }
7024  
7025  /**
7026   * _base_handshake_req_reply_wait - send request through doorbell interface
7027   * @ioc: per adapter object
7028   * @request_bytes: request length
7029   * @request: pointer to the request payload
7030   * @reply_bytes: reply length
7031   * @reply: pointer to the reply payload
7032   * @timeout: timeout in seconds
7033   *
7034   * Return: 0 for success, non-zero for failure.
7035   */
7036  static int
7037  _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
7038  	u32 *request, int reply_bytes, u16 *reply, int timeout)
7039  {
7040  	MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
7041  	int i;
7042  	u8 failed;
7043  	__le32 *mfp;
7044  
7045  	/* make sure doorbell is not in use */
7046  	if ((ioc->base_readl_ext_retry(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
7047  		ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
7048  		return -EFAULT;
7049  	}
7050  
7051  	/* clear pending doorbell interrupts from previous state changes */
7052  	if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
7053  	    MPI2_HIS_IOC2SYS_DB_STATUS)
7054  		writel(0, &ioc->chip->HostInterruptStatus);
7055  
7056  	/* send message to ioc */
7057  	writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
7058  	    ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
7059  	    &ioc->chip->Doorbell);
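	/*
	 * Illustrative note: this one doorbell write announces the handshake,
	 * packing the MPI2_FUNCTION_HANDSHAKE code into the function field
	 * and the request length in dwords into the "add dwords" field; e.g.
	 * a 28-byte request is announced as 28 / 4 = 7 dwords.
	 */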
7060  
7061  	if ((_base_spin_on_doorbell_int(ioc, 5))) {
7062  		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
7063  			__LINE__);
7064  		return -EFAULT;
7065  	}
7066  	writel(0, &ioc->chip->HostInterruptStatus);
7067  
7068  	if ((_base_wait_for_doorbell_ack(ioc, 5))) {
7069  		ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
7070  			__LINE__);
7071  		return -EFAULT;
7072  	}
7073  
7074  	/* send message 32-bits at a time */
7075  	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
7076  		writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
7077  		if ((_base_wait_for_doorbell_ack(ioc, 5)))
7078  			failed = 1;
7079  	}
7080  
7081  	if (failed) {
7082  		ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
7083  			__LINE__);
7084  		return -EFAULT;
7085  	}
7086  
7087  	/* now wait for the reply */
7088  	if ((_base_wait_for_doorbell_int(ioc, timeout))) {
7089  		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
7090  			__LINE__);
7091  		return -EFAULT;
7092  	}
7093  
7094  	/* read the first two 16-bit words; they include the total length of the reply */
7095  	reply[0] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
7096  	    & MPI2_DOORBELL_DATA_MASK);
7097  	writel(0, &ioc->chip->HostInterruptStatus);
7098  	if ((_base_wait_for_doorbell_int(ioc, 5))) {
7099  		ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
7100  			__LINE__);
7101  		return -EFAULT;
7102  	}
7103  	reply[1] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
7104  	    & MPI2_DOORBELL_DATA_MASK);
7105  	writel(0, &ioc->chip->HostInterruptStatus);
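	/*
	 * MsgLength in the default reply counts 32-bit dwords, so the loop
	 * below reads MsgLength * 2 16-bit words in total; e.g. a MsgLength
	 * of 5 is a 20-byte reply read as ten 16-bit words, two of which
	 * were already consumed above.
	 */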
7106  
7107  	for (i = 2; i < default_reply->MsgLength * 2; i++)  {
7108  		if ((_base_wait_for_doorbell_int(ioc, 5))) {
7109  			ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
7110  				__LINE__);
7111  			return -EFAULT;
7112  		}
7113  		if (i >=  reply_bytes/2) /* overflow case */
7114  			ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
7115  		else
7116  			reply[i] = le16_to_cpu(
7117  			    ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
7118  			    & MPI2_DOORBELL_DATA_MASK);
7119  		writel(0, &ioc->chip->HostInterruptStatus);
7120  	}
7121  
7122  	_base_wait_for_doorbell_int(ioc, 5);
7123  	if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
7124  		dhsprintk(ioc,
7125  			  ioc_info(ioc, "doorbell is in use (line=%d)\n",
7126  				   __LINE__));
7127  	}
7128  	writel(0, &ioc->chip->HostInterruptStatus);
7129  
7130  	if (ioc->logging_level & MPT_DEBUG_INIT) {
7131  		mfp = (__le32 *)reply;
7132  		pr_info("\toffset:data\n");
7133  		for (i = 0; i < reply_bytes/4; i++)
7134  			ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
7135  			    le32_to_cpu(mfp[i]));
7136  	}
7137  	return 0;
7138  }
7139  
7140  /**
7141   * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
7142   * @ioc: per adapter object
7143   * @mpi_reply: the reply payload from FW
7144   * @mpi_request: the request payload sent to FW
7145   *
7146   * The SAS IO Unit Control Request message allows the host to perform low-level
7147   * operations, such as resets on the PHYs of the IO Unit. It also allows the
7148   * host to obtain the IOC-assigned device handle for a device when it has other
7149   * identifying information about the device, and to remove IOC resources
7150   * associated with the device.
7151   *
7152   * Return: 0 for success, non-zero for failure.
7153   */
7154  int
7155  mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
7156  	Mpi2SasIoUnitControlReply_t *mpi_reply,
7157  	Mpi2SasIoUnitControlRequest_t *mpi_request)
7158  {
7159  	u16 smid;
7160  	u8 issue_reset = 0;
7161  	int rc;
7162  	void *request;
7163  
7164  	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7165  
7166  	mutex_lock(&ioc->base_cmds.mutex);
7167  
7168  	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
7169  		ioc_err(ioc, "%s: base_cmd in use\n", __func__);
7170  		rc = -EAGAIN;
7171  		goto out;
7172  	}
7173  
7174  	rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
7175  	if (rc)
7176  		goto out;
7177  
7178  	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
7179  	if (!smid) {
7180  		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7181  		rc = -EAGAIN;
7182  		goto out;
7183  	}
7184  
7185  	rc = 0;
7186  	ioc->base_cmds.status = MPT3_CMD_PENDING;
7187  	request = mpt3sas_base_get_msg_frame(ioc, smid);
7188  	ioc->base_cmds.smid = smid;
7189  	memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
7190  	if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
7191  	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
7192  		ioc->ioc_link_reset_in_progress = 1;
7193  	init_completion(&ioc->base_cmds.done);
7194  	ioc->put_smid_default(ioc, smid);
7195  	wait_for_completion_timeout(&ioc->base_cmds.done,
7196  	    msecs_to_jiffies(10000));
7197  	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
7198  	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
7199  	    ioc->ioc_link_reset_in_progress)
7200  		ioc->ioc_link_reset_in_progress = 0;
7201  	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
7202  		mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status,
7203  		    mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4,
7204  		    issue_reset);
7205  		goto issue_host_reset;
7206  	}
7207  	if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
7208  		memcpy(mpi_reply, ioc->base_cmds.reply,
7209  		    sizeof(Mpi2SasIoUnitControlReply_t));
7210  	else
7211  		memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
7212  	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7213  	goto out;
7214  
7215   issue_host_reset:
7216  	if (issue_reset)
7217  		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
7218  	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7219  	rc = -EFAULT;
7220   out:
7221  	mutex_unlock(&ioc->base_cmds.mutex);
7222  	return rc;
7223  }
7224  
7225  /**
7226   * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
7227   * @ioc: per adapter object
7228   * @mpi_reply: the reply payload from FW
7229   * @mpi_request: the request payload sent to FW
7230   *
7231   * The SCSI Enclosure Processor request message causes the IOC to
7232   * communicate with SES devices to control LED status signals.
7233   *
7234   * Return: 0 for success, non-zero for failure.
7235   */
7236  int
7237  mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
7238  	Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
7239  {
7240  	u16 smid;
7241  	u8 issue_reset = 0;
7242  	int rc;
7243  	void *request;
7244  
7245  	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7246  
7247  	mutex_lock(&ioc->base_cmds.mutex);
7248  
7249  	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
7250  		ioc_err(ioc, "%s: base_cmd in use\n", __func__);
7251  		rc = -EAGAIN;
7252  		goto out;
7253  	}
7254  
7255  	rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
7256  	if (rc)
7257  		goto out;
7258  
7259  	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
7260  	if (!smid) {
7261  		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7262  		rc = -EAGAIN;
7263  		goto out;
7264  	}
7265  
7266  	rc = 0;
7267  	ioc->base_cmds.status = MPT3_CMD_PENDING;
7268  	request = mpt3sas_base_get_msg_frame(ioc, smid);
7269  	ioc->base_cmds.smid = smid;
7270  	memset(request, 0, ioc->request_sz);
7271  	memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
7272  	init_completion(&ioc->base_cmds.done);
7273  	ioc->put_smid_default(ioc, smid);
7274  	wait_for_completion_timeout(&ioc->base_cmds.done,
7275  	    msecs_to_jiffies(10000));
7276  	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
7277  		mpt3sas_check_cmd_timeout(ioc,
7278  		    ioc->base_cmds.status, mpi_request,
7279  		    sizeof(Mpi2SepRequest_t)/4, issue_reset);
7280  		goto issue_host_reset;
7281  	}
7282  	if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
7283  		memcpy(mpi_reply, ioc->base_cmds.reply,
7284  		    sizeof(Mpi2SepReply_t));
7285  	else
7286  		memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
7287  	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7288  	goto out;
7289  
7290   issue_host_reset:
7291  	if (issue_reset)
7292  		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
7293  	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7294  	rc = -EFAULT;
7295   out:
7296  	mutex_unlock(&ioc->base_cmds.mutex);
7297  	return rc;
7298  }
7299  
7300  /**
7301   * _base_get_port_facts - obtain port facts reply and save in ioc
7302   * @ioc: per adapter object
7303   * @port: ?
7304   *
7305   * Return: 0 for success, non-zero for failure.
7306   */
7307  static int
7308  _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
7309  {
7310  	Mpi2PortFactsRequest_t mpi_request;
7311  	Mpi2PortFactsReply_t mpi_reply;
7312  	struct mpt3sas_port_facts *pfacts;
7313  	int mpi_reply_sz, mpi_request_sz, r;
7314  
7315  	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7316  
7317  	mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
7318  	mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
7319  	memset(&mpi_request, 0, mpi_request_sz);
7320  	mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
7321  	mpi_request.PortNumber = port;
7322  	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
7323  	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
7324  
7325  	if (r != 0) {
7326  		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
7327  		return r;
7328  	}
7329  
7330  	pfacts = &ioc->pfacts[port];
7331  	memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
7332  	pfacts->PortNumber = mpi_reply.PortNumber;
7333  	pfacts->VP_ID = mpi_reply.VP_ID;
7334  	pfacts->VF_ID = mpi_reply.VF_ID;
7335  	pfacts->MaxPostedCmdBuffers =
7336  	    le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
7337  
7338  	return 0;
7339  }
7340  
7341  /**
7342   * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
7343   * @ioc: per adapter object
7344   * @timeout: timeout in seconds
7345   *
7346   * Return: 0 for success, non-zero for failure.
7347   */
7348  static int
7349  _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
7350  {
7351  	u32 ioc_state;
7352  	int rc;
7353  
7354  	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7355  
7356  	if (ioc->pci_error_recovery) {
7357  		dfailprintk(ioc,
7358  			    ioc_info(ioc, "%s: host in pci error recovery\n",
7359  				     __func__));
7360  		return -EFAULT;
7361  	}
7362  
7363  	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
7364  	dhsprintk(ioc,
7365  		  ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
7366  			   __func__, ioc_state));
7367  
7368  	if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
7369  	    (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
7370  		return 0;
7371  
7372  	if (ioc_state & MPI2_DOORBELL_USED) {
7373  		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
7374  		goto issue_diag_reset;
7375  	}
7376  
7377  	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
7378  		mpt3sas_print_fault_code(ioc, ioc_state &
7379  		    MPI2_DOORBELL_DATA_MASK);
7380  		goto issue_diag_reset;
7381  	} else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
7382  	    MPI2_IOC_STATE_COREDUMP) {
7383  		ioc_info(ioc,
7384  		    "%s: Skipping the diag reset here. (ioc_state=0x%x)\n",
7385  		    __func__, ioc_state);
7386  		return -EFAULT;
7387  	}
7388  
7389  	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
7390  	if (ioc_state) {
7391  		dfailprintk(ioc,
7392  			    ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
7393  				     __func__, ioc_state));
7394  		return -EFAULT;
7395  	}
7396  
7397  	return 0;
7398  
7399  issue_diag_reset:
7400  	rc = _base_diag_reset(ioc);
7401  	return rc;
7402  }
7403  
7404  /**
7405   * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
7406   * @ioc: per adapter object
7407   *
7408   * Return: 0 for success, non-zero for failure.
7409   */
7410  static int
7411  _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
7412  {
7413  	Mpi2IOCFactsRequest_t mpi_request;
7414  	Mpi2IOCFactsReply_t mpi_reply;
7415  	struct mpt3sas_facts *facts;
7416  	int mpi_reply_sz, mpi_request_sz, r;
7417  
7418  	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7419  
7420  	r = _base_wait_for_iocstate(ioc, 10);
7421  	if (r) {
7422  		dfailprintk(ioc,
7423  			    ioc_info(ioc, "%s: failed getting to correct state\n",
7424  				     __func__));
7425  		return r;
7426  	}
7427  	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
7428  	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
7429  	memset(&mpi_request, 0, mpi_request_sz);
7430  	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
7431  	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
7432  	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
7433  
7434  	if (r != 0) {
7435  		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
7436  		return r;
7437  	}
7438  
7439  	facts = &ioc->facts;
7440  	memset(facts, 0, sizeof(struct mpt3sas_facts));
7441  	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
7442  	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
7443  	facts->VP_ID = mpi_reply.VP_ID;
7444  	facts->VF_ID = mpi_reply.VF_ID;
7445  	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
7446  	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
7447  	facts->WhoInit = mpi_reply.WhoInit;
7448  	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
7449  	facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
7450  	if (ioc->msix_enable && (facts->MaxMSIxVectors <=
7451  	    MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
7452  		ioc->combined_reply_queue = 0;
7453  	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
7454  	facts->MaxReplyDescriptorPostQueueDepth =
7455  	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
7456  	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
7457  	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
7458  	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
7459  		ioc->ir_firmware = 1;
7460  	if ((facts->IOCCapabilities &
7461  	      MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
7462  		ioc->rdpq_array_capable = 1;
7463  	if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
7464  	    && ioc->is_aero_ioc)
7465  		ioc->atomic_desc_capable = 1;
7466  	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
7467  	facts->IOCRequestFrameSize =
7468  	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
7469  	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
7470  		facts->IOCMaxChainSegmentSize =
7471  			le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
7472  	}
7473  	facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
7474  	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
7475  	ioc->shost->max_id = -1;
7476  	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
7477  	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
7478  	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
7479  	facts->HighPriorityCredit =
7480  	    le16_to_cpu(mpi_reply.HighPriorityCredit);
7481  	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
7482  	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
7483  	facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
7484  
7485  	/*
7486  	 * Get the Page Size from IOC Facts. If it's 0, default to 4k.
7487  	 */
7488  	ioc->page_size = 1 << facts->CurrentHostPageSize;
7489  	if (ioc->page_size == 1) {
7490  		ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
7491  		ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
7492  	}
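	/*
	 * Illustrative only: CurrentHostPageSize is a power-of-two exponent,
	 * so a reported value of 12 yields ioc->page_size = 1 << 12 = 4096
	 * bytes; a reported value of 0 (page_size == 1) is treated as
	 * "not reported" and the 4k default above is used instead.
	 */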
7493  	dinitprintk(ioc,
7494  		    ioc_info(ioc, "CurrentHostPageSize(%d)\n",
7495  			     facts->CurrentHostPageSize));
7496  
7497  	dinitprintk(ioc,
7498  		    ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
7499  			     facts->RequestCredit, facts->MaxChainDepth));
7500  	dinitprintk(ioc,
7501  		    ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
7502  			     facts->IOCRequestFrameSize * 4,
7503  			     facts->ReplyFrameSize * 4));
7504  	return 0;
7505  }
7506  
7507  /**
7508   * _base_send_ioc_init - send ioc_init to firmware
7509   * @ioc: per adapter object
7510   *
7511   * Return: 0 for success, non-zero for failure.
7512   */
7513  static int
7514  _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
7515  {
7516  	Mpi2IOCInitRequest_t mpi_request;
7517  	Mpi2IOCInitReply_t mpi_reply;
7518  	int i, r = 0;
7519  	ktime_t current_time;
7520  	u16 ioc_status;
7521  	u32 reply_post_free_array_sz = 0;
7522  
7523  	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7524  
7525  	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
7526  	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
7527  	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
7528  	mpi_request.VF_ID = 0; /* TODO */
7529  	mpi_request.VP_ID = 0;
7530  	mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
7531  	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
7532  	mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;
7533  
7534  	if (_base_is_controller_msix_enabled(ioc))
7535  		mpi_request.HostMSIxVectors = ioc->reply_queue_count;
7536  	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
7537  	mpi_request.ReplyDescriptorPostQueueDepth =
7538  	    cpu_to_le16(ioc->reply_post_queue_depth);
7539  	mpi_request.ReplyFreeQueueDepth =
7540  	    cpu_to_le16(ioc->reply_free_queue_depth);
7541  
7542  	mpi_request.SenseBufferAddressHigh =
7543  	    cpu_to_le32((u64)ioc->sense_dma >> 32);
7544  	mpi_request.SystemReplyAddressHigh =
7545  	    cpu_to_le32((u64)ioc->reply_dma >> 32);
7546  	mpi_request.SystemRequestFrameBaseAddress =
7547  	    cpu_to_le64((u64)ioc->request_dma);
7548  	mpi_request.ReplyFreeQueueAddress =
7549  	    cpu_to_le64((u64)ioc->reply_free_dma);
7550  
7551  	if (ioc->rdpq_array_enable) {
7552  		reply_post_free_array_sz = ioc->reply_queue_count *
7553  		    sizeof(Mpi2IOCInitRDPQArrayEntry);
7554  		memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
7555  		for (i = 0; i < ioc->reply_queue_count; i++)
7556  			ioc->reply_post_free_array[i].RDPQBaseAddress =
7557  			    cpu_to_le64(
7558  				(u64)ioc->reply_post[i].reply_post_free_dma);
7559  		mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
7560  		mpi_request.ReplyDescriptorPostQueueAddress =
7561  		    cpu_to_le64((u64)ioc->reply_post_free_array_dma);
7562  	} else {
7563  		mpi_request.ReplyDescriptorPostQueueAddress =
7564  		    cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
7565  	}
7566  
7567  	/*
7568  	 * Set the flag to enable CoreDump state feature in IOC firmware.
7569  	 */
7570  	mpi_request.ConfigurationFlags |=
7571  	    cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE);
7572  
7573  	/* This timestamp specifies the number of milliseconds
7574  	 * since the epoch (midnight, January 1, 1970 UTC).
7575  	 */
7576  	current_time = ktime_get_real();
7577  	mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));
7578  
7579  	if (ioc->logging_level & MPT_DEBUG_INIT) {
7580  		__le32 *mfp;
7581  		int i;
7582  
7583  		mfp = (__le32 *)&mpi_request;
7584  		ioc_info(ioc, "\toffset:data\n");
7585  		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
7586  			ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
7587  			    le32_to_cpu(mfp[i]));
7588  	}
7589  
7590  	r = _base_handshake_req_reply_wait(ioc,
7591  	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
7592  	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30);
7593  
7594  	if (r != 0) {
7595  		ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
7596  		return r;
7597  	}
7598  
7599  	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
7600  	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
7601  	    mpi_reply.IOCLogInfo) {
7602  		ioc_err(ioc, "%s: failed\n", __func__);
7603  		r = -EIO;
7604  	}
7605  
7606  	/* Reset TimeSync Counter*/
7607  	ioc->timestamp_update_count = 0;
7608  	return r;
7609  }
7610  
7611  /**
7612   * mpt3sas_port_enable_done - command completion routine for port enable
7613   * @ioc: per adapter object
7614   * @smid: system request message index
7615   * @msix_index: MSIX table index supplied by the OS
7616   * @reply: reply message frame(lower 32bit addr)
7617   *
7618   * Return: 1 meaning mf should be freed from _base_interrupt
7619   *          0 means the mf is freed from this function.
7620   */
7621  u8
7622  mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
7623  	u32 reply)
7624  {
7625  	MPI2DefaultReply_t *mpi_reply;
7626  	u16 ioc_status;
7627  
7628  	if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
7629  		return 1;
7630  
7631  	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
7632  	if (!mpi_reply)
7633  		return 1;
7634  
7635  	if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
7636  		return 1;
7637  
7638  	ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
7639  	ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
7640  	ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
7641  	memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
7642  	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
7643  	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
7644  		ioc->port_enable_failed = 1;
7645  
7646  	if (ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE_ASYNC) {
7647  		ioc->port_enable_cmds.status &= ~MPT3_CMD_COMPLETE_ASYNC;
7648  		if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
7649  			mpt3sas_port_enable_complete(ioc);
7650  			return 1;
7651  		} else {
7652  			ioc->start_scan_failed = ioc_status;
7653  			ioc->start_scan = 0;
7654  			return 1;
7655  		}
7656  	}
7657  	complete(&ioc->port_enable_cmds.done);
7658  	return 1;
7659  }
7660  
7661  /**
7662   * _base_send_port_enable - send port_enable(discovery stuff) to firmware
7663   * @ioc: per adapter object
7664   *
7665   * Return: 0 for success, non-zero for failure.
7666   */
7667  static int
7668  _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
7669  {
7670  	Mpi2PortEnableRequest_t *mpi_request;
7671  	Mpi2PortEnableReply_t *mpi_reply;
7672  	int r = 0;
7673  	u16 smid;
7674  	u16 ioc_status;
7675  
7676  	ioc_info(ioc, "sending port enable !!\n");
7677  
7678  	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
7679  		ioc_err(ioc, "%s: internal command already in use\n", __func__);
7680  		return -EAGAIN;
7681  	}
7682  
7683  	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
7684  	if (!smid) {
7685  		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7686  		return -EAGAIN;
7687  	}
7688  
7689  	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
7690  	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7691  	ioc->port_enable_cmds.smid = smid;
7692  	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
7693  	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
7694  
7695  	init_completion(&ioc->port_enable_cmds.done);
7696  	ioc->put_smid_default(ioc, smid);
7697  	wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
7698  	if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
7699  		ioc_err(ioc, "%s: timeout\n", __func__);
7700  		_debug_dump_mf(mpi_request,
7701  		    sizeof(Mpi2PortEnableRequest_t)/4);
7702  		if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
7703  			r = -EFAULT;
7704  		else
7705  			r = -ETIME;
7706  		goto out;
7707  	}
7708  
7709  	mpi_reply = ioc->port_enable_cmds.reply;
7710  	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
7711  	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7712  		ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
7713  			__func__, ioc_status);
7714  		r = -EFAULT;
7715  		goto out;
7716  	}
7717  
7718   out:
7719  	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
7720  	ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
7721  	return r;
7722  }
7723  
7724  /**
7725   * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
7726   * @ioc: per adapter object
7727   *
7728   * Return: 0 for success, non-zero for failure.
7729   */
7730  int
7731  mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
7732  {
7733  	Mpi2PortEnableRequest_t *mpi_request;
7734  	u16 smid;
7735  
7736  	ioc_info(ioc, "sending port enable !!\n");
7737  
7738  	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
7739  		ioc_err(ioc, "%s: internal command already in use\n", __func__);
7740  		return -EAGAIN;
7741  	}
7742  
7743  	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
7744  	if (!smid) {
7745  		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7746  		return -EAGAIN;
7747  	}
7748  	ioc->drv_internal_flags |= MPT_DRV_INTERNAL_FIRST_PE_ISSUED;
7749  	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
7750  	ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE_ASYNC;
7751  	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7752  	ioc->port_enable_cmds.smid = smid;
7753  	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
7754  	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
7755  
7756  	ioc->put_smid_default(ioc, smid);
7757  	return 0;
7758  }
7759  
7760  /**
7761   * _base_determine_wait_on_discovery - decide whether to wait for discovery
7762   * @ioc: per adapter object
7763   *
7764   * Decide whether to wait on discovery to complete. Used to either
7765   * locate boot device, or report volumes ahead of physical devices.
7766   *
7767   * Return: 1 for wait, 0 for don't wait.
7768   */
7769  static int
7770  _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
7771  {
7772  	/* We wait for discovery to complete if IR firmware is loaded.
7773  	 * The sas topology events arrive before PD events, so we need time to
7774  	 * turn on the bit in ioc->pd_handles to indicate a PD.
7775  	 * Also, it may be required to report Volumes ahead of physical
7776  	 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
7777  	 */
7778  	if (ioc->ir_firmware)
7779  		return 1;
7780  
7781  	/* if no Bios, then we don't need to wait */
7782  	if (!ioc->bios_pg3.BiosVersion)
7783  		return 0;
7784  
7785  	/* The Bios is present, so we drop down here.
7786  	 *
7787  	 * If there are any entries in the Bios Page 2, then we wait
7788  	 * for discovery to complete.
7789  	 */
7790  
7791  	/* Current Boot Device */
7792  	if ((ioc->bios_pg2.CurrentBootDeviceForm &
7793  	    MPI2_BIOSPAGE2_FORM_MASK) ==
7794  	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
7795  	/* Request Boot Device */
7796  	   (ioc->bios_pg2.ReqBootDeviceForm &
7797  	    MPI2_BIOSPAGE2_FORM_MASK) ==
7798  	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
7799  	/* Alternate Request Boot Device */
7800  	   (ioc->bios_pg2.ReqAltBootDeviceForm &
7801  	    MPI2_BIOSPAGE2_FORM_MASK) ==
7802  	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
7803  		return 0;
7804  
7805  	return 1;
7806  }
7807  
7808  /**
7809   * _base_unmask_events - turn on notification for this event
7810   * @ioc: per adapter object
7811   * @event: firmware event
7812   *
7813   * The mask is stored in ioc->event_masks.
7814   */
7815  static void
7816  _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
7817  {
7818  	u32 desired_event;
7819  
7820  	if (event >= 128)
7821  		return;
7822  
7823  	desired_event = (1 << (event % 32));
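	/*
	 * Example (illustrative): event 70 maps to bit 70 % 32 = 6, and since
	 * 64 <= 70 < 96 that bit is cleared in ioc->event_masks[2] below;
	 * a cleared mask bit means the IOC will notify us of that event.
	 */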
7824  
7825  	if (event < 32)
7826  		ioc->event_masks[0] &= ~desired_event;
7827  	else if (event < 64)
7828  		ioc->event_masks[1] &= ~desired_event;
7829  	else if (event < 96)
7830  		ioc->event_masks[2] &= ~desired_event;
7831  	else if (event < 128)
7832  		ioc->event_masks[3] &= ~desired_event;
7833  }
7834  
7835  /**
7836   * _base_event_notification - send event notification
7837   * @ioc: per adapter object
7838   *
7839   * Return: 0 for success, non-zero for failure.
7840   */
7841  static int
7842  _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
7843  {
7844  	Mpi2EventNotificationRequest_t *mpi_request;
7845  	u16 smid;
7846  	int r = 0;
7847  	int i, issue_diag_reset = 0;
7848  
7849  	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7850  
7851  	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
7852  		ioc_err(ioc, "%s: internal command already in use\n", __func__);
7853  		return -EAGAIN;
7854  	}
7855  
7856  	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
7857  	if (!smid) {
7858  		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7859  		return -EAGAIN;
7860  	}
7861  	ioc->base_cmds.status = MPT3_CMD_PENDING;
7862  	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7863  	ioc->base_cmds.smid = smid;
7864  	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
7865  	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
7866  	mpi_request->VF_ID = 0; /* TODO */
7867  	mpi_request->VP_ID = 0;
7868  	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
7869  		mpi_request->EventMasks[i] =
7870  		    cpu_to_le32(ioc->event_masks[i]);
7871  	init_completion(&ioc->base_cmds.done);
7872  	ioc->put_smid_default(ioc, smid);
7873  	wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
7874  	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
7875  		ioc_err(ioc, "%s: timeout\n", __func__);
7876  		_debug_dump_mf(mpi_request,
7877  		    sizeof(Mpi2EventNotificationRequest_t)/4);
7878  		if (ioc->base_cmds.status & MPT3_CMD_RESET)
7879  			r = -EFAULT;
7880  		else
7881  			issue_diag_reset = 1;
7882  
7883  	} else
7884  		dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
7885  	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7886  
7887  	if (issue_diag_reset) {
7888  		if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
7889  			return -EFAULT;
7890  		if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
7891  			return -EFAULT;
7892  		r = -EAGAIN;
7893  	}
7894  	return r;
7895  }
7896  
7897  /**
7898   * mpt3sas_base_validate_event_type - validating event types
7899   * @ioc: per adapter object
7900   * @event_type: firmware event
7901   *
7902   * This will turn on firmware event notification when an application
7903   * asks for that event. Events that are already enabled are left unchanged.
7904   */
7905  void
7906  mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
7907  {
7908  	int i, j;
7909  	u32 event_mask, desired_event;
7910  	u8 send_update_to_fw;
7911  
7912  	for (i = 0, send_update_to_fw = 0; i <
7913  	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
7914  		event_mask = ~event_type[i];
7915  		desired_event = 1;
7916  		for (j = 0; j < 32; j++) {
7917  			if (!(event_mask & desired_event) &&
7918  			    (ioc->event_masks[i] & desired_event)) {
7919  				ioc->event_masks[i] &= ~desired_event;
7920  				send_update_to_fw = 1;
7921  			}
7922  			desired_event = (desired_event << 1);
7923  		}
7924  	}
7925  
7926  	if (!send_update_to_fw)
7927  		return;
7928  
7929  	mutex_lock(&ioc->base_cmds.mutex);
7930  	_base_event_notification(ioc);
7931  	mutex_unlock(&ioc->base_cmds.mutex);
7932  }
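
/*
 * Illustrative sketch with hypothetical values (not part of the original
 * code): an application requesting only event 5 passes event_type[0] ==
 * 0x00000020 with the remaining words zero.  event_mask = ~event_type[0]
 * then has bit 5 clear; if ioc->event_masks[0] still has bit 5 set (the
 * event is masked), the bit is cleared and send_update_to_fw triggers a
 * fresh _base_event_notification() to push the new masks to the firmware.
 */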
7933  
7934  /**
7935   * mpt3sas_base_unlock_and_get_host_diagnostic - enable Host Diagnostic Register writes
7936   * @ioc: per adapter object
7937   * @host_diagnostic: host diagnostic register content
7938   *
7939   * Return: 0 for success, non-zero for failure.
7940   */
7941  
7942  int
7943  mpt3sas_base_unlock_and_get_host_diagnostic(struct MPT3SAS_ADAPTER *ioc,
7944  	u32 *host_diagnostic)
7945  {
7946  
7947  	u32 count;
7948  	*host_diagnostic = 0;
7949  	count = 0;
7950  
7951  	do {
7952  		/* Write magic sequence to WriteSequence register
7953  		 * Loop until in diagnostic mode
7954  		 */
7955  		drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
7956  		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
7957  		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
7958  		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
7959  		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
7960  		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
7961  		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
7962  		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
7963  
7964  		/* wait 100 msec */
7965  		msleep(100);
7966  
7967  		if (count++ > 20) {
7968  			ioc_info(ioc,
7969  				    "Stop writing magic sequence after 20 retries\n");
7970  			_base_dump_reg_set(ioc);
7971  			return -EFAULT;
7972  		}
7973  
7974  		*host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
7975  		drsprintk(ioc,
7976  			     ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
7977  				     count, *host_diagnostic));
7978  
7979  	} while ((*host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
7980  	return 0;
7981  }
7982  
7983  /**
7984   * mpt3sas_base_lock_host_diagnostic - Disable Host Diagnostic Register writes
7985   * @ioc: per adapter object
7986   */
7987  
7988  void
7989  mpt3sas_base_lock_host_diagnostic(struct MPT3SAS_ADAPTER *ioc)
7990  {
7991  	drsprintk(ioc, ioc_info(ioc, "disable writes to the diagnostic register\n"));
7992  	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
7993  }
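
/*
 * Usage sketch (mirrors _base_diag_reset() below, shown here only for
 * illustration): callers serialize on ioc->hostdiag_unlock_mutex, unlock
 * the register, update HostDiagnostic, then lock it again:
 *
 *	u32 host_diagnostic;
 *
 *	mutex_lock(&ioc->hostdiag_unlock_mutex);
 *	if (!mpt3sas_base_unlock_and_get_host_diagnostic(ioc, &host_diagnostic)) {
 *		writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
 *		    &ioc->chip->HostDiagnostic);
 *		mpt3sas_base_lock_host_diagnostic(ioc);
 *	}
 *	mutex_unlock(&ioc->hostdiag_unlock_mutex);
 */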
7994  
7995  /**
7996   * _base_diag_reset - the "big hammer" start of day reset
7997   * @ioc: per adapter object
7998   *
7999   * Return: 0 for success, non-zero for failure.
8000   */
8001  static int
8002  _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
8003  {
8004  	u32 host_diagnostic;
8005  	u32 ioc_state;
8006  	u32 count;
8007  	u32 hcb_size;
8008  
8009  	ioc_info(ioc, "sending diag reset !!\n");
8010  
8011  	pci_cfg_access_lock(ioc->pdev);
8012  
8013  	drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
8014  
8015  	mutex_lock(&ioc->hostdiag_unlock_mutex);
8016  	if (mpt3sas_base_unlock_and_get_host_diagnostic(ioc, &host_diagnostic))
8017  		goto out;
8018  
8019  	hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
8020  	drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
8021  	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
8022  	     &ioc->chip->HostDiagnostic);
8023  
8024  	/* This delay allows the chip PCIe hardware time to finish reset tasks */
8025  	msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
8026  
8027  	/* Approximately 300 second max wait */
8028  	for (count = 0; count < (300000000 /
8029  	    MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
8030  
8031  		host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
8032  
8033  		if (host_diagnostic == 0xFFFFFFFF) {
8034  			ioc_info(ioc,
8035  			    "Invalid host diagnostic register value\n");
8036  			_base_dump_reg_set(ioc);
8037  			goto out;
8038  		}
8039  		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
8040  			break;
8041  
8042  		/* Wait to pass the second read delay window */
8043  		msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC/1000);
8044  	}
8045  
8046  	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
8047  
8048  		drsprintk(ioc,
8049  			ioc_info(ioc, "restart the adapter assuming the\n"
8050  					"HCB Address points to good F/W\n"));
8051  		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
8052  		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
8053  		writel(host_diagnostic, &ioc->chip->HostDiagnostic);
8054  
8055  		drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
8056  		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
8057  		    &ioc->chip->HCBSize);
8058  	}
8059  
8060  	drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
8061  	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
8062  	    &ioc->chip->HostDiagnostic);
8063  
8064  	mpt3sas_base_lock_host_diagnostic(ioc);
8065  	mutex_unlock(&ioc->hostdiag_unlock_mutex);
8066  
8067  	drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
8068  	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
8069  	if (ioc_state) {
8070  		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
8071  			__func__, ioc_state);
8072  		_base_dump_reg_set(ioc);
8073  		goto out;
8074  	}
8075  
8076  	pci_cfg_access_unlock(ioc->pdev);
8077  	ioc_info(ioc, "diag reset: SUCCESS\n");
8078  	return 0;
8079  
8080   out:
8081  	pci_cfg_access_unlock(ioc->pdev);
8082  	ioc_err(ioc, "diag reset: FAILED\n");
8083  	mutex_unlock(&ioc->hostdiag_unlock_mutex);
8084  	return -EFAULT;
8085  }
8086  
8087  /**
8088   * mpt3sas_base_make_ioc_ready - put controller in READY state
8089   * @ioc: per adapter object
8090   * @type: FORCE_BIG_HAMMER or SOFT_RESET
8091   *
8092   * Return: 0 for success, non-zero for failure.
8093   */
8094  int
8095  mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
8096  {
8097  	u32 ioc_state;
8098  	int rc;
8099  	int count;
8100  
8101  	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
8102  
8103  	if (ioc->pci_error_recovery)
8104  		return 0;
8105  
8106  	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
8107  	dhsprintk(ioc,
8108  		  ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
8109  			   __func__, ioc_state));
8110  
8111  	/* if in RESET state, it should move to READY state shortly */
8112  	count = 0;
8113  	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
8114  		while ((ioc_state & MPI2_IOC_STATE_MASK) !=
8115  		    MPI2_IOC_STATE_READY) {
8116  			if (count++ == 10) {
8117  				ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
8118  					__func__, ioc_state);
8119  				return -EFAULT;
8120  			}
8121  			ssleep(1);
8122  			ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
8123  		}
8124  	}
8125  
8126  	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
8127  		return 0;
8128  
8129  	if (ioc_state & MPI2_DOORBELL_USED) {
8130  		ioc_info(ioc, "unexpected doorbell active!\n");
8131  		goto issue_diag_reset;
8132  	}
8133  
8134  	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
8135  		mpt3sas_print_fault_code(ioc, ioc_state &
8136  		    MPI2_DOORBELL_DATA_MASK);
8137  		goto issue_diag_reset;
8138  	}
8139  
8140  	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) {
8141  		/*
8142  		 * If a host reset is invoked while the watchdog thread is waiting
8143  		 * for the IOC state to change to Fault, the driver has to wait
8144  		 * here for the CoreDump state to clear; otherwise a reset would be
8145  		 * issued to the FW and the FW would move the IOC to the Reset
8146  		 * state without copying the FW logs to the coredump region.
8147  		 */
8148  		if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) {
8149  			mpt3sas_print_coredump_info(ioc, ioc_state &
8150  			    MPI2_DOORBELL_DATA_MASK);
8151  			mpt3sas_base_wait_for_coredump_completion(ioc,
8152  			    __func__);
8153  		}
8154  		goto issue_diag_reset;
8155  	}
8156  
8157  	if (type == FORCE_BIG_HAMMER)
8158  		goto issue_diag_reset;
8159  
8160  	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
8161  		if (!(_base_send_ioc_reset(ioc,
8162  		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15)))
8163  			return 0;
8165  
8166   issue_diag_reset:
8167  	rc = _base_diag_reset(ioc);
8168  	return rc;
8169  }
8170  
8171  /**
8172   * _base_make_ioc_operational - put controller in OPERATIONAL state
8173   * @ioc: per adapter object
8174   *
8175   * Return: 0 for success, non-zero for failure.
8176   */
8177  static int
8178  _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
8179  {
8180  	int r, i, index, rc;
8181  	unsigned long	flags;
8182  	u32 reply_address;
8183  	u16 smid;
8184  	struct _tr_list *delayed_tr, *delayed_tr_next;
8185  	struct _sc_list *delayed_sc, *delayed_sc_next;
8186  	struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
8187  	u8 hide_flag;
8188  	struct adapter_reply_queue *reply_q;
8189  	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
8190  
8191  	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
8192  
8193  	/* clean the delayed target reset list */
8194  	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
8195  	    &ioc->delayed_tr_list, list) {
8196  		list_del(&delayed_tr->list);
8197  		kfree(delayed_tr);
8198  	}
8199  
8200  
8201  	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
8202  	    &ioc->delayed_tr_volume_list, list) {
8203  		list_del(&delayed_tr->list);
8204  		kfree(delayed_tr);
8205  	}
8206  
8207  	list_for_each_entry_safe(delayed_sc, delayed_sc_next,
8208  	    &ioc->delayed_sc_list, list) {
8209  		list_del(&delayed_sc->list);
8210  		kfree(delayed_sc);
8211  	}
8212  
8213  	list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
8214  	    &ioc->delayed_event_ack_list, list) {
8215  		list_del(&delayed_event_ack->list);
8216  		kfree(delayed_event_ack);
8217  	}
8218  
8219  	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8220  
8221  	/* hi-priority queue */
8222  	INIT_LIST_HEAD(&ioc->hpr_free_list);
8223  	smid = ioc->hi_priority_smid;
8224  	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
8225  		ioc->hpr_lookup[i].cb_idx = 0xFF;
8226  		ioc->hpr_lookup[i].smid = smid;
8227  		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
8228  		    &ioc->hpr_free_list);
8229  	}
8230  
8231  	/* internal queue */
8232  	INIT_LIST_HEAD(&ioc->internal_free_list);
8233  	smid = ioc->internal_smid;
8234  	for (i = 0; i < ioc->internal_depth; i++, smid++) {
8235  		ioc->internal_lookup[i].cb_idx = 0xFF;
8236  		ioc->internal_lookup[i].smid = smid;
8237  		list_add_tail(&ioc->internal_lookup[i].tracker_list,
8238  		    &ioc->internal_free_list);
8239  	}
8240  
8241  	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8242  
8243  	/* initialize Reply Free Queue */
8244  	for (i = 0, reply_address = (u32)ioc->reply_dma ;
8245  	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
8246  	    ioc->reply_sz) {
8247  		ioc->reply_free[i] = cpu_to_le32(reply_address);
8248  		if (ioc->is_mcpu_endpoint)
8249  			_base_clone_reply_to_sys_mem(ioc,
8250  					reply_address, i);
8251  	}
8252  
8253  	/* initialize reply queues */
8254  	if (ioc->is_driver_loading)
8255  		_base_assign_reply_queues(ioc);
8256  
8257  	/* initialize Reply Post Free Queue */
8258  	index = 0;
8259  	reply_post_free_contig = ioc->reply_post[0].reply_post_free;
8260  	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
8261  		/*
8262  		 * If RDPQ is enabled, switch to the next allocation.
8263  		 * Otherwise advance within the contiguous region.
8264  		 */
8265  		if (ioc->rdpq_array_enable) {
8266  			reply_q->reply_post_free =
8267  				ioc->reply_post[index++].reply_post_free;
8268  		} else {
8269  			reply_q->reply_post_free = reply_post_free_contig;
8270  			reply_post_free_contig += ioc->reply_post_queue_depth;
8271  		}
8272  
8273  		reply_q->reply_post_host_index = 0;
8274  		for (i = 0; i < ioc->reply_post_queue_depth; i++)
8275  			reply_q->reply_post_free[i].Words =
8276  			    cpu_to_le64(ULLONG_MAX);
8277  		if (!_base_is_controller_msix_enabled(ioc))
8278  			goto skip_init_reply_post_free_queue;
8279  	}
8280   skip_init_reply_post_free_queue:
8281  
8282  	r = _base_send_ioc_init(ioc);
8283  	if (r) {
8284  		/*
8285  		 * No need to check the IOC state for a fault and issue a diag
8286  		 * reset during host reset; this check is needed only at driver
8287  		 * load time.
8288  		 */
8289  		if (!ioc->is_driver_loading)
8290  			return r;
8291  
8292  		rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
8293  		if (rc || (_base_send_ioc_init(ioc)))
8294  			return r;
8295  	}
8296  
8297  	/* initialize reply free host index */
8298  	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
8299  	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
8300  
8301  	/* initialize reply post host index */
8302  	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
8303  		if (ioc->combined_reply_queue)
8304  			writel((reply_q->msix_index & 7)<<
8305  			   MPI2_RPHI_MSIX_INDEX_SHIFT,
8306  			   ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
8307  		else
8308  			writel(reply_q->msix_index <<
8309  				MPI2_RPHI_MSIX_INDEX_SHIFT,
8310  				&ioc->chip->ReplyPostHostIndex);
8311  
8312  		if (!_base_is_controller_msix_enabled(ioc))
8313  			goto skip_init_reply_post_host_index;
8314  	}
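
	/*
	 * Illustrative example for the combined reply queue branch above
	 * (hypothetical index): MSI-X index 10 is reported through the
	 * supplemental register replyPostRegisterIndex[10 / 8 = 1], writing
	 * the value (10 & 7) = 2 shifted by MPI2_RPHI_MSIX_INDEX_SHIFT.
	 */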
8315  
8316   skip_init_reply_post_host_index:
8317  
8318  	mpt3sas_base_unmask_interrupts(ioc);
8319  
8320  	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
8321  		r = _base_display_fwpkg_version(ioc);
8322  		if (r)
8323  			return r;
8324  	}
8325  
8326  	r = _base_static_config_pages(ioc);
8327  	if (r)
8328  		return r;
8329  
8330  	r = _base_event_notification(ioc);
8331  	if (r)
8332  		return r;
8333  
8334  	if (!ioc->shost_recovery) {
8335  
8336  		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
8337  		    == 0x80) {
8338  			hide_flag = (u8) (
8339  			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
8340  			    MFG_PAGE10_HIDE_SSDS_MASK);
8341  			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
8342  				ioc->mfg_pg10_hide_flag = hide_flag;
8343  		}
8344  
8345  		ioc->wait_for_discovery_to_complete =
8346  		    _base_determine_wait_on_discovery(ioc);
8347  
8348  		return r; /* scan_start and scan_finished support */
8349  	}
8350  
8351  	r = _base_send_port_enable(ioc);
8352  	if (r)
8353  		return r;
8354  
8355  	return r;
8356  }
8357  
8358  /**
8359   * mpt3sas_base_free_resources - free controller resources
8360   * @ioc: per adapter object
8361   */
8362  void
8363  mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
8364  {
8365  	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
8366  
8367  	/* synchronizing freeing resource with pci_access_mutex lock */
8368  	mutex_lock(&ioc->pci_access_mutex);
8369  	if (ioc->chip_phys && ioc->chip) {
8370  		mpt3sas_base_mask_interrupts(ioc);
8371  		ioc->shost_recovery = 1;
8372  		mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
8373  		ioc->shost_recovery = 0;
8374  	}
8375  
8376  	mpt3sas_base_unmap_resources(ioc);
8377  	mutex_unlock(&ioc->pci_access_mutex);
8378  	return;
8379  }
8380  
8381  /**
8382   * mpt3sas_base_attach - attach controller instance
8383   * @ioc: per adapter object
8384   *
8385   * Return: 0 for success, non-zero for failure.
8386   */
8387  int
8388  mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
8389  {
8390  	int r, i, rc;
8391  	int cpu_id, last_cpu_id = 0;
8392  
8393  	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
8394  
8395  	/* setup cpu_msix_table */
8396  	ioc->cpu_count = num_online_cpus();
8397  	for_each_online_cpu(cpu_id)
8398  		last_cpu_id = cpu_id;
8399  	ioc->cpu_msix_table_sz = last_cpu_id + 1;
8400  	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
8401  	ioc->reply_queue_count = 1;
8402  	if (!ioc->cpu_msix_table) {
8403  		ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
8404  		r = -ENOMEM;
8405  		goto out_free_resources;
8406  	}
8407  
8408  	if (ioc->is_warpdrive) {
8409  		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
8410  		    sizeof(resource_size_t *), GFP_KERNEL);
8411  		if (!ioc->reply_post_host_index) {
8412  			ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
8413  			r = -ENOMEM;
8414  			goto out_free_resources;
8415  		}
8416  	}
8417  
8418  	ioc->smp_affinity_enable = smp_affinity_enable;
8419  
8420  	ioc->rdpq_array_enable_assigned = 0;
8421  	ioc->use_32bit_dma = false;
8422  	ioc->dma_mask = 64;
8423  	if (ioc->is_aero_ioc) {
8424  		ioc->base_readl = &_base_readl_aero;
8425  		ioc->base_readl_ext_retry = &_base_readl_ext_retry;
8426  	} else {
8427  		ioc->base_readl = &_base_readl;
8428  		ioc->base_readl_ext_retry = &_base_readl;
8429  	}
8430  	r = mpt3sas_base_map_resources(ioc);
8431  	if (r)
8432  		goto out_free_resources;
8433  
8434  	pci_set_drvdata(ioc->pdev, ioc->shost);
8435  	r = _base_get_ioc_facts(ioc);
8436  	if (r) {
8437  		rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
8438  		if (rc || (_base_get_ioc_facts(ioc)))
8439  			goto out_free_resources;
8440  	}
8441  
8442  	switch (ioc->hba_mpi_version_belonged) {
8443  	case MPI2_VERSION:
8444  		ioc->build_sg_scmd = &_base_build_sg_scmd;
8445  		ioc->build_sg = &_base_build_sg;
8446  		ioc->build_zero_len_sge = &_base_build_zero_len_sge;
8447  		ioc->get_msix_index_for_smlio = &_base_get_msix_index;
8448  		break;
8449  	case MPI25_VERSION:
8450  	case MPI26_VERSION:
8451  		/*
8452  		 * In SAS3.0,
8453  		 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
8454  		 * Target Status - all require the IEEE formatted scatter gather
8455  		 * elements.
8456  		 */
8457  		ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
8458  		ioc->build_sg = &_base_build_sg_ieee;
8459  		ioc->build_nvme_prp = &_base_build_nvme_prp;
8460  		ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
8461  		ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
8462  		if (ioc->high_iops_queues)
8463  			ioc->get_msix_index_for_smlio =
8464  					&_base_get_high_iops_msix_index;
8465  		else
8466  			ioc->get_msix_index_for_smlio = &_base_get_msix_index;
8467  		break;
8468  	}
8469  	if (ioc->atomic_desc_capable) {
8470  		ioc->put_smid_default = &_base_put_smid_default_atomic;
8471  		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
8472  		ioc->put_smid_fast_path =
8473  				&_base_put_smid_fast_path_atomic;
8474  		ioc->put_smid_hi_priority =
8475  				&_base_put_smid_hi_priority_atomic;
8476  	} else {
8477  		ioc->put_smid_default = &_base_put_smid_default;
8478  		ioc->put_smid_fast_path = &_base_put_smid_fast_path;
8479  		ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
8480  		if (ioc->is_mcpu_endpoint)
8481  			ioc->put_smid_scsi_io =
8482  				&_base_put_smid_mpi_ep_scsi_io;
8483  		else
8484  			ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
8485  	}
8486  	/*
8487  	 * These function pointers are for requests that don't require
8488  	 * IEEE scatter gather elements.
8489  	 *
8490  	 * For example, Configuration Pages and SAS IOUNIT Control don't.
8491  	 */
8492  	ioc->build_sg_mpi = &_base_build_sg;
8493  	ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
8494  
8495  	r = mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
8496  	if (r)
8497  		goto out_free_resources;
8498  
8499  	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
8500  	    sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
8501  	if (!ioc->pfacts) {
8502  		r = -ENOMEM;
8503  		goto out_free_resources;
8504  	}
8505  
8506  	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
8507  		r = _base_get_port_facts(ioc, i);
8508  		if (r) {
8509  			rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
8510  			if (rc || (_base_get_port_facts(ioc, i)))
8511  				goto out_free_resources;
8512  		}
8513  	}
8514  
8515  	r = _base_allocate_memory_pools(ioc);
8516  	if (r)
8517  		goto out_free_resources;
8518  
8519  	if (irqpoll_weight > 0)
8520  		ioc->thresh_hold = irqpoll_weight;
8521  	else
8522  		ioc->thresh_hold = ioc->hba_queue_depth/4;
8523  
8524  	_base_init_irqpolls(ioc);
8525  	init_waitqueue_head(&ioc->reset_wq);
8526  
8527  	/* allocate memory pd handle bitmask list */
8528  	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
8529  	if (ioc->facts.MaxDevHandle % 8)
8530  		ioc->pd_handles_sz++;
8531  	/*
8532  	 * pd_handles_sz is rounded up to a multiple of sizeof(unsigned long)
8533  	 * so that set_bit()/test_bit() never touch memory beyond the allocation.
8534  	 */
8535  	ioc->pd_handles_sz = ALIGN(ioc->pd_handles_sz, sizeof(unsigned long));
8536  
8537  	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
8538  	    GFP_KERNEL);
8539  	if (!ioc->pd_handles) {
8540  		r = -ENOMEM;
8541  		goto out_free_resources;
8542  	}
8543  	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
8544  	    GFP_KERNEL);
8545  	if (!ioc->blocking_handles) {
8546  		r = -ENOMEM;
8547  		goto out_free_resources;
8548  	}
8549  
8550  	/* allocate memory for pending OS device add list */
8551  	ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
8552  	if (ioc->facts.MaxDevHandle % 8)
8553  		ioc->pend_os_device_add_sz++;
8554  
8555  	/*
8556  	 * pend_os_device_add_sz is likewise rounded up so that
8557  	 * set_bit()/test_bit() never touch memory beyond the allocation.
8558  	 */
8559  	ioc->pend_os_device_add_sz = ALIGN(ioc->pend_os_device_add_sz,
8560  					   sizeof(unsigned long));
8561  	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
8562  	    GFP_KERNEL);
8563  	if (!ioc->pend_os_device_add) {
8564  		r = -ENOMEM;
8565  		goto out_free_resources;
8566  	}
8567  
8568  	ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
8569  	ioc->device_remove_in_progress =
8570  		kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
8571  	if (!ioc->device_remove_in_progress) {
8572  		r = -ENOMEM;
8573  		goto out_free_resources;
8574  	}
8575  
8576  	ioc->fwfault_debug = mpt3sas_fwfault_debug;
8577  
8578  	/* base internal command bits */
8579  	mutex_init(&ioc->base_cmds.mutex);
8580  	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8581  	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
8582  
8583  	/* port_enable command bits */
8584  	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8585  	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
8586  
8587  	/* transport internal command bits */
8588  	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8589  	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
8590  	mutex_init(&ioc->transport_cmds.mutex);
8591  
8592  	/* scsih internal command bits */
8593  	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8594  	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8595  	mutex_init(&ioc->scsih_cmds.mutex);
8596  
8597  	/* task management internal command bits */
8598  	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8599  	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
8600  	mutex_init(&ioc->tm_cmds.mutex);
8601  
8602  	/* config page internal command bits */
8603  	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8604  	ioc->config_cmds.status = MPT3_CMD_NOT_USED;
8605  	mutex_init(&ioc->config_cmds.mutex);
8606  
8607  	/* ctl module internal command bits */
8608  	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8609  	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
8610  	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
8611  	mutex_init(&ioc->ctl_cmds.mutex);
8612  
8613  	if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
8614  	    !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
8615  	    !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
8616  	    !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
8617  		r = -ENOMEM;
8618  		goto out_free_resources;
8619  	}
8620  
8621  	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
8622  		ioc->event_masks[i] = -1;
8623  
8624  	/* here we enable the events we care about */
8625  	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
8626  	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
8627  	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
8628  	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
8629  	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
8630  	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
8631  	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
8632  	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
8633  	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
8634  	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
8635  	_base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
8636  	_base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
8637  	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
8638  	if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
8639  		if (ioc->is_gen35_ioc) {
8640  			_base_unmask_events(ioc,
8641  				MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
8642  			_base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
8643  			_base_unmask_events(ioc,
8644  				MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
8645  		}
8646  	}
8647  	r = _base_make_ioc_operational(ioc);
8648  	if (r == -EAGAIN) {
8649  		r = _base_make_ioc_operational(ioc);
8650  		if (r)
8651  			goto out_free_resources;
8652  	}
8653  
8654  	/*
8655  	 * Copy current copy of IOCFacts in prev_fw_facts
8656  	 * and it will be used during online firmware upgrade.
8657  	 */
8658  	memcpy(&ioc->prev_fw_facts, &ioc->facts,
8659  	    sizeof(struct mpt3sas_facts));
8660  
8661  	ioc->non_operational_loop = 0;
8662  	ioc->ioc_coredump_loop = 0;
8663  	ioc->got_task_abort_from_ioctl = 0;
8664  	return 0;
8665  
8666   out_free_resources:
8667  
8668  	ioc->remove_host = 1;
8669  
8670  	mpt3sas_base_free_resources(ioc);
8671  	_base_release_memory_pools(ioc);
8672  	pci_set_drvdata(ioc->pdev, NULL);
8673  	kfree(ioc->cpu_msix_table);
8674  	if (ioc->is_warpdrive)
8675  		kfree(ioc->reply_post_host_index);
8676  	kfree(ioc->pd_handles);
8677  	kfree(ioc->blocking_handles);
8678  	kfree(ioc->device_remove_in_progress);
8679  	kfree(ioc->pend_os_device_add);
8680  	kfree(ioc->tm_cmds.reply);
8681  	kfree(ioc->transport_cmds.reply);
8682  	kfree(ioc->scsih_cmds.reply);
8683  	kfree(ioc->config_cmds.reply);
8684  	kfree(ioc->base_cmds.reply);
8685  	kfree(ioc->port_enable_cmds.reply);
8686  	kfree(ioc->ctl_cmds.reply);
8687  	kfree(ioc->ctl_cmds.sense);
8688  	kfree(ioc->pfacts);
8689  	ioc->ctl_cmds.reply = NULL;
8690  	ioc->base_cmds.reply = NULL;
8691  	ioc->tm_cmds.reply = NULL;
8692  	ioc->scsih_cmds.reply = NULL;
8693  	ioc->transport_cmds.reply = NULL;
8694  	ioc->config_cmds.reply = NULL;
8695  	ioc->pfacts = NULL;
8696  	return r;
8697  }
8698  
8699  
8700  /**
8701   * mpt3sas_base_detach - remove controller instance
8702   * @ioc: per adapter object
8703   */
8704  void
8705  mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
8706  {
8707  	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
8708  
8709  	mpt3sas_base_stop_watchdog(ioc);
8710  	mpt3sas_base_free_resources(ioc);
8711  	_base_release_memory_pools(ioc);
8712  	mpt3sas_free_enclosure_list(ioc);
8713  	pci_set_drvdata(ioc->pdev, NULL);
8714  	kfree(ioc->cpu_msix_table);
8715  	if (ioc->is_warpdrive)
8716  		kfree(ioc->reply_post_host_index);
8717  	kfree(ioc->pd_handles);
8718  	kfree(ioc->blocking_handles);
8719  	kfree(ioc->device_remove_in_progress);
8720  	kfree(ioc->pend_os_device_add);
8721  	kfree(ioc->pfacts);
8722  	kfree(ioc->ctl_cmds.reply);
8723  	kfree(ioc->ctl_cmds.sense);
8724  	kfree(ioc->base_cmds.reply);
8725  	kfree(ioc->port_enable_cmds.reply);
8726  	kfree(ioc->tm_cmds.reply);
8727  	kfree(ioc->transport_cmds.reply);
8728  	kfree(ioc->scsih_cmds.reply);
8729  	kfree(ioc->config_cmds.reply);
8730  }
8731  
8732  /**
8733   * _base_pre_reset_handler - pre reset handler
8734   * @ioc: per adapter object
8735   */
8736  static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
8737  {
8738  	mpt3sas_scsih_pre_reset_handler(ioc);
8739  	mpt3sas_ctl_pre_reset_handler(ioc);
8740  	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
8741  }
8742  
8743  /**
8744   * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands
8745   * @ioc: per adapter object
8746   */
8747  static void
8748  _base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
8749  {
8750  	dtmprintk(ioc,
8751  	    ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));
8752  	if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
8753  		ioc->transport_cmds.status |= MPT3_CMD_RESET;
8754  		mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
8755  		complete(&ioc->transport_cmds.done);
8756  	}
8757  	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
8758  		ioc->base_cmds.status |= MPT3_CMD_RESET;
8759  		mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
8760  		complete(&ioc->base_cmds.done);
8761  	}
8762  	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
8763  		ioc->port_enable_failed = 1;
8764  		ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
8765  		mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
8766  		if (ioc->is_driver_loading) {
8767  			ioc->start_scan_failed =
8768  				MPI2_IOCSTATUS_INTERNAL_ERROR;
8769  			ioc->start_scan = 0;
8770  		} else {
8771  			complete(&ioc->port_enable_cmds.done);
8772  		}
8773  	}
8774  	if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
8775  		ioc->config_cmds.status |= MPT3_CMD_RESET;
8776  		mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
8777  		ioc->config_cmds.smid = USHRT_MAX;
8778  		complete(&ioc->config_cmds.done);
8779  	}
8780  }
8781  
8782  /**
8783   * _base_clear_outstanding_commands - clear all outstanding commands
8784   * @ioc: per adapter object
8785   */
8786  static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
8787  {
8788  	mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
8789  	mpt3sas_ctl_clear_outstanding_ioctls(ioc);
8790  	_base_clear_outstanding_mpt_commands(ioc);
8791  }
8792  
8793  /**
8794   * _base_reset_done_handler - reset done handler
8795   * @ioc: per adapter object
8796   */
8797  static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
8798  {
8799  	mpt3sas_scsih_reset_done_handler(ioc);
8800  	mpt3sas_ctl_reset_done_handler(ioc);
8801  	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
8802  }
8803  
8804  /**
8805   * mpt3sas_wait_for_commands_to_complete - wait for pending commands to complete
8806   * @ioc: Pointer to MPT_ADAPTER structure
8807   *
8808   * This function waits up to 10 seconds for all pending commands to
8809   * complete prior to putting the controller in reset.
8810   */
8811  void
8812  mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
8813  {
8814  	u32 ioc_state;
8815  
8816  	ioc->pending_io_count = 0;
8817  
8818  	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
8819  	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
8820  		return;
8821  
8822  	/* pending command count */
8823  	ioc->pending_io_count = scsi_host_busy(ioc->shost);
8824  
8825  	if (!ioc->pending_io_count)
8826  		return;
8827  
8828  	/* wait for pending commands to complete */
8829  	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
8830  }
8831  
8832  /**
8833   * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts
8834   *     attributes during online firmware upgrade and update the corresponding
8835   *     IOC variables accordingly.
8836   *
8837   * @ioc: Pointer to MPT_ADAPTER structure
8838   */
8839  static int
8840  _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
8841  {
8842  	u16 pd_handles_sz;
8843  	void *pd_handles = NULL, *blocking_handles = NULL;
8844  	void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
8845  	struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;
8846  
8847  	if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
8848  		pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
8849  		if (ioc->facts.MaxDevHandle % 8)
8850  			pd_handles_sz++;
8851  
8852  		/*
8853  		 * pd_handles should have, at least, the minimal room for
8854  		 * set_bit()/test_bit(), otherwise out-of-memory touch may
8855  		 * occur.
8856  		 */
8857  		pd_handles_sz = ALIGN(pd_handles_sz, sizeof(unsigned long));
8858  		pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
8859  		    GFP_KERNEL);
8860  		if (!pd_handles) {
8861  			ioc_info(ioc,
8862  			    "Unable to allocate the memory for pd_handles of sz: %d\n",
8863  			    pd_handles_sz);
8864  			return -ENOMEM;
8865  		}
8866  		memset(pd_handles + ioc->pd_handles_sz, 0,
8867  		    (pd_handles_sz - ioc->pd_handles_sz));
8868  		ioc->pd_handles = pd_handles;
8869  
8870  		blocking_handles = krealloc(ioc->blocking_handles,
8871  		    pd_handles_sz, GFP_KERNEL);
8872  		if (!blocking_handles) {
8873  			ioc_info(ioc,
8874  			    "Unable to allocate the memory for "
8875  			    "blocking_handles of sz: %d\n",
8876  			    pd_handles_sz);
8877  			return -ENOMEM;
8878  		}
8879  		memset(blocking_handles + ioc->pd_handles_sz, 0,
8880  		    (pd_handles_sz - ioc->pd_handles_sz));
8881  		ioc->blocking_handles = blocking_handles;
8882  		ioc->pd_handles_sz = pd_handles_sz;
8883  
8884  		pend_os_device_add = krealloc(ioc->pend_os_device_add,
8885  		    pd_handles_sz, GFP_KERNEL);
8886  		if (!pend_os_device_add) {
8887  			ioc_info(ioc,
8888  			    "Unable to allocate the memory for pend_os_device_add of sz: %d\n",
8889  			    pd_handles_sz);
8890  			return -ENOMEM;
8891  		}
8892  		memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
8893  		    (pd_handles_sz - ioc->pend_os_device_add_sz));
8894  		ioc->pend_os_device_add = pend_os_device_add;
8895  		ioc->pend_os_device_add_sz = pd_handles_sz;
8896  
8897  		device_remove_in_progress = krealloc(
8898  		    ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
8899  		if (!device_remove_in_progress) {
8900  			ioc_info(ioc,
8901  			    "Unable to allocate the memory for device_remove_in_progress of sz: %d\n",
8902  			    pd_handles_sz);
8903  			return -ENOMEM;
8904  		}
8905  		memset(device_remove_in_progress +
8906  		    ioc->device_remove_in_progress_sz, 0,
8907  		    (pd_handles_sz - ioc->device_remove_in_progress_sz));
8908  		ioc->device_remove_in_progress = device_remove_in_progress;
8909  		ioc->device_remove_in_progress_sz = pd_handles_sz;
8910  	}
8911  
8912  	memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
8913  	return 0;
8914  }
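
/*
 * Worked example of the bitmap padding used above and in
 * mpt3sas_base_attach() (hypothetical numbers): with MaxDevHandle = 1000
 * the raw size is 1000 / 8 = 125 bytes, but set_bit(999, p) accesses
 * unsigned long word 999 / 64 = 15, i.e. bytes 120..127 on a 64-bit
 * kernel.  Rounding the allocation up to ALIGN(125, 8) = 128 bytes keeps
 * that access inside the buffer.
 */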
8915  
8916  /**
8917   * mpt3sas_base_hard_reset_handler - reset controller
8918   * @ioc: Pointer to MPT_ADAPTER structure
8919   * @type: FORCE_BIG_HAMMER or SOFT_RESET
8920   *
8921   * Return: 0 for success, non-zero for failure.
8922   */
8923  int
8924  mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
8925  	enum reset_type type)
8926  {
8927  	int r;
8928  	unsigned long flags;
8929  	u32 ioc_state;
8930  	u8 is_fault = 0, is_trigger = 0;
8931  
8932  	dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
8933  
8934  	if (ioc->pci_error_recovery) {
8935  		ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
8936  		r = 0;
8937  		goto out_unlocked;
8938  	}
8939  
8940  	if (mpt3sas_fwfault_debug)
8941  		mpt3sas_halt_firmware(ioc);
8942  
8943  	/* wait for an active reset in progress to complete */
8944  	mutex_lock(&ioc->reset_in_progress_mutex);
8945  
8946  	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
8947  	ioc->shost_recovery = 1;
8948  	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
8949  
8950  	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
8951  	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
8952  	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
8953  	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
8954  		is_trigger = 1;
8955  		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
8956  		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
8957  		    (ioc_state & MPI2_IOC_STATE_MASK) ==
8958  		    MPI2_IOC_STATE_COREDUMP) {
8959  			is_fault = 1;
8960  			ioc->htb_rel.trigger_info_dwords[1] =
8961  			    (ioc_state & MPI2_DOORBELL_DATA_MASK);
8962  		}
8963  	}
8964  	_base_pre_reset_handler(ioc);
8965  	mpt3sas_wait_for_commands_to_complete(ioc);
8966  	mpt3sas_base_mask_interrupts(ioc);
8967  	mpt3sas_base_pause_mq_polling(ioc);
8968  	r = mpt3sas_base_make_ioc_ready(ioc, type);
8969  	if (r)
8970  		goto out;
8971  	_base_clear_outstanding_commands(ioc);
8972  
8973  	/* If this hard reset is called while port enable is active, then
8974  	 * there is no reason to call make_ioc_operational
8975  	 */
8976  	if (ioc->is_driver_loading && ioc->port_enable_failed) {
8977  		ioc->remove_host = 1;
8978  		r = -EFAULT;
8979  		goto out;
8980  	}
8981  	r = _base_get_ioc_facts(ioc);
8982  	if (r)
8983  		goto out;
8984  
8985  	r = _base_check_ioc_facts_changes(ioc);
8986  	if (r) {
8987  		ioc_info(ioc,
8988  		    "Some of the parameters got changed in this new firmware"
8989  		    " image and it requires system reboot\n");
8990  		goto out;
8991  	}
8992  	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
8993  		panic("%s: Issue occurred with flashing controller firmware. "
8994  		      "Please reboot the system and ensure that the correct"
8995  		      " firmware version is running\n", ioc->name);
8996  
8997  	r = _base_make_ioc_operational(ioc);
8998  	if (!r)
8999  		_base_reset_done_handler(ioc);
9000  
9001   out:
9002  	ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED");
9003  
9004  	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
9005  	ioc->shost_recovery = 0;
9006  	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
9007  	ioc->ioc_reset_count++;
9008  	mutex_unlock(&ioc->reset_in_progress_mutex);
9009  	mpt3sas_base_resume_mq_polling(ioc);
9010  
9011   out_unlocked:
9012  	if ((r == 0) && is_trigger) {
9013  		if (is_fault)
9014  			mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
9015  		else
9016  			mpt3sas_trigger_master(ioc,
9017  			    MASTER_TRIGGER_ADAPTER_RESET);
9018  	}
9019  	dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
9020  	return r;
9021  }
9022