1  /*******************************************************************
2   * This file is part of the Emulex Linux Device Driver for         *
3   * Fibre Channel Host Bus Adapters.                                *
4   * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
5   * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
6   * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7   * EMULEX and SLI are trademarks of Emulex.                        *
8   * www.broadcom.com                                                *
9   * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10   *                                                                 *
11   * This program is free software; you can redistribute it and/or   *
12   * modify it under the terms of version 2 of the GNU General       *
13   * Public License as published by the Free Software Foundation.    *
14   * This program is distributed in the hope that it will be useful. *
15   * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16   * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17   * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18   * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19   * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20   * more details, a copy of which can be found in the file COPYING  *
21   * included with this package.                                     *
22   *******************************************************************/
23  
24  #include <scsi/scsi_host.h>
25  #include <linux/hashtable.h>
26  #include <linux/ktime.h>
27  #include <linux/workqueue.h>
28  
29  #if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
30  #define CONFIG_SCSI_LPFC_DEBUG_FS
31  #endif
32  
33  struct lpfc_sli2_slim;
34  
35  #define ELX_MODEL_NAME_SIZE	80
36  #define ELX_FW_NAME_SIZE	84
37  
38  #define LPFC_PCI_DEV_LP		0x1
39  #define LPFC_PCI_DEV_OC		0x2
40  
41  #define LPFC_SLI_REV2		2
42  #define LPFC_SLI_REV3		3
43  #define LPFC_SLI_REV4		4
44  
45  #define LPFC_MAX_TARGET		4096	/* max number of targets supported */
46  #define LPFC_MAX_DISC_THREADS	64	/* max outstanding discovery els
47  					   requests */
48  #define LPFC_MAX_NS_RETRY	3	/* Number of retry attempts to contact
49  					   the NameServer  before giving up. */
50  #define LPFC_CMD_PER_LUN	3	/* max outstanding cmds per lun */
51  #define LPFC_DEFAULT_SG_SEG_CNT 64	/* sg element count per scsi cmnd */
52  
53  #define LPFC_DEFAULT_XPSGL_SIZE	256
54  #define LPFC_MAX_SG_TABLESIZE	0xffff
55  #define LPFC_MIN_SG_SLI4_BUF_SZ	0x800	/* based on LPFC_DEFAULT_SG_SEG_CNT */
56  #define LPFC_MAX_BG_SLI4_SEG_CNT_DIF 128 /* sg element count for BlockGuard */
57  #define LPFC_MAX_SG_SEG_CNT_DIF 512	/* sg element count per scsi cmnd  */
58  #define LPFC_MAX_SG_SEG_CNT	4096	/* sg element count per scsi cmnd */
59  #define LPFC_MIN_SG_SEG_CNT	32	/* sg element count per scsi cmnd */
60  #define LPFC_MAX_SGL_SEG_CNT	512	/* SGL element count per scsi cmnd */
61  #define LPFC_MAX_BPL_SEG_CNT	4096	/* BPL element count per scsi cmnd */
62  #define LPFC_MAX_NVME_SEG_CNT	256	/* max SGL element cnt per NVME cmnd */
63  
64  #define LPFC_MAX_SGE_SIZE       0x80000000 /* Maximum data allowed in a SGE */
65  #define LPFC_IOCB_LIST_CNT	2250	/* list of IOCBs for fast-path usage. */
66  #define LPFC_Q_RAMP_UP_INTERVAL 120     /* lun q_depth ramp up interval */
67  #define LPFC_VNAME_LEN		100	/* vport symbolic name length */
68  #define LPFC_TGTQ_RAMPUP_PCENT	5	/* Target queue rampup in percentage */
69  #define LPFC_MIN_TGT_QDEPTH	10
70  #define LPFC_MAX_TGT_QDEPTH	0xFFFF
71  
72  /*
73   * The following time intervals are used for adjusting SCSI device
74   * queue depths when there is a driver resource error or a firmware
75   * resource error.
76   */
77  /* 1 Second */
78  #define QUEUE_RAMP_DOWN_INTERVAL	(msecs_to_jiffies(1000 * 1))
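
/*
 * Illustrative only (not driver code): the interval gates how often the
 * worker path may ramp queue depths back down after a resource error,
 * roughly
 *
 *	if (time_after(jiffies, phba->last_ramp_down_time +
 *				QUEUE_RAMP_DOWN_INTERVAL))
 *		phba->lpfc_rampdown_queue_depth(phba);
 */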
79  
80  /* Number of exchanges reserved for discovery to complete */
81  #define LPFC_DISC_IOCB_BUFF_COUNT 20
82  
83  #define LPFC_HB_MBOX_INTERVAL   5	/* Heart beat interval in seconds. */
84  #define LPFC_HB_MBOX_TIMEOUT    30	/* Heart beat timeout  in seconds. */
85  
86  /* Error Attention event polling interval */
87  #define LPFC_ERATT_POLL_INTERVAL	5 /* EATT poll interval in seconds */
88  
89  /* Define macros for 64 bit support */
90  #define putPaddrLow(addr)    ((uint32_t) (0xffffffff & (u64)(addr)))
91  #define putPaddrHigh(addr)   ((uint32_t) (0xffffffff & (((u64)(addr))>>32)))
92  #define getPaddr(high, low)  ((dma_addr_t)( \
93  			     (( (u64)(high)<<16 ) << 16)|( (u64)(low))))
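
/*
 * Illustrative only: a 64-bit DMA address is carried in hardware
 * descriptors as two 32-bit words and can be rebuilt from them:
 *
 *	uint32_t lo = putPaddrLow(mp->phys);
 *	uint32_t hi = putPaddrHigh(mp->phys);
 *	dma_addr_t addr = getPaddr(hi, lo);
 *
 * after which addr equals mp->phys again.
 */
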
94  /* Provide maximum configuration definitions. */
95  #define LPFC_DRVR_TIMEOUT	16	/* driver iocb timeout value in sec */
96  #define FC_MAX_ADPTMSG		64
97  
98  #define MAX_HBAEVT	32
99  #define MAX_HBAS_NO_RESET 16
100  
101  /* Number of MSI-X vectors the driver uses */
102  #define LPFC_MSIX_VECTORS	2
103  
104  /* lpfc wait event data ready flag */
105  #define LPFC_DATA_READY		0	/* bit 0 */
106  
107  /* queue dump line buffer size */
108  #define LPFC_LBUF_SZ		128
109  
110  /* mailbox system shutdown options */
111  #define LPFC_MBX_NO_WAIT	0
112  #define LPFC_MBX_WAIT		1
113  
114  #define LPFC_CFG_PARAM_MAGIC_NUM 0xFEAA0005
115  #define LPFC_PORT_CFG_NAME "/cfg/port.cfg"
116  
117  #define lpfc_rangecheck(val, min, max) \
118  	((uint)(val) >= (uint)(min) && (val) <= (max))
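
/*
 * Example (illustrative only): accept a sysfs/module value only when it
 * falls inside the supported window, e.g.
 *
 *	if (lpfc_rangecheck(val, LPFC_MIN_TGT_QDEPTH, LPFC_MAX_TGT_QDEPTH))
 *		vport->cfg_tgt_queue_depth = val;
 */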
119  
120  enum lpfc_polling_flags {
121  	ENABLE_FCP_RING_POLLING = 0x1,
122  	DISABLE_FCP_RING_INT    = 0x2
123  };
124  
125  struct perf_prof {
126  	uint16_t cmd_cpu[40];
127  	uint16_t rsp_cpu[40];
128  	uint16_t qh_cpu[40];
129  	uint16_t wqidx[40];
130  };
131  
132  /*
133   * Provide for FC4 TYPE x28 - NVME.  FCP (type 0x08) and NVME (type
134   * 0x28) map to the same bit (bit 8, mask 0x100) within their 32-bit
135   * type words because the two type values are exactly 32 apart.
136   */
137  #define LPFC_FC4_TYPE_BITMASK	0x00000100
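
/*
 * Illustrative arithmetic: both FC-4 type values select bit 8 of their
 * respective 32-bit type words,
 *
 *	1u << (0x08 & 0x1f) == 0x00000100
 *	1u << (0x28 & 0x1f) == 0x00000100 == LPFC_FC4_TYPE_BITMASK
 */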
138  
139  /* Provide DMA memory definitions the driver uses per port instance. */
140  struct lpfc_dmabuf {
141  	struct list_head list;
142  	void *virt;		/* virtual address ptr */
143  	dma_addr_t phys;	/* mapped address */
144  	uint32_t   buffer_tag;	/* used for tagged queue ring */
145  };
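
/*
 * Minimal sketch (illustrative only; the driver's real allocation paths
 * use its mempools and DMA pools): filling in one descriptor from a
 * coherent DMA allocation, where size stands in for the caller's length.
 *
 *	struct lpfc_dmabuf *mp = kzalloc(sizeof(*mp), GFP_KERNEL);
 *
 *	if (mp) {
 *		INIT_LIST_HEAD(&mp->list);
 *		mp->virt = dma_alloc_coherent(&phba->pcidev->dev, size,
 *					      &mp->phys, GFP_KERNEL);
 *	}
 */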
146  
147  struct lpfc_nvmet_ctxbuf {
148  	struct list_head list;
149  	struct lpfc_async_xchg_ctx *context;
150  	struct lpfc_iocbq *iocbq;
151  	struct lpfc_sglq *sglq;
152  	struct work_struct defer_work;
153  };
154  
155  struct lpfc_dma_pool {
156  	struct lpfc_dmabuf   *elements;
157  	uint32_t    max_count;
158  	uint32_t    current_count;
159  };
160  
161  struct hbq_dmabuf {
162  	struct lpfc_dmabuf hbuf;
163  	struct lpfc_dmabuf dbuf;
164  	uint16_t total_size;
165  	uint16_t bytes_recv;
166  	uint32_t tag;
167  	struct lpfc_cq_event cq_event;
168  	unsigned long time_stamp;
169  	void *context;
170  };
171  
172  struct rqb_dmabuf {
173  	struct lpfc_dmabuf hbuf;
174  	struct lpfc_dmabuf dbuf;
175  	uint16_t total_size;
176  	uint16_t bytes_recv;
177  	uint16_t idx;
178  	struct lpfc_queue *hrq;	  /* ptr to associated Header RQ */
179  	struct lpfc_queue *drq;	  /* ptr to associated Data RQ */
180  };
181  
182  /* Priority bit.  Set value to exceed low water mark in lpfc_mem. */
183  #define MEM_PRI		0x100
184  
185  
186  /****************************************************************************/
187  /*      Device VPD save area                                                */
188  /****************************************************************************/
189  typedef struct lpfc_vpd {
190  	uint32_t status;	/* vpd status value */
191  	uint32_t length;	/* number of bytes actually returned */
192  	struct {
193  		uint32_t rsvd1;	/* Revision numbers */
194  		uint32_t biuRev;
195  		uint32_t smRev;
196  		uint32_t smFwRev;
197  		uint32_t endecRev;
198  		uint16_t rBit;
199  		uint8_t fcphHigh;
200  		uint8_t fcphLow;
201  		uint8_t feaLevelHigh;
202  		uint8_t feaLevelLow;
203  		uint32_t postKernRev;
204  		uint32_t opFwRev;
205  		uint8_t opFwName[16];
206  		uint32_t sli1FwRev;
207  		uint8_t sli1FwName[16];
208  		uint32_t sli2FwRev;
209  		uint8_t sli2FwName[16];
210  	} rev;
211  	struct {
212  #ifdef __BIG_ENDIAN_BITFIELD
213  		uint32_t rsvd3  :20;  /* Reserved                             */
214  		uint32_t rsvd2	: 3;  /* Reserved                             */
215  		uint32_t cbg	: 1;  /* Configure BlockGuard                 */
216  		uint32_t cmv	: 1;  /* Configure Max VPIs                   */
217  		uint32_t ccrp   : 1;  /* Config Command Ring Polling          */
218  		uint32_t csah   : 1;  /* Configure Synchronous Abort Handling */
219  		uint32_t chbs   : 1;  /* Configure Host Backing store         */
220  		uint32_t cinb   : 1;  /* Enable Interrupt Notification Block  */
221  		uint32_t cerbm	: 1;  /* Configure Enhanced Receive Buf Mgmt  */
222  		uint32_t cmx	: 1;  /* Configure Max XRIs                   */
223  		uint32_t cmr	: 1;  /* Configure Max RPIs                   */
224  #else	/*  __LITTLE_ENDIAN */
225  		uint32_t cmr	: 1;  /* Configure Max RPIs                   */
226  		uint32_t cmx	: 1;  /* Configure Max XRIs                   */
227  		uint32_t cerbm	: 1;  /* Configure Enhanced Receive Buf Mgmt  */
228  		uint32_t cinb   : 1;  /* Enable Interrupt Notification Block  */
229  		uint32_t chbs   : 1;  /* Configure Host Backing store         */
230  		uint32_t csah   : 1;  /* Configure Synchronous Abort Handling */
231  		uint32_t ccrp   : 1;  /* Config Command Ring Polling          */
232  		uint32_t cmv	: 1;  /* Configure Max VPIs                   */
233  		uint32_t cbg	: 1;  /* Configure BlockGuard                 */
234  		uint32_t rsvd2	: 3;  /* Reserved                             */
235  		uint32_t rsvd3  :20;  /* Reserved                             */
236  #endif
237  	} sli3Feat;
238  } lpfc_vpd_t;
239  
240  
241  /*
242   * lpfc stat counters
243   */
244  struct lpfc_stats {
245  	/* Statistics for ELS commands */
246  	uint32_t elsLogiCol;
247  	uint32_t elsRetryExceeded;
248  	uint32_t elsXmitRetry;
249  	uint32_t elsDelayRetry;
250  	uint32_t elsRcvDrop;
251  	uint32_t elsRcvFrame;
252  	uint32_t elsRcvRSCN;
253  	uint32_t elsRcvRNID;
254  	uint32_t elsRcvFARP;
255  	uint32_t elsRcvFARPR;
256  	uint32_t elsRcvFLOGI;
257  	uint32_t elsRcvPLOGI;
258  	uint32_t elsRcvADISC;
259  	uint32_t elsRcvPDISC;
260  	uint32_t elsRcvFAN;
261  	uint32_t elsRcvLOGO;
262  	uint32_t elsRcvPRLO;
263  	uint32_t elsRcvPRLI;
264  	uint32_t elsRcvLIRR;
265  	uint32_t elsRcvRLS;
266  	uint32_t elsRcvRPL;
267  	uint32_t elsRcvRRQ;
268  	uint32_t elsRcvRTV;
269  	uint32_t elsRcvECHO;
270  	uint32_t elsRcvLCB;
271  	uint32_t elsRcvRDP;
272  	uint32_t elsRcvRDF;
273  	uint32_t elsXmitFLOGI;
274  	uint32_t elsXmitFDISC;
275  	uint32_t elsXmitPLOGI;
276  	uint32_t elsXmitPRLI;
277  	uint32_t elsXmitADISC;
278  	uint32_t elsXmitLOGO;
279  	uint32_t elsXmitSCR;
280  	uint32_t elsXmitRSCN;
281  	uint32_t elsXmitRNID;
282  	uint32_t elsXmitFARP;
283  	uint32_t elsXmitFARPR;
284  	uint32_t elsXmitACC;
285  	uint32_t elsXmitLSRJT;
286  
287  	uint32_t frameRcvBcast;
288  	uint32_t frameRcvMulti;
289  	uint32_t strayXmitCmpl;
290  	uint32_t frameXmitDelay;
291  	uint32_t xriCmdCmpl;
292  	uint32_t xriStatErr;
293  	uint32_t LinkUp;
294  	uint32_t LinkDown;
295  	uint32_t LinkMultiEvent;
296  	uint32_t NoRcvBuf;
297  	uint32_t fcpCmd;
298  	uint32_t fcpCmpl;
299  	uint32_t fcpRspErr;
300  	uint32_t fcpRemoteStop;
301  	uint32_t fcpPortRjt;
302  	uint32_t fcpPortBusy;
303  	uint32_t fcpError;
304  	uint32_t fcpLocalErr;
305  };
306  
307  struct lpfc_hba;
308  
309  /* Data structure to keep withheld FLOGI_ACC information */
310  struct lpfc_defer_flogi_acc {
311  	bool flag;
312  	u16 rx_id;
313  	u16 ox_id;
314  	struct lpfc_nodelist *ndlp;
315  
316  };
317  
318  #define LPFC_VMID_TIMER   300	/* timer interval in seconds */
319  
320  #define LPFC_MAX_VMID_SIZE      256
321  
322  union lpfc_vmid_io_tag {
323  	u32 app_id;	/* App Id vmid */
324  	u8 cs_ctl_vmid;	/* Priority tag vmid */
325  };
326  
327  #define JIFFIES_PER_HR	(HZ * 60 * 60)
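
/*
 * Illustrative only: LPFC_VMID_TIMER is in seconds, so (re)arming the
 * inactivity poll timer looks roughly like
 *
 *	mod_timer(&phba->inactive_vmid_poll,
 *		  jiffies + msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
 */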
328  
329  struct lpfc_vmid {
330  	u8 flag;
331  #define LPFC_VMID_SLOT_FREE     0x0
332  #define LPFC_VMID_SLOT_USED     0x1
333  #define LPFC_VMID_REQ_REGISTER  0x2
334  #define LPFC_VMID_REGISTERED    0x4
335  #define LPFC_VMID_DE_REGISTER   0x8
336  	char host_vmid[LPFC_MAX_VMID_SIZE];
337  	union lpfc_vmid_io_tag un;
338  	struct hlist_node hnode;
339  	u64 io_rd_cnt;
340  	u64 io_wr_cnt;
341  	u8 vmid_len;
342  	u8 delete_inactive; /* Delete if inactive flag 0 = no, 1 = yes */
343  	u32 hash_index;
344  	u64 __percpu *last_io_time;
345  };
346  
347  #define lpfc_vmid_is_type_priority_tag(vport)\
348  	(vport->vmid_priority_tagging ? 1 : 0)
349  
350  #define LPFC_VMID_HASH_SIZE     256
351  #define LPFC_VMID_HASH_MASK     255
352  #define LPFC_VMID_HASH_SHIFT    6
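
/*
 * Illustrative only (not the driver's hash routine): a registered VMID
 * string is folded into one of LPFC_VMID_HASH_SIZE buckets, with the
 * mask keeping the result in range, e.g.
 *
 *	vmid->hash_index = vmid_checksum(host_vmid, vmid_len) &
 *			   LPFC_VMID_HASH_MASK;
 *
 * where vmid_checksum() stands in for whatever folding function is used.
 */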
353  
354  struct lpfc_vmid_context {
355  	struct lpfc_vmid *vmp;
356  	struct lpfc_nodelist *nlp;
357  	bool instantiated;
358  };
359  
360  struct lpfc_vmid_priority_range {
361  	u8 low;
362  	u8 high;
363  	u8 qos;
364  };
365  
366  struct lpfc_vmid_priority_info {
367  	u32 num_descriptors;
368  	struct lpfc_vmid_priority_range *vmid_range;
369  };
370  
371  #define QFPA_EVEN_ONLY 0x01
372  #define QFPA_ODD_ONLY  0x02
373  #define QFPA_EVEN_ODD  0x03
374  
375  enum discovery_state {
376  	LPFC_VPORT_UNKNOWN     =  0,    /* vport state is unknown */
377  	LPFC_VPORT_FAILED      =  1,    /* vport has failed */
378  	LPFC_LOCAL_CFG_LINK    =  6,    /* local NPORT Id configured */
379  	LPFC_FLOGI             =  7,    /* FLOGI sent to Fabric */
380  	LPFC_FDISC             =  8,    /* FDISC sent for vport */
381  	LPFC_FABRIC_CFG_LINK   =  9,    /* Fabric assigned NPORT Id
382  				         * configured */
383  	LPFC_NS_REG            =  10,   /* Register with NameServer */
384  	LPFC_NS_QRY            =  11,   /* Query NameServer for NPort ID list */
385  	LPFC_BUILD_DISC_LIST   =  12,   /* Build ADISC and PLOGI lists for
386  				         * device authentication / discovery */
387  	LPFC_DISC_AUTH         =  13,   /* Processing ADISC list */
388  	LPFC_VPORT_READY       =  32,
389  };
390  
391  enum hba_state {
392  	LPFC_LINK_UNKNOWN    =   0,   /* HBA state is unknown */
393  	LPFC_WARM_START      =   1,   /* HBA state after selective reset */
394  	LPFC_INIT_START      =   2,   /* Initial state after board reset */
395  	LPFC_INIT_MBX_CMDS   =   3,   /* Initialize HBA with mbox commands */
396  	LPFC_LINK_DOWN       =   4,   /* HBA initialized, link is down */
397  	LPFC_LINK_UP         =   5,   /* Link is up  - issue READ_LA */
398  	LPFC_CLEAR_LA        =   6,   /* authentication cmplt - issue
399  				       * CLEAR_LA */
400  	LPFC_HBA_READY       =  32,
401  	LPFC_HBA_ERROR       =  -1
402  };
403  
404  enum lpfc_hba_flag { /* hba generic flags */
405  	HBA_ERATT_HANDLED	= 0, /* This flag is set when eratt handled */
406  	DEFER_ERATT		= 1, /* Deferred error attn in progress */
407  	HBA_FCOE_MODE		= 2, /* HBA function in FCoE Mode */
408  	HBA_SP_QUEUE_EVT	= 3, /* Slow-path qevt posted to worker thread*/
409  	HBA_POST_RECEIVE_BUFFER = 4, /* Rcv buffers need to be posted */
410  	HBA_PERSISTENT_TOPO	= 5, /* Persistent topology support in hba */
411  	ELS_XRI_ABORT_EVENT	= 6, /* ELS_XRI abort event was queued */
412  	ASYNC_EVENT		= 7,
413  	LINK_DISABLED		= 8, /* Link disabled by user */
414  	FCF_TS_INPROG           = 9, /* FCF table scan in progress */
415  	FCF_RR_INPROG           = 10, /* FCF roundrobin flogi in progress */
416  	HBA_FIP_SUPPORT		= 11, /* FIP support in HBA */
417  	HBA_DEVLOSS_TMO         = 13, /* HBA in devloss timeout */
418  	HBA_RRQ_ACTIVE		= 14, /* process the rrq active list */
419  	HBA_IOQ_FLUSH		= 15, /* I/O queues being flushed */
420  	HBA_RECOVERABLE_UE	= 17, /* FW supports recoverable UE */
421  	HBA_FORCED_LINK_SPEED	= 18, /*
422  				       * Firmware supports Forced Link
423  				       * Speed capability
424  				       */
425  	HBA_FLOGI_ISSUED	= 20, /* FLOGI was issued */
426  	HBA_DEFER_FLOGI		= 23, /* Defer FLOGI till read_sparm cmpl */
427  	HBA_SETUP		= 24, /* HBA setup completed */
428  	HBA_NEEDS_CFG_PORT	= 25, /* SLI3: CONFIG_PORT mbox needed */
429  	HBA_HBEAT_INP		= 26, /* mbox HBEAT is in progress */
430  	HBA_HBEAT_TMO		= 27, /* HBEAT initiated after timeout */
431  	HBA_FLOGI_OUTSTANDING	= 28, /* FLOGI is outstanding */
432  	HBA_RHBA_CMPL		= 29, /* RHBA FDMI cmd is successful */
433  };
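
/*
 * Illustrative only: these values are bit numbers for the unsigned long
 * hba_flag word in struct lpfc_hba and are meant for the atomic bitops,
 * e.g.
 *
 *	set_bit(HBA_SETUP, &phba->hba_flag);
 *	if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag))
 *		return;
 */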
434  
435  struct lpfc_trunk_link_state {
436  	enum hba_state state;
437  	uint8_t fault;
438  };
439  
440  struct lpfc_trunk_link  {
441  	struct lpfc_trunk_link_state link0,
442  				     link1,
443  				     link2,
444  				     link3;
445  	u32 phy_lnk_speed;
446  };
447  
448  /* Format of congestion module parameters */
449  struct lpfc_cgn_param {
450  	uint32_t cgn_param_magic;
451  	uint8_t  cgn_param_version;	/* version 1 */
452  	uint8_t  cgn_param_mode;	/* 0=off 1=managed 2=monitor only */
453  #define LPFC_CFG_OFF		0
454  #define LPFC_CFG_MANAGED	1
455  #define LPFC_CFG_MONITOR	2
456  	uint8_t  cgn_rsvd1;
457  	uint8_t  cgn_rsvd2;
458  	uint8_t  cgn_param_level0;
459  	uint8_t  cgn_param_level1;
460  	uint8_t  cgn_param_level2;
461  	uint8_t  byte11;
462  	uint8_t  byte12;
463  	uint8_t  byte13;
464  	uint8_t  byte14;
465  	uint8_t  byte15;
466  };
467  
468  /* Max number of days of congestion data */
469  #define LPFC_MAX_CGN_DAYS 10
470  
471  struct lpfc_cgn_ts {
472  	uint8_t month;
473  	uint8_t day;
474  	uint8_t year;
475  	uint8_t hour;
476  	uint8_t minute;
477  	uint8_t second;
478  };
479  
480  /* Format of congestion buffer info
481   * This structure defines memory that is allocated and registered with
482   * the HBA firmware. When adding or removing fields from this structure
483   * the alignment must match the HBA firmware.
484   */
485  
486  struct lpfc_cgn_info {
487  	/* Header */
488  	__le16   cgn_info_size;		/* is sizeof(struct lpfc_cgn_info) */
489  	uint8_t  cgn_info_version;	/* represents format of structure */
490  #define LPFC_CGN_INFO_V1	1
491  #define LPFC_CGN_INFO_V2	2
492  #define LPFC_CGN_INFO_V3	3
493  #define LPFC_CGN_INFO_V4	4
494  	uint8_t  cgn_info_mode;		/* 0=off 1=managed 2=monitor only */
495  	uint8_t  cgn_info_detect;
496  	uint8_t  cgn_info_action;
497  	uint8_t  cgn_info_level0;
498  	uint8_t  cgn_info_level1;
499  	uint8_t  cgn_info_level2;
500  
501  	/* Start Time */
502  	struct lpfc_cgn_ts base_time;
503  
504  	/* minute / hours / daily indices */
505  	uint8_t  cgn_index_minute;
506  	uint8_t  cgn_index_hour;
507  	uint8_t  cgn_index_day;
508  
509  	__le16   cgn_warn_freq;
510  	__le16   cgn_alarm_freq;
511  	__le16   cgn_lunq;
512  	uint8_t  cgn_pad1[8];
513  
514  	/* Driver Information */
515  	__le16   cgn_drvr_min[60];
516  	__le32   cgn_drvr_hr[24];
517  	__le32   cgn_drvr_day[LPFC_MAX_CGN_DAYS];
518  
519  	/* Congestion Warnings */
520  	__le16   cgn_warn_min[60];
521  	__le32   cgn_warn_hr[24];
522  	__le32   cgn_warn_day[LPFC_MAX_CGN_DAYS];
523  
524  	/* Latency Information */
525  	__le32   cgn_latency_min[60];
526  	__le32   cgn_latency_hr[24];
527  	__le32   cgn_latency_day[LPFC_MAX_CGN_DAYS];
528  
529  	/* Bandwidth Information */
530  	__le16   cgn_bw_min[60];
531  	__le16   cgn_bw_hr[24];
532  	__le16   cgn_bw_day[LPFC_MAX_CGN_DAYS];
533  
534  	/* Congestion Alarms */
535  	__le16   cgn_alarm_min[60];
536  	__le32   cgn_alarm_hr[24];
537  	__le32   cgn_alarm_day[LPFC_MAX_CGN_DAYS];
538  
539  	struct_group(cgn_stat,
540  		uint8_t  cgn_stat_npm;		/* Notifications per minute */
541  
542  		/* Start Time */
543  		struct lpfc_cgn_ts stat_start;	/* Base time */
544  		uint8_t cgn_pad2;
545  
546  		__le32   cgn_notification;
547  		__le32   cgn_peer_notification;
548  		__le32   link_integ_notification;
549  		__le32   delivery_notification;
550  		struct lpfc_cgn_ts stat_fpin;	/* Last congestion notification FPIN */
551  		struct lpfc_cgn_ts stat_peer;	/* Last peer congestion FPIN */
552  		struct lpfc_cgn_ts stat_lnk;	/* Last link integrity FPIN */
553  		struct lpfc_cgn_ts stat_delivery;	/* Last delivery notification FPIN */
554  	);
555  
556  	__le32   cgn_info_crc;
557  #define LPFC_CGN_CRC32_MAGIC_NUMBER	0x1EDC6F41
558  #define LPFC_CGN_CRC32_SEED		0xFFFFFFFF
559  };
560  
561  #define LPFC_CGN_INFO_SZ	(sizeof(struct lpfc_cgn_info) -  \
562  				sizeof(uint32_t))
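
/*
 * LPFC_CGN_INFO_SZ is the byte count covered by cgn_info_crc: the whole
 * of struct lpfc_cgn_info except the trailing 32-bit CRC field itself.
 * Adding or removing a field therefore changes both the registered
 * buffer layout and the range the CRC is computed over.
 */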
563  
564  struct lpfc_cgn_stat {
565  	atomic64_t total_bytes;
566  	atomic64_t rcv_bytes;
567  	atomic64_t rx_latency;
568  #define LPFC_CGN_NOT_SENT	0xFFFFFFFFFFFFFFFFLL
569  	atomic_t rx_io_cnt;
570  };
571  
572  struct lpfc_cgn_acqe_stat {
573  	atomic64_t alarm;
574  	atomic64_t warn;
575  };
576  
577  enum lpfc_fc_flag {
578  	/* Several of these flags are HBA centric and should be moved to
579  	 * phba->link_flag (e.g. FC_PT2PT, FC_PUBLIC_LOOP)
580  	 */
581  	FC_PT2PT,			/* pt2pt with no fabric */
582  	FC_PT2PT_PLOGI,			/* pt2pt initiate PLOGI */
583  	FC_DISC_TMO,			/* Discovery timer running */
584  	FC_PUBLIC_LOOP,			/* Public loop */
585  	FC_LBIT,			/* LOGIN bit in loopinit set */
586  	FC_RSCN_MODE,			/* RSCN cmd rcv'ed */
587  	FC_NLP_MORE,			/* More node to process in node tbl */
588  	FC_OFFLINE_MODE,		/* Interface is offline for diag */
589  	FC_FABRIC,			/* We are fabric attached */
590  	FC_VPORT_LOGO_RCVD,		/* LOGO received on vport */
591  	FC_RSCN_DISCOVERY,		/* Auth all devices after RSCN */
592  	FC_LOGO_RCVD_DID_CHNG,		/* FDISC on phys port detect DID chng */
593  	FC_PT2PT_NO_NVME,		/* Don't send NVME PRLI */
594  	FC_SCSI_SCAN_TMO,		/* scsi scan timer running */
595  	FC_ABORT_DISCOVERY,		/* we want to abort discovery */
596  	FC_NDISC_ACTIVE,		/* NPort discovery active */
597  	FC_BYPASSED_MODE,		/* NPort is in bypassed mode */
598  	FC_VPORT_NEEDS_REG_VPI,		/* Needs to have its vpi registered */
599  	FC_RSCN_DEFERRED,		/* A deferred RSCN being processed */
600  	FC_VPORT_NEEDS_INIT_VPI,	/* Need to INIT_VPI before FDISC */
601  	FC_VPORT_CVL_RCVD,		/* VLink failed due to CVL */
602  	FC_VFI_REGISTERED,		/* VFI is registered */
603  	FC_FDISC_COMPLETED,		/* FDISC completed */
604  	FC_DISC_DELAYED,		/* Delay NPort discovery */
605  };
606  
607  enum lpfc_load_flag {
608  	FC_LOADING,			/* HBA in process of loading drvr */
609  	FC_UNLOADING,			/* HBA in process of unloading drvr */
610  	FC_ALLOW_FDMI,			/* port is ready for FDMI requests */
611  	FC_ALLOW_VMID,			/* Allow VMID I/Os */
612  	FC_DEREGISTER_ALL_APP_ID	/* Deregister all VMIDs */
613  };
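
/*
 * Illustrative only: lpfc_fc_flag and lpfc_load_flag values are bit
 * numbers for the unsigned long fc_flag and load_flag words in struct
 * lpfc_vport, e.g.
 *
 *	if (test_bit(FC_UNLOADING, &vport->load_flag))
 *		return;
 *	set_bit(FC_RSCN_MODE, &vport->fc_flag);
 */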
614  
615  struct lpfc_vport {
616  	struct lpfc_hba *phba;
617  	struct list_head listentry;
618  	uint8_t port_type;
619  #define LPFC_PHYSICAL_PORT 1
620  #define LPFC_NPIV_PORT  2
621  #define LPFC_FABRIC_PORT 3
622  	enum discovery_state port_state;
623  
624  	uint16_t vpi;
625  	uint16_t vfi;
626  	uint8_t vpi_state;
627  #define LPFC_VPI_REGISTERED	0x1
628  
629  	unsigned long fc_flag;	/* FC flags */
630  
631  	uint32_t ct_flags;
632  #define FC_CT_RFF_ID		0x1	 /* RFF_ID accepted by switch */
633  #define FC_CT_RNN_ID		0x2	 /* RNN_ID accepted by switch */
634  #define FC_CT_RSNN_NN		0x4	 /* RSNN_NN accepted by switch */
635  #define FC_CT_RSPN_ID		0x8	 /* RSPN_ID accepted by switch */
636  #define FC_CT_RFT_ID		0x10	 /* RFT_ID accepted by switch */
637  #define FC_CT_RPRT_DEFER	0x20	 /* Defer issuing FDMI RPRT */
638  
639  	struct list_head fc_nodes;
640  	spinlock_t fc_nodes_list_lock; /* spinlock for fc_nodes list */
641  
642  	/* Keep counters for the number of entries in each list. */
643  	atomic_t fc_plogi_cnt;
644  	atomic_t fc_adisc_cnt;
645  	atomic_t fc_reglogin_cnt;
646  	atomic_t fc_prli_cnt;
647  	atomic_t fc_unmap_cnt;
648  	atomic_t fc_map_cnt;
649  	atomic_t fc_npr_cnt;
650  	atomic_t fc_unused_cnt;
651  
652  	struct serv_parm fc_sparam;	/* buffer for our service parameters */
653  
654  	uint32_t fc_myDID;	/* fibre channel S_ID */
655  	uint32_t fc_prevDID;	/* previous fibre channel S_ID */
656  	struct lpfc_name fabric_portname;
657  	struct lpfc_name fabric_nodename;
658  
659  	int32_t stopped;   /* HBA has not been restarted since last ERATT */
660  	uint8_t fc_linkspeed;	/* Link speed after last READ_LA */
661  
662  	uint32_t num_disc_nodes;	/* in addition to hba_state */
663  	uint32_t gidft_inp;		/* cnt of outstanding GID_FTs */
664  
665  	uint32_t fc_nlp_cnt;	/* outstanding NODELIST requests */
666  	uint32_t fc_rscn_id_cnt;	/* count of RSCNs payloads in list */
667  	uint32_t fc_rscn_flush;		/* flag use of fc_rscn_id_list */
668  	struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
669  	struct lpfc_name fc_nodename;	/* fc nodename */
670  	struct lpfc_name fc_portname;	/* fc portname */
671  
672  	struct lpfc_work_evt disc_timeout_evt;
673  
674  	struct timer_list fc_disctmo;	/* Discovery rescue timer */
675  	uint8_t fc_ns_retry;	/* retries for fabric nameserver */
676  	uint32_t fc_prli_sent;	/* cntr for outstanding PRLIs */
677  
678  	spinlock_t work_port_lock;
679  	uint32_t work_port_events; /* Timeout to be handled  */
680  #define WORKER_DISC_TMO                0x1	/* vport: Discovery timeout */
681  #define WORKER_ELS_TMO                 0x2	/* vport: ELS timeout */
682  #define WORKER_DELAYED_DISC_TMO        0x8	/* vport: delayed discovery */
683  
684  #define WORKER_MBOX_TMO                0x100	/* hba: MBOX timeout */
685  #define WORKER_HB_TMO                  0x200	/* hba: Heart beat timeout */
686  #define WORKER_FABRIC_BLOCK_TMO        0x400	/* hba: fabric block timeout */
687  #define WORKER_RAMP_DOWN_QUEUE         0x800	/* hba: Decrease Q depth */
688  #define WORKER_RAMP_UP_QUEUE           0x1000	/* hba: Increase Q depth */
689  #define WORKER_SERVICE_TXQ             0x2000	/* hba: IOCBs on the txq */
690  #define WORKER_CHECK_INACTIVE_VMID     0x4000	/* hba: check inactive vmids */
691  #define WORKER_CHECK_VMID_ISSUE_QFPA   0x8000	/* vport: Check if qfpa needs
692  						 * to be issued */
693  
694  	struct timer_list els_tmofunc;
695  	struct timer_list delayed_disc_tmo;
696  
697  	unsigned long load_flag;
698  	/* Vport Config Parameters */
699  	uint32_t cfg_scan_down;
700  	uint32_t cfg_lun_queue_depth;
701  	uint32_t cfg_nodev_tmo;
702  	uint32_t cfg_devloss_tmo;
703  	uint32_t cfg_restrict_login;
704  	uint32_t cfg_peer_port_login;
705  	uint32_t cfg_fcp_class;
706  	uint32_t cfg_use_adisc;
707  	uint32_t cfg_discovery_threads;
708  	uint32_t cfg_log_verbose;
709  	uint32_t cfg_enable_fc4_type;
710  	uint32_t cfg_max_luns;
711  	uint32_t cfg_enable_da_id;
712  	uint32_t cfg_max_scsicmpl_time;
713  	uint32_t cfg_tgt_queue_depth;
714  	uint32_t cfg_first_burst_size;
715  	uint32_t dev_loss_tmo_changed;
716  	/* VMID parameters */
717  	u8 lpfc_vmid_host_uuid[16];
718  	u32 max_vmid;	/* maximum VMIDs allowed per port */
719  	u32 cur_vmid_cnt;	/* Current VMID count */
720  #define LPFC_MIN_VMID	4
721  #define LPFC_MAX_VMID	255
722  	u32 vmid_inactivity_timeout;	/* Time after which the VMID */
723  						/* deregisters from switch */
724  	u32 vmid_priority_tagging;
725  #define LPFC_VMID_PRIO_TAG_DISABLE	0 /* Disable */
726  #define LPFC_VMID_PRIO_TAG_SUP_TARGETS	1 /* Allow supported targets only */
727  #define LPFC_VMID_PRIO_TAG_ALL_TARGETS	2 /* Allow all targets */
728  	unsigned long *vmid_priority_range;
729  #define LPFC_VMID_MAX_PRIORITY_RANGE    256
730  #define LPFC_VMID_PRIORITY_BITMAP_SIZE  32
731  	u8 vmid_flag;
732  #define LPFC_VMID_IN_USE		0x1
733  #define LPFC_VMID_ISSUE_QFPA		0x2
734  #define LPFC_VMID_QFPA_CMPL		0x4
735  #define LPFC_VMID_QOS_ENABLED		0x8
736  #define LPFC_VMID_TIMER_ENBLD		0x10
737  #define LPFC_VMID_TYPE_PRIO		0x20
738  	struct fc_qfpa_res *qfpa_res;
739  
740  	struct fc_vport *fc_vport;
741  
742  	struct lpfc_vmid *vmid;
743  	DECLARE_HASHTABLE(hash_table, 8);
744  	rwlock_t vmid_lock;
745  	struct lpfc_vmid_priority_info vmid_priority;
746  
747  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
748  	struct dentry *debug_disc_trc;
749  	struct dentry *debug_nodelist;
750  	struct dentry *debug_nvmestat;
751  	struct dentry *debug_scsistat;
752  	struct dentry *debug_ioktime;
753  	struct dentry *debug_hdwqstat;
754  	struct dentry *vport_debugfs_root;
755  	struct lpfc_debugfs_trc *disc_trc;
756  	atomic_t disc_trc_cnt;
757  #endif
758  	struct list_head rcv_buffer_list;
759  	unsigned long rcv_buffer_time_stamp;
760  	uint32_t vport_flag;
761  #define STATIC_VPORT		0x1
762  #define FAWWPN_PARAM_CHG	0x2
763  
764  	uint16_t fdmi_num_disc;
765  	uint32_t fdmi_hba_mask;
766  	uint32_t fdmi_port_mask;
767  
768  	/* There is a single nvme instance per vport. */
769  	struct nvme_fc_local_port *localport;
770  	uint8_t  nvmei_support; /* driver supports NVME Initiator */
771  	uint32_t last_fcp_wqidx;
772  	uint32_t rcv_flogi_cnt; /* How many unsol FLOGIs ACK'd. */
773  };
774  
775  struct hbq_s {
776  	uint16_t entry_count;	  /* Current number of HBQ slots */
777  	uint16_t buffer_count;	  /* Current number of buffers posted */
778  	uint32_t next_hbqPutIdx;  /* Index to next HBQ slot to use */
779  	uint32_t hbqPutIdx;	  /* HBQ slot to use */
780  	uint32_t local_hbqGetIdx; /* Local copy of Get index from Port */
781  	void    *hbq_virt;	  /* Virtual ptr to this hbq */
782  	struct list_head hbq_buffer_list;  /* buffers assigned to this HBQ */
783  				  /* Callback for HBQ buffer allocation */
784  	struct hbq_dmabuf *(*hbq_alloc_buffer) (struct lpfc_hba *);
785  				  /* Callback for HBQ buffer free */
786  	void               (*hbq_free_buffer) (struct lpfc_hba *,
787  					       struct hbq_dmabuf *);
788  };
789  
790  /* this matches the position in the lpfc_hbq_defs array */
791  #define LPFC_ELS_HBQ	0
792  #define LPFC_MAX_HBQS	1
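
/*
 * Illustrative only: the per-HBQ callbacks let ring-agnostic code manage
 * HBQ buffers without knowing their layout, e.g.
 *
 *	struct hbq_s *hbq = &phba->hbqs[LPFC_ELS_HBQ];
 *	struct hbq_dmabuf *hbq_buf = hbq->hbq_alloc_buffer(phba);
 *	...
 *	hbq->hbq_free_buffer(phba, hbq_buf);
 */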
793  
794  enum hba_temp_state {
795  	HBA_NORMAL_TEMP,
796  	HBA_OVER_TEMP
797  };
798  
799  enum intr_type_t {
800  	NONE = 0,
801  	INTx,
802  	MSI,
803  	MSIX,
804  };
805  
806  #define LPFC_CT_CTX_MAX		64
807  struct unsol_rcv_ct_ctx {
808  	uint32_t ctxt_id;
809  	uint32_t SID;
810  	uint32_t valid;
811  #define UNSOL_INVALID		0
812  #define UNSOL_VALID		1
813  	uint16_t oxid;
814  	uint16_t rxid;
815  };
816  
817  #define LPFC_USER_LINK_SPEED_AUTO	0	/* auto select (default)*/
818  #define LPFC_USER_LINK_SPEED_1G		1	/* 1 Gigabaud */
819  #define LPFC_USER_LINK_SPEED_2G		2	/* 2 Gigabaud */
820  #define LPFC_USER_LINK_SPEED_4G		4	/* 4 Gigabaud */
821  #define LPFC_USER_LINK_SPEED_8G		8	/* 8 Gigabaud */
822  #define LPFC_USER_LINK_SPEED_10G	10	/* 10 Gigabaud */
823  #define LPFC_USER_LINK_SPEED_16G	16	/* 16 Gigabaud */
824  #define LPFC_USER_LINK_SPEED_32G	32	/* 32 Gigabaud */
825  #define LPFC_USER_LINK_SPEED_64G	64	/* 64 Gigabaud */
826  #define LPFC_USER_LINK_SPEED_MAX	LPFC_USER_LINK_SPEED_64G
827  
828  #define LPFC_LINK_SPEED_STRING "0, 1, 2, 4, 8, 10, 16, 32, 64"
829  
830  enum nemb_type {
831  	nemb_mse = 1,
832  	nemb_hbd
833  };
834  
835  enum mbox_type {
836  	mbox_rd = 1,
837  	mbox_wr
838  };
839  
840  enum dma_type {
841  	dma_mbox = 1,
842  	dma_ebuf
843  };
844  
845  enum sta_type {
846  	sta_pre_addr = 1,
847  	sta_pos_addr
848  };
849  
850  struct lpfc_mbox_ext_buf_ctx {
851  	uint32_t state;
852  #define LPFC_BSG_MBOX_IDLE		0
853  #define LPFC_BSG_MBOX_HOST              1
854  #define LPFC_BSG_MBOX_PORT		2
855  #define LPFC_BSG_MBOX_DONE		3
856  #define LPFC_BSG_MBOX_ABTS		4
857  	enum nemb_type nembType;
858  	enum mbox_type mboxType;
859  	uint32_t numBuf;
860  	uint32_t mbxTag;
861  	uint32_t seqNum;
862  	struct lpfc_dmabuf *mbx_dmabuf;
863  	struct list_head ext_dmabuf_list;
864  };
865  
866  struct lpfc_epd_pool {
867  	/* Expedite pool */
868  	struct list_head list;
869  	u32 count;
870  	spinlock_t lock;	/* lock for expedite pool */
871  };
872  
873  enum ras_state {
874  	INACTIVE,
875  	REG_INPROGRESS,
876  	ACTIVE
877  };
878  
879  struct lpfc_ras_fwlog {
880  	uint8_t *fwlog_buff;
881  	uint32_t fw_buffcount; /* Buffer size posted to FW */
882  #define LPFC_RAS_BUFF_ENTERIES  16      /* Each entry can hold max of 64k */
883  #define LPFC_RAS_MAX_ENTRY_SIZE (64 * 1024)
884  #define LPFC_RAS_MIN_BUFF_POST_SIZE (256 * 1024)
885  #define LPFC_RAS_MAX_BUFF_POST_SIZE (1024 * 1024)
886  	uint32_t fw_loglevel; /* Log level set */
887  	struct lpfc_dmabuf lwpd;
888  	struct list_head fwlog_buff_list;
889  
890  	/* RAS support status on adapter */
891  	bool ras_hwsupport; /* RAS Support available on HW or not */
892  	bool ras_enabled;   /* Ras Enabled for the function */
893  #define LPFC_RAS_DISABLE_LOGGING 0x00
894  #define LPFC_RAS_ENABLE_LOGGING 0x01
895  	enum ras_state state;    /* RAS logging running state */
896  };
897  
898  #define DBG_LOG_STR_SZ 256
899  #define DBG_LOG_SZ 256
900  
901  struct dbg_log_ent {
902  	char log[DBG_LOG_STR_SZ];
903  	u64     t_ns;
904  };
905  
906  enum lpfc_irq_chann_mode {
907  	/* Assign IRQs to all possible cpus that have hardware queues */
908  	NORMAL_MODE,
909  
910  	/* Assign IRQs only to cpus on the same numa node as HBA */
911  	NUMA_MODE,
912  
913  	/* Assign IRQs only on non-hyperthreaded CPUs. This is the
914  	 * same as NORMAL_MODE, but IRQs are assigned only to physical CPUs.
915  	 */
916  	NHT_MODE,
917  };
918  
919  enum lpfc_hba_bit_flags {
920  	FABRIC_COMANDS_BLOCKED,
921  	HBA_PCI_ERR,
922  	MBX_TMO_ERR,
923  };
924  
925  struct lpfc_hba {
926  	/* SCSI interface function jump table entries */
927  	struct lpfc_io_buf * (*lpfc_get_scsi_buf)
928  		(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
929  		struct scsi_cmnd *cmnd);
930  	int (*lpfc_scsi_prep_dma_buf)
931  		(struct lpfc_hba *, struct lpfc_io_buf *);
932  	void (*lpfc_scsi_unprep_dma_buf)
933  		(struct lpfc_hba *, struct lpfc_io_buf *);
934  	void (*lpfc_release_scsi_buf)
935  		(struct lpfc_hba *, struct lpfc_io_buf *);
936  	void (*lpfc_rampdown_queue_depth)
937  		(struct lpfc_hba *);
938  	void (*lpfc_scsi_prep_cmnd)
939  		(struct lpfc_vport *, struct lpfc_io_buf *,
940  		 struct lpfc_nodelist *);
941  	int (*lpfc_scsi_prep_cmnd_buf)
942  		(struct lpfc_vport *vport,
943  		 struct lpfc_io_buf *lpfc_cmd,
944  		 uint8_t tmo);
945  	int (*lpfc_scsi_prep_task_mgmt_cmd)
946  		(struct lpfc_vport *vport,
947  		 struct lpfc_io_buf *lpfc_cmd,
948  		 u64 lun, u8 task_mgmt_cmd);
949  
950  	/* IOCB interface function jump table entries */
951  	int (*__lpfc_sli_issue_iocb)
952  		(struct lpfc_hba *, uint32_t,
953  		 struct lpfc_iocbq *, uint32_t);
954  	int (*__lpfc_sli_issue_fcp_io)
955  		(struct lpfc_hba *phba, uint32_t ring_number,
956  		 struct lpfc_iocbq *piocb, uint32_t flag);
957  	void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
958  			 struct lpfc_iocbq *);
959  	int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
960  
961  	/* MBOX interface function jump table entries */
962  	int (*lpfc_sli_issue_mbox)
963  		(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
964  
965  	/* Slow-path IOCB process function jump table entries */
966  	void (*lpfc_sli_handle_slow_ring_event)
967  		(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
968  		 uint32_t mask);
969  
970  	/* INIT device interface function jump table entries */
971  	int (*lpfc_sli_hbq_to_firmware)
972  		(struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
973  	int (*lpfc_sli_brdrestart)
974  		(struct lpfc_hba *);
975  	int (*lpfc_sli_brdready)
976  		(struct lpfc_hba *, uint32_t);
977  	void (*lpfc_handle_eratt)
978  		(struct lpfc_hba *);
979  	void (*lpfc_stop_port)
980  		(struct lpfc_hba *);
981  	int (*lpfc_hba_init_link)
982  		(struct lpfc_hba *, uint32_t);
983  	int (*lpfc_hba_down_link)
984  		(struct lpfc_hba *, uint32_t);
985  	int (*lpfc_selective_reset)
986  		(struct lpfc_hba *);
987  
988  	int (*lpfc_bg_scsi_prep_dma_buf)
989  		(struct lpfc_hba *, struct lpfc_io_buf *);
990  
991  	/* Prep SLI WQE/IOCB jump table entries */
992  	void (*__lpfc_sli_prep_els_req_rsp)(struct lpfc_iocbq *cmdiocbq,
993  					    struct lpfc_vport *vport,
994  					    struct lpfc_dmabuf *bmp,
995  					    u16 cmd_size, u32 did, u32 elscmd,
996  					    u8 tmo, u8 expect_rsp);
997  	void (*__lpfc_sli_prep_gen_req)(struct lpfc_iocbq *cmdiocbq,
998  					struct lpfc_dmabuf *bmp, u16 rpi,
999  					u32 num_entry, u8 tmo);
1000  	void (*__lpfc_sli_prep_xmit_seq64)(struct lpfc_iocbq *cmdiocbq,
1001  					   struct lpfc_dmabuf *bmp, u16 rpi,
1002  					   u16 ox_id, u32 num_entry, u8 rctl,
1003  					   u8 last_seq, u8 cr_cx_cmd);
1004  	void (*__lpfc_sli_prep_abort_xri)(struct lpfc_iocbq *cmdiocbq,
1005  					  u16 ulp_context, u16 iotag,
1006  					  u8 ulp_class, u16 cqid, bool ia,
1007  					  bool wqec);
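
	/*
	 * Illustrative only: the function pointers above are filled in at
	 * initialization with the SLI-3 or SLI-4 specific routines, so
	 * callers can dispatch without testing sli_rev, e.g.
	 *
	 *	rc = phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	 */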
1008  
1009  	/* expedite pool */
1010  	struct lpfc_epd_pool epd_pool;
1011  
1012  	/* SLI4 specific HBA data structure */
1013  	struct lpfc_sli4_hba sli4_hba;
1014  
1015  	struct workqueue_struct *wq;
1016  	struct delayed_work     eq_delay_work;
1017  
1018  #define LPFC_IDLE_STAT_DELAY 1000
1019  	struct delayed_work	idle_stat_delay_work;
1020  
1021  	struct lpfc_sli sli;
1022  	uint8_t pci_dev_grp;	/* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
1023  	uint32_t sli_rev;		/* SLI2, SLI3, or SLI4 */
1024  	uint32_t sli3_options;		/* Mask of enabled SLI3 options */
1025  #define LPFC_SLI3_HBQ_ENABLED		0x01
1026  #define LPFC_SLI3_NPIV_ENABLED		0x02
1027  #define LPFC_SLI3_VPORT_TEARDOWN	0x04
1028  #define LPFC_SLI3_CRP_ENABLED		0x08
1029  #define LPFC_SLI3_BG_ENABLED		0x20
1030  #define LPFC_SLI3_DSS_ENABLED		0x40
1031  #define LPFC_SLI4_PERFH_ENABLED		0x80
1032  #define LPFC_SLI4_PHWQ_ENABLED		0x100
1033  	uint32_t iocb_cmd_size;
1034  	uint32_t iocb_rsp_size;
1035  
1036  	struct lpfc_trunk_link  trunk_link;
1037  	enum hba_state link_state;
1038  	uint32_t link_flag;	/* link state flags */
1039  #define LS_LOOPBACK_MODE      0x1	/* NPort is in Loopback mode */
1040  					/* This flag is set while issuing */
1041  					/* INIT_LINK mailbox command */
1042  #define LS_NPIV_FAB_SUPPORTED 0x2	/* Fabric supports NPIV */
1043  #define LS_IGNORE_ERATT       0x4	/* intr handler should ignore ERATT */
1044  #define LS_MDS_LINK_DOWN      0x8	/* MDS Diagnostics Link Down */
1045  #define LS_MDS_LOOPBACK       0x10	/* MDS Diagnostics Link Up (Loopback) */
1046  #define LS_CT_VEN_RPA         0x20	/* Vendor RPA sent to switch */
1047  #define LS_EXTERNAL_LOOPBACK  0x40	/* External loopback plug inserted */
1048  
1049  	unsigned long hba_flag;	/* hba generic flags */
1050  
1051  	struct completion *fw_dump_cmpl; /* cmpl event tracker for fw_dump */
1052  	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
1053  	struct lpfc_dmabuf slim2p;
1054  
1055  	MAILBOX_t *mbox;
1056  	uint32_t *mbox_ext;
1057  	struct lpfc_mbox_ext_buf_ctx mbox_ext_buf_ctx;
1058  	uint32_t ha_copy;
1059  	struct _PCB *pcb;
1060  	struct _IOCB *IOCBs;
1061  
1062  	struct lpfc_dmabuf hbqslimp;
1063  
1064  	uint16_t pci_cfg_value;
1065  
1066  	uint8_t fc_linkspeed;	/* Link speed after last READ_LA */
1067  
1068  	uint32_t fc_eventTag;	/* event tag for link attention */
1069  	uint32_t link_events;
1070  
1071  	/* These fields used to be binfo */
1072  	uint32_t fc_pref_DID;	/* preferred D_ID */
1073  	uint8_t  fc_pref_ALPA;	/* preferred AL_PA */
1074  	uint32_t fc_edtovResol; /* E_D_TOV timer resolution */
1075  	uint32_t fc_edtov;	/* E_D_TOV timer value */
1076  	uint32_t fc_arbtov;	/* ARB_TOV timer value */
1077  	uint32_t fc_ratov;	/* R_A_TOV timer value */
1078  	uint32_t fc_rttov;	/* R_T_TOV timer value */
1079  	uint32_t fc_altov;	/* AL_TOV timer value */
1080  	uint32_t fc_crtov;	/* C_R_TOV timer value */
1081  
1082  	struct serv_parm fc_fabparam;	/* fabric service parameters buffer */
1083  	uint8_t alpa_map[128];	/* AL_PA map from READ_LA */
1084  
1085  	uint32_t lmt;
1086  
1087  	uint32_t fc_topology;	/* link topology, from LINK INIT */
1088  	uint32_t fc_topology_changed;	/* link topology, from LINK INIT */
1089  
1090  	struct lpfc_stats fc_stat;
1091  
1092  	struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */
1093  	uint32_t nport_event_cnt;	/* timestamp for nlplist entry */
1094  
1095  	uint8_t  wwnn[8];
1096  	uint8_t  wwpn[8];
1097  	uint32_t RandomData[7];
1098  	uint8_t  fcp_embed_io;
1099  	uint8_t  nvmet_support;	/* driver supports NVMET */
1100  #define LPFC_NVMET_MAX_PORTS	32
1101  	uint8_t  mds_diags_support;
1102  	uint8_t  bbcredit_support;
1103  	uint8_t  enab_exp_wqcq_pages;
1104  	u8	 nsler; /* Firmware supports FC-NVMe-2 SLER */
1105  
1106  	/* HBA Config Parameters */
1107  	uint32_t cfg_ack0;
1108  	uint32_t cfg_xri_rebalancing;
1109  	uint32_t cfg_xpsgl;
1110  	uint32_t cfg_enable_npiv;
1111  	uint32_t cfg_enable_rrq;
1112  	uint32_t cfg_topology;
1113  	uint32_t cfg_link_speed;
1114  #define LPFC_FCF_FOV 1		/* Fast fcf failover */
1115  #define LPFC_FCF_PRIORITY 2	/* Priority fcf failover */
1116  	uint32_t cfg_fcf_failover_policy;
1117  	uint32_t cfg_fcp_io_sched;
1118  	uint32_t cfg_ns_query;
1119  	uint32_t cfg_fcp2_no_tgt_reset;
1120  	uint32_t cfg_cr_delay;
1121  	uint32_t cfg_cr_count;
1122  	uint32_t cfg_multi_ring_support;
1123  	uint32_t cfg_multi_ring_rctl;
1124  	uint32_t cfg_multi_ring_type;
1125  	uint32_t cfg_poll;
1126  	uint32_t cfg_poll_tmo;
1127  	uint32_t cfg_task_mgmt_tmo;
1128  	uint32_t cfg_use_msi;
1129  	uint32_t cfg_auto_imax;
1130  	uint32_t cfg_fcp_imax;
1131  	uint32_t cfg_force_rscn;
1132  	uint32_t cfg_cq_poll_threshold;
1133  	uint32_t cfg_cq_max_proc_limit;
1134  	uint32_t cfg_fcp_cpu_map;
1135  	uint32_t cfg_fcp_mq_threshold;
1136  	uint32_t cfg_hdw_queue;
1137  	uint32_t cfg_irq_chann;
1138  	uint32_t cfg_suppress_rsp;
1139  	uint32_t cfg_nvme_oas;
1140  	uint32_t cfg_nvme_embed_cmd;
1141  	uint32_t cfg_nvmet_mrq_post;
1142  	uint32_t cfg_nvmet_mrq;
1143  	uint32_t cfg_enable_nvmet;
1144  	uint32_t cfg_nvme_enable_fb;
1145  	uint32_t cfg_nvmet_fb_size;
1146  	uint32_t cfg_total_seg_cnt;
1147  	uint32_t cfg_sg_seg_cnt;
1148  	uint32_t cfg_nvme_seg_cnt;
1149  	uint32_t cfg_scsi_seg_cnt;
1150  	uint32_t cfg_sg_dma_buf_size;
1151  	uint32_t cfg_hba_queue_depth;
1152  	uint32_t cfg_enable_hba_reset;
1153  	uint32_t cfg_enable_hba_heartbeat;
1154  	uint32_t cfg_fof;
1155  	uint32_t cfg_EnableXLane;
1156  	uint8_t cfg_oas_tgt_wwpn[8];
1157  	uint8_t cfg_oas_vpt_wwpn[8];
1158  	uint32_t cfg_oas_lun_state;
1159  #define OAS_LUN_ENABLE	1
1160  #define OAS_LUN_DISABLE	0
1161  	uint32_t cfg_oas_lun_status;
1162  #define OAS_LUN_STATUS_EXISTS	0x01
1163  	uint32_t cfg_oas_flags;
1164  #define OAS_FIND_ANY_VPORT	0x01
1165  #define OAS_FIND_ANY_TARGET	0x02
1166  #define OAS_LUN_VALID	0x04
1167  	uint32_t cfg_oas_priority;
1168  	uint32_t cfg_XLanePriority;
1169  	uint32_t cfg_enable_bg;
1170  	uint32_t cfg_prot_mask;
1171  	uint32_t cfg_prot_guard;
1172  	uint32_t cfg_hostmem_hgp;
1173  	uint32_t cfg_log_verbose;
1174  	uint32_t cfg_enable_fc4_type;
1175  #define LPFC_ENABLE_FCP  1
1176  #define LPFC_ENABLE_NVME 2
1177  #define LPFC_ENABLE_BOTH 3
1178  #if (IS_ENABLED(CONFIG_NVME_FC))
1179  #define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
1180  #define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
1181  #else
1182  #define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_FCP
1183  #define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_FCP
1184  #endif
1185  	uint32_t cfg_sriov_nr_virtfn;
1186  	uint32_t cfg_request_firmware_upgrade;
1187  	uint32_t cfg_suppress_link_up;
1188  	uint32_t cfg_rrq_xri_bitmap_sz;
1189  	u32      cfg_fcp_wait_abts_rsp;
1190  	uint32_t cfg_delay_discovery;
1191  	uint32_t cfg_sli_mode;
1192  #define LPFC_INITIALIZE_LINK              0	/* do normal init_link mbox */
1193  #define LPFC_DELAY_INIT_LINK              1	/* layered driver hold off */
1194  #define LPFC_DELAY_INIT_LINK_INDEFINITELY 2	/* wait, manual intervention */
1195  	uint32_t cfg_fdmi_on;
1196  #define LPFC_FDMI_NO_SUPPORT	0	/* FDMI not supported */
1197  #define LPFC_FDMI_SUPPORT	1	/* FDMI supported? */
1198  	uint32_t cfg_enable_SmartSAN;
1199  	uint32_t cfg_enable_mds_diags;
1200  	uint32_t cfg_ras_fwlog_level;
1201  	uint32_t cfg_ras_fwlog_buffsize;
1202  	uint32_t cfg_ras_fwlog_func;
1203  	uint32_t cfg_enable_bbcr;	/* Enable BB Credit Recovery */
1204  	uint32_t cfg_enable_dpp;	/* Enable Direct Packet Push */
1205  	uint32_t cfg_enable_pbde;
1206  	uint32_t cfg_enable_mi;
1207  	struct nvmet_fc_target_port *targetport;
1208  	lpfc_vpd_t vpd;		/* vital product data */
1209  
1210  	u32 cfg_max_vmid;	/* maximum VMIDs allowed per port */
1211  	u32 cfg_vmid_app_header;
1212  #define LPFC_VMID_APP_HEADER_DISABLE	0
1213  #define LPFC_VMID_APP_HEADER_ENABLE	1
1214  	u32 cfg_vmid_priority_tagging;
1215  	u32 cfg_vmid_inactivity_timeout;	/* Time after which the VMID */
1216  						/* deregisters from switch */
1217  	struct pci_dev *pcidev;
1218  	struct list_head      work_list;
1219  	uint32_t              work_ha;      /* Host Attention Bits for WT */
1220  	uint32_t              work_ha_mask; /* HA Bits owned by WT        */
1221  	uint32_t              work_hs;      /* HS stored in case of ERRAT */
1222  	uint32_t              work_status[2]; /* Extra status from SLIM */
1223  
1224  	wait_queue_head_t    work_waitq;
1225  	struct task_struct   *worker_thread;
1226  	unsigned long data_flags;
1227  	uint32_t border_sge_num;
1228  
1229  	uint32_t hbq_in_use;		/* HBQs in use flag */
1230  	uint32_t hbq_count;	        /* Count of configured HBQs */
1231  	struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indices  */
1232  
1233  	atomic_t fcp_qidx;         /* next FCP WQ (RR Policy) */
1234  	atomic_t nvme_qidx;        /* next NVME WQ (RR Policy) */
1235  
1236  	phys_addr_t pci_bar0_map;     /* Physical address for PCI BAR0 */
1237  	phys_addr_t pci_bar1_map;     /* Physical address for PCI BAR1 */
1238  	phys_addr_t pci_bar2_map;     /* Physical address for PCI BAR2 */
1239  	void __iomem *slim_memmap_p;	/* Kernel memory mapped address for
1240  					   PCI BAR0 */
1241  	void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for
1242  					    PCI BAR2 */
1243  
1244  	void __iomem *pci_bar0_memmap_p; /* Kernel memory mapped address for
1245  					    PCI BAR0 with dual-ULP support */
1246  	void __iomem *pci_bar2_memmap_p; /* Kernel memory mapped address for
1247  					    PCI BAR2 with dual-ULP support */
1248  	void __iomem *pci_bar4_memmap_p; /* Kernel memory mapped address for
1249  					    PCI BAR4 with dual-ULP support */
1250  #define PCI_64BIT_BAR0	0
1251  #define PCI_64BIT_BAR2	2
1252  #define PCI_64BIT_BAR4	4
1253  	void __iomem *MBslimaddr;	/* virtual address for mbox cmds */
1254  	void __iomem *HAregaddr;	/* virtual address for host attn reg */
1255  	void __iomem *CAregaddr;	/* virtual address for chip attn reg */
1256  	void __iomem *HSregaddr;	/* virtual address for host status
1257  					   reg */
1258  	void __iomem *HCregaddr;	/* virtual address for host ctl reg */
1259  
1260  	struct lpfc_hgp __iomem *host_gp; /* Host side get/put pointers */
1261  	struct lpfc_pgp   *port_gp;
1262  	uint32_t __iomem  *hbq_put;     /* Address in SLIM to HBQ put ptrs */
1263  	uint32_t          *hbq_get;     /* Host mem address of HBQ get ptrs */
1264  
1265  	int brd_no;			/* FC board number */
1266  	char SerialNumber[32];		/* adapter Serial Number */
1267  	char OptionROMVersion[32];	/* adapter BIOS / Fcode version */
1268  	char BIOSVersion[16];		/* Boot BIOS version */
1269  	char ModelDesc[256];		/* Model Description */
1270  	char ModelName[80];		/* Model Name */
1271  	char ProgramType[256];		/* Program Type */
1272  	char Port[20];			/* Port No */
1273  	uint8_t vpd_flag;               /* VPD data flag */
1274  
1275  #define VPD_MODEL_DESC      0x1         /* valid vpd model description */
1276  #define VPD_MODEL_NAME      0x2         /* valid vpd model name */
1277  #define VPD_PROGRAM_TYPE    0x4         /* valid vpd program type */
1278  #define VPD_PORT            0x8         /* valid vpd port data */
1279  #define VPD_MASK            0xf         /* mask for any vpd data */
1280  
1281  
1282  	struct timer_list fcp_poll_timer;
1283  	struct timer_list eratt_poll;
1284  	uint32_t eratt_poll_interval;
1285  
1286  	uint64_t bg_guard_err_cnt;
1287  	uint64_t bg_apptag_err_cnt;
1288  	uint64_t bg_reftag_err_cnt;
1289  
1290  	/* fastpath list. */
1291  	spinlock_t scsi_buf_list_get_lock;  /* SCSI buf alloc list lock */
1292  	spinlock_t scsi_buf_list_put_lock;  /* SCSI buf free list lock */
1293  	struct list_head lpfc_scsi_buf_list_get;
1294  	struct list_head lpfc_scsi_buf_list_put;
1295  	uint32_t total_scsi_bufs;
1296  	struct list_head lpfc_iocb_list;
1297  	uint32_t total_iocbq_bufs;
1298  	spinlock_t rrq_list_lock;       /* lock for active_rrq_list */
1299  	struct list_head active_rrq_list;
1300  	spinlock_t hbalock;
1301  	struct work_struct  unblock_request_work; /* SCSI layer unblock IOs */
1302  
1303  	/* dma_mem_pools */
1304  	struct dma_pool *lpfc_sg_dma_buf_pool;
1305  	struct dma_pool *lpfc_mbuf_pool;
1306  	struct dma_pool *lpfc_hrb_pool;	/* header receive buffer pool */
1307  	struct dma_pool *lpfc_drb_pool; /* data receive buffer pool */
1308  	struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */
1309  	struct dma_pool *lpfc_hbq_pool;	/* SLI3 hbq buffer pool */
1310  	struct dma_pool *lpfc_cmd_rsp_buf_pool;
1311  	struct lpfc_dma_pool lpfc_mbuf_safety_pool;
1312  
1313  	mempool_t *mbox_mem_pool;
1314  	mempool_t *nlp_mem_pool;
1315  	mempool_t *rrq_pool;
1316  	mempool_t *active_rrq_pool;
1317  
1318  	struct fc_host_statistics link_stats;
1319  	enum lpfc_irq_chann_mode irq_chann_mode;
1320  	enum intr_type_t intr_type;
1321  	uint32_t intr_mode;
1322  #define LPFC_INTR_ERROR	0xFFFFFFFF
1323  	struct list_head port_list;
1324  	spinlock_t port_list_lock;	/* lock for port_list mutations */
1325  	struct lpfc_vport *pport;	/* physical lpfc_vport pointer */
1326  	uint16_t max_vpi;		/* Maximum virtual nports */
1327  #define LPFC_MAX_VPI	0xFF		/* Max number VPI supported 0 - 0xff */
1328  #define LPFC_MAX_VPORTS	0x100		/* Max vports per port, with pport */
1329  	uint16_t max_vports;            /*
1330  					 * For IOV HBAs max_vpi can change
1331  					 * after a reset. max_vports is max
1332  					 * number of vports present. This can
1333  					 * be greater than max_vpi.
1334  					 */
1335  	uint16_t vpi_base;
1336  	uint16_t vfi_base;
1337  	unsigned long *vpi_bmask;	/* vpi allocation table */
1338  	uint16_t *vpi_ids;
1339  	uint16_t vpi_count;
1340  	struct list_head lpfc_vpi_blk_list;
1341  
1342  	/* Data structure used by fabric iocb scheduler */
1343  	struct list_head fabric_iocb_list;
1344  	atomic_t fabric_iocb_count;
1345  	struct timer_list fabric_block_timer;
1346  	unsigned long bit_flags;
1347  	atomic_t num_rsrc_err;
1348  	unsigned long last_rsrc_error_time;
1349  	unsigned long last_ramp_down_time;
1350  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1351  	struct dentry *hba_debugfs_root;
1352  	atomic_t debugfs_vport_count;
1353  	struct dentry *debug_multixri_pools;
1354  	struct dentry *debug_hbqinfo;
1355  	struct dentry *debug_dumpHostSlim;
1356  	struct dentry *debug_dumpHBASlim;
1357  	struct dentry *debug_InjErrLBA;  /* LBA to inject errors at */
1358  	struct dentry *debug_InjErrNPortID;  /* NPortID to inject errors at */
1359  	struct dentry *debug_InjErrWWPN;  /* WWPN to inject errors at */
1360  	struct dentry *debug_writeGuard; /* inject write guard_tag errors */
1361  	struct dentry *debug_writeApp;   /* inject write app_tag errors */
1362  	struct dentry *debug_writeRef;   /* inject write ref_tag errors */
1363  	struct dentry *debug_readGuard;  /* inject read guard_tag errors */
1364  	struct dentry *debug_readApp;    /* inject read app_tag errors */
1365  	struct dentry *debug_readRef;    /* inject read ref_tag errors */
1366  
1367  	struct dentry *debug_nvmeio_trc;
1368  	struct lpfc_debugfs_nvmeio_trc *nvmeio_trc;
1369  	struct dentry *debug_hdwqinfo;
1370  #ifdef LPFC_HDWQ_LOCK_STAT
1371  	struct dentry *debug_lockstat;
1372  #endif
1373  	struct dentry *debug_cgn_buffer;
1374  	struct dentry *debug_rx_monitor;
1375  	struct dentry *debug_ras_log;
1376  	atomic_t nvmeio_trc_cnt;
1377  	uint32_t nvmeio_trc_size;
1378  	uint32_t nvmeio_trc_output_idx;
1379  
1380  	/* T10 DIF error injection */
1381  	uint32_t lpfc_injerr_wgrd_cnt;
1382  	uint32_t lpfc_injerr_wapp_cnt;
1383  	uint32_t lpfc_injerr_wref_cnt;
1384  	uint32_t lpfc_injerr_rgrd_cnt;
1385  	uint32_t lpfc_injerr_rapp_cnt;
1386  	uint32_t lpfc_injerr_rref_cnt;
1387  	uint32_t lpfc_injerr_nportid;
1388  	struct lpfc_name lpfc_injerr_wwpn;
1389  	sector_t lpfc_injerr_lba;
1390  #define LPFC_INJERR_LBA_OFF	(sector_t)(-1)
1391  
1392  	struct dentry *debug_slow_ring_trc;
1393  	struct lpfc_debugfs_trc *slow_ring_trc;
1394  	atomic_t slow_ring_trc_cnt;
1395  	/* iDiag debugfs sub-directory */
1396  	struct dentry *idiag_root;
1397  	struct dentry *idiag_pci_cfg;
1398  	struct dentry *idiag_bar_acc;
1399  	struct dentry *idiag_que_info;
1400  	struct dentry *idiag_que_acc;
1401  	struct dentry *idiag_drb_acc;
1402  	struct dentry *idiag_ctl_acc;
1403  	struct dentry *idiag_mbx_acc;
1404  	struct dentry *idiag_ext_acc;
1405  	uint8_t lpfc_idiag_last_eq;
1406  #endif
1407  	uint16_t nvmeio_trc_on;
1408  
1409  	/* Used for deferred freeing of ELS data buffers */
1410  	struct list_head elsbuf;
1411  	int elsbuf_cnt;
1412  	int elsbuf_prev_cnt;
1413  
1414  	uint8_t temp_sensor_support;
1415  	/* Fields used for heart beat. */
1416  	unsigned long last_completion_time;
1417  	unsigned long skipped_hb;
1418  	struct timer_list hb_tmofunc;
1419  	struct timer_list rrq_tmr;
1420  	enum hba_temp_state over_temp_state;
1421  	/*
1422  	 * The following bit is set for all buffer tags that are not
1423  	 * associated with any HBQ.
1424  	 */
1425  #define QUE_BUFTAG_BIT  (1<<31)
1426  	uint32_t buffer_tag_count;
1427  
1428  /* Maximum number of events that can be outstanding at any time*/
1429  #define LPFC_MAX_EVT_COUNT 512
1430  	atomic_t fast_event_count;
1431  	uint32_t fcoe_eventtag;
1432  	uint32_t fcoe_eventtag_at_fcf_scan;
1433  	uint32_t fcoe_cvl_eventtag;
1434  	uint32_t fcoe_cvl_eventtag_attn;
1435  	struct lpfc_fcf fcf;
1436  	uint8_t fc_map[3];
1437  	uint8_t valid_vlan;
1438  	uint16_t vlan_id;
1439  	struct list_head fcf_conn_rec_list;
1440  
1441  	struct lpfc_defer_flogi_acc defer_flogi_acc;
1442  
1443  	spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
1444  	struct list_head ct_ev_waiters;
1445  	struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
1446  	uint32_t ctx_idx;
1447  	struct timer_list inactive_vmid_poll;
1448  
1449  	/* RAS Support */
1450  	spinlock_t ras_fwlog_lock; /* do not take while holding another lock */
1451  	struct lpfc_ras_fwlog ras_fwlog;
1452  
1453  	uint32_t iocb_cnt;
1454  	uint32_t iocb_max;
1455  	atomic_t sdev_cnt;
1456  	spinlock_t devicelock;	/* lock for luns list */
1457  	mempool_t *device_data_mem_pool;
1458  	struct list_head luns;
1459  #define LPFC_TRANSGRESSION_HIGH_TEMPERATURE	0x0080
1460  #define LPFC_TRANSGRESSION_LOW_TEMPERATURE	0x0040
1461  #define LPFC_TRANSGRESSION_HIGH_VOLTAGE		0x0020
1462  #define LPFC_TRANSGRESSION_LOW_VOLTAGE		0x0010
1463  #define LPFC_TRANSGRESSION_HIGH_TXBIAS		0x0008
1464  #define LPFC_TRANSGRESSION_LOW_TXBIAS		0x0004
1465  #define LPFC_TRANSGRESSION_HIGH_TXPOWER		0x0002
1466  #define LPFC_TRANSGRESSION_LOW_TXPOWER		0x0001
1467  #define LPFC_TRANSGRESSION_HIGH_RXPOWER		0x8000
1468  #define LPFC_TRANSGRESSION_LOW_RXPOWER		0x4000
1469  	uint16_t sfp_alarm;
1470  	uint16_t sfp_warning;
1471  
1472  #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1473  	uint16_t hdwqstat_on;
1474  #define LPFC_CHECK_OFF		0
1475  #define LPFC_CHECK_NVME_IO	1
1476  #define LPFC_CHECK_NVMET_IO	2
1477  #define LPFC_CHECK_SCSI_IO	4
1478  	uint16_t ktime_on;
1479  	uint64_t ktime_data_samples;
1480  	uint64_t ktime_status_samples;
1481  	uint64_t ktime_last_cmd;
1482  	uint64_t ktime_seg1_total;
1483  	uint64_t ktime_seg1_min;
1484  	uint64_t ktime_seg1_max;
1485  	uint64_t ktime_seg2_total;
1486  	uint64_t ktime_seg2_min;
1487  	uint64_t ktime_seg2_max;
1488  	uint64_t ktime_seg3_total;
1489  	uint64_t ktime_seg3_min;
1490  	uint64_t ktime_seg3_max;
1491  	uint64_t ktime_seg4_total;
1492  	uint64_t ktime_seg4_min;
1493  	uint64_t ktime_seg4_max;
1494  	uint64_t ktime_seg5_total;
1495  	uint64_t ktime_seg5_min;
1496  	uint64_t ktime_seg5_max;
1497  	uint64_t ktime_seg6_total;
1498  	uint64_t ktime_seg6_min;
1499  	uint64_t ktime_seg6_max;
1500  	uint64_t ktime_seg7_total;
1501  	uint64_t ktime_seg7_min;
1502  	uint64_t ktime_seg7_max;
1503  	uint64_t ktime_seg8_total;
1504  	uint64_t ktime_seg8_min;
1505  	uint64_t ktime_seg8_max;
1506  	uint64_t ktime_seg9_total;
1507  	uint64_t ktime_seg9_min;
1508  	uint64_t ktime_seg9_max;
1509  	uint64_t ktime_seg10_total;
1510  	uint64_t ktime_seg10_min;
1511  	uint64_t ktime_seg10_max;
1512  #endif
1513  	/* CMF objects */
1514  	struct lpfc_cgn_stat __percpu *cmf_stat;
1515  	uint32_t cmf_interval_rate;  /* timer interval limit in ms */
1516  	uint32_t cmf_timer_cnt;
1517  #define LPFC_CMF_INTERVAL 90
1518  	uint64_t cmf_link_byte_count;
1519  	uint64_t cmf_max_line_rate;
1520  	uint64_t cmf_max_bytes_per_interval;
1521  	uint64_t cmf_last_sync_bw;
1522  #define  LPFC_CMF_BLK_SIZE 512
1523  	struct hrtimer cmf_timer;
1524  	struct hrtimer cmf_stats_timer;	/* 1 minute stats timer  */
1525  	atomic_t cmf_bw_wait;
1526  	atomic_t cmf_busy;
1527  	atomic_t cmf_stop_io;      /* To block requests and stop IOs */
1528  	uint32_t cmf_active_mode;
1529  	uint32_t cmf_info_per_interval;
1530  #define LPFC_MAX_CMF_INFO 32
1531  	struct timespec64 cmf_latency;  /* Interval congestion timestamp */
1532  	uint32_t cmf_last_ts;   /* Interval congestion time (ms) */
1533  	uint32_t cmf_active_info;
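	/*
	 * Illustrative arithmetic only (a sketch; the setup code is not in
	 * this header): with a timer interval of LPFC_CMF_INTERVAL ms, the
	 * per-interval byte budget is expected to scale roughly as
	 *
	 *	cmf_link_byte_count ~= (cmf_max_line_rate * LPFC_CMF_INTERVAL) / 1000
	 *
	 * assuming cmf_max_line_rate is expressed in bytes per second.
	 */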
1534  
1535  	/* Signal / FPIN handling for Congestion Mgmt */
1536  	u8 cgn_reg_fpin;           /* Negotiated value from RDF */
1537  	u8 cgn_init_reg_fpin;      /* Initial value from READ_CONFIG */
1538  #define LPFC_CGN_FPIN_NONE	0x0
1539  #define LPFC_CGN_FPIN_WARN	0x1
1540  #define LPFC_CGN_FPIN_ALARM	0x2
1541  #define LPFC_CGN_FPIN_BOTH	(LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM)
1542  
1543  	u8 cgn_reg_signal;          /* Negotiated value from EDC */
1544  	u8 cgn_init_reg_signal;     /* Initial value from READ_CONFIG */
1545  		/* cgn_reg_signal and cgn_init_reg_signal use
1546  		 * enum fc_edc_cg_signal_cap_types
1547  		 */
1548  	u16 cgn_fpin_frequency;		/* In units of msecs */
1549  #define LPFC_FPIN_INIT_FREQ	0xffff
1550  	u32 cgn_sig_freq;
1551  	u32 cgn_acqe_cnt;
1552  
1553  	/* RX monitor handling for CMF */
1554  	struct lpfc_rx_info_monitor *rx_monitor;
1555  	atomic_t rx_max_read_cnt;       /* Maximum read bytes */
1556  	uint64_t rx_block_cnt;
1557  
1558  	/* Congestion parameters from flash */
1559  	struct lpfc_cgn_param cgn_p;
1560  
1561  	/* Statistics counter for ACQE cgn alarms and warnings */
1562  	struct lpfc_cgn_acqe_stat cgn_acqe_stat;
1563  
1564  	/* Congestion buffer information */
1565  	struct lpfc_dmabuf *cgn_i;      /* Congestion Info buffer */
1566  	atomic_t cgn_fabric_warn_cnt;   /* Total warning cgn events for info */
1567  	atomic_t cgn_fabric_alarm_cnt;  /* Total alarm cgn events for info */
1568  	atomic_t cgn_sync_warn_cnt;     /* Total warning events for SYNC wqe */
1569  	atomic_t cgn_sync_alarm_cnt;    /* Total alarm events for SYNC wqe */
1570  	atomic_t cgn_driver_evt_cnt;    /* Total driver cgn events for fmw */
1571  	atomic_t cgn_latency_evt_cnt;
1572  	atomic64_t cgn_latency_evt;     /* Avg latency per minute */
1573  	unsigned long cgn_evt_timestamp;
1574  #define LPFC_CGN_TIMER_TO_MIN   60000 /* ms in a minute */
1575  	uint32_t cgn_evt_minute;
1576  #define LPFC_SEC_MIN		60UL
1577  #define LPFC_MIN_HOUR		60
1578  #define LPFC_HOUR_DAY		24
1579  #define LPFC_MIN_DAY		(LPFC_MIN_HOUR * LPFC_HOUR_DAY)
1580  
1581  	struct hlist_node cpuhp;	/* used for cpuhp per hba callback */
1582  	struct timer_list cpuhp_poll_timer;
1583  	struct list_head poll_list;	/* slowpath eq polling list */
1584  #define LPFC_POLL_HB	1		/* slowpath heartbeat */
1585  
1586  	char os_host_name[MAXHOSTNAMELEN];
1587  
1588  	/* LD Signaling */
1589  	u32 degrade_activate_threshold;
1590  	u32 degrade_deactivate_threshold;
1591  	u32 fec_degrade_interval;
1592  
1593  	atomic_t dbg_log_idx;
1594  	atomic_t dbg_log_cnt;
1595  	atomic_t dbg_log_dmping;
1596  	struct dbg_log_ent dbg_log[DBG_LOG_SZ];
1597  };
1598  
1599  #define LPFC_MAX_RXMONITOR_ENTRY	800
1600  #define LPFC_MAX_RXMONITOR_DUMP		32
1601  struct rx_info_entry {
1602  	uint64_t cmf_bytes;	/* Total no of read bytes for CMF_SYNC_WQE */
1603  	uint64_t total_bytes;   /* Total no of read bytes requested */
1604  	uint64_t rcv_bytes;     /* Total no of read bytes completed */
1605  	uint64_t avg_io_size;
1606  	uint64_t avg_io_latency;/* Average io latency in microseconds */
1607  	uint64_t max_read_cnt;  /* Maximum read bytes */
1608  	uint64_t max_bytes_per_interval;
1609  	uint32_t cmf_busy;
1610  	uint32_t cmf_info;      /* CMF_SYNC_WQE info */
1611  	uint32_t io_cnt;
1612  	uint32_t timer_utilization;
1613  	uint32_t timer_interval;
1614  };
1615  
1616  struct lpfc_rx_info_monitor {
1617  	struct rx_info_entry *ring; /* info organized in a circular buffer */
1618  	u32 head_idx, tail_idx; /* index to head/tail of ring */
1619  	spinlock_t lock; /* spinlock for ring */
1620  	u32 entries; /* number of entries in (size of) the ring */
1621  };
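/*
 * Illustrative sketch only (assumed usage; the record routine is not defined
 * in this header): the rx monitor ring behaves as a bounded circular buffer.
 * A producer would typically, while holding rx_monitor->lock, do something
 * like:
 *
 *	ring[tail_idx] = *new_entry;
 *	tail_idx = (tail_idx + 1) % entries;
 *	if (tail_idx == head_idx)	(ring full: advance head, drop oldest)
 *		head_idx = (head_idx + 1) % entries;
 */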
1622  
1623  static inline struct Scsi_Host *
1624  lpfc_shost_from_vport(struct lpfc_vport *vport)
1625  {
1626  	return container_of((void *) vport, struct Scsi_Host, hostdata[0]);
1627  }
1628  
1629  static inline void
1630  lpfc_set_loopback_flag(struct lpfc_hba *phba)
1631  {
1632  	if (phba->cfg_topology == FLAGS_LOCAL_LB)
1633  		phba->link_flag |= LS_LOOPBACK_MODE;
1634  	else
1635  		phba->link_flag &= ~LS_LOOPBACK_MODE;
1636  }
1637  
1638  static inline int
1639  lpfc_is_link_up(struct lpfc_hba *phba)
1640  {
1641  	return  phba->link_state == LPFC_LINK_UP ||
1642  		phba->link_state == LPFC_CLEAR_LA ||
1643  		phba->link_state == LPFC_HBA_READY;
1644  }
1645  
1646  static inline void
1647  lpfc_worker_wake_up(struct lpfc_hba *phba)
1648  {
1649  	/* Set the lpfc data pending flag */
1650  	set_bit(LPFC_DATA_READY, &phba->data_flags);
1651  
1652  	/* Wake up worker thread */
1653  	wake_up(&phba->work_waitq);
1654  	return;
1655  }
1656  
1657  static inline int
1658  lpfc_readl(void __iomem *addr, uint32_t *data)
1659  {
1660  	uint32_t temp;
1661  	temp = readl(addr);
1662  	if (temp == 0xffffffff)
1663  		return -EIO;
1664  	*data = temp;
1665  	return 0;
1666  }
1667  
1668  static inline int
1669  lpfc_sli_read_hs(struct lpfc_hba *phba)
1670  {
1671  	/*
1672  	 * There was a link/board error. Read the status register to retrieve
1673  	 * the error event and process it.
1674  	 */
1675  	phba->sli.slistat.err_attn_event++;
1676  
1677  	/* Save status info and check for unplug error */
1678  	if (lpfc_readl(phba->HSregaddr, &phba->work_hs) ||
1679  		lpfc_readl(phba->MBslimaddr + 0xa8, &phba->work_status[0]) ||
1680  		lpfc_readl(phba->MBslimaddr + 0xac, &phba->work_status[1])) {
1681  		return -EIO;
1682  	}
1683  
1684  	/* Clear chip Host Attention error bit */
1685  	writel(HA_ERATT, phba->HAregaddr);
1686  	readl(phba->HAregaddr); /* flush */
1687  	phba->pport->stopped = 1;
1688  
1689  	return 0;
1690  }
1691  
1692  static inline struct lpfc_sli_ring *
1693  lpfc_phba_elsring(struct lpfc_hba *phba)
1694  {
1695  	/* Return NULL if sli_rev has become invalid due to bad fw */
1696  	if (phba->sli_rev != LPFC_SLI_REV4  &&
1697  	    phba->sli_rev != LPFC_SLI_REV3  &&
1698  	    phba->sli_rev != LPFC_SLI_REV2)
1699  		return NULL;
1700  
1701  	if (phba->sli_rev == LPFC_SLI_REV4) {
1702  		if (phba->sli4_hba.els_wq)
1703  			return phba->sli4_hba.els_wq->pring;
1704  		else
1705  			return NULL;
1706  	}
1707  	return &phba->sli.sli3_ring[LPFC_ELS_RING];
1708  }
1709  
1710  /**
1711   * lpfc_next_online_cpu - Finds next online CPU on cpumask
1712   * @mask: Pointer to phba's cpumask member.
1713   * @start: starting cpu index
1714   *
1715   * Note: If no valid cpu found, then nr_cpu_ids is returned.
1716   *
1717   **/
1718  static inline unsigned int
1719  lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
1720  {
1721  	unsigned int cpu_it;
1722  
1723  	for_each_cpu_wrap(cpu_it, mask, start) {
1724  		if (cpu_online(cpu_it))
1725  			break;
1726  	}
1727  
1728  	return cpu_it;
1729  }
1730  /**
1731   * lpfc_next_present_cpu - Finds next present CPU after n
1732   * @n: the cpu after which to search
1733   *
1734   * Note: If no next present cpu, then fallback to first present cpu.
1735   *
1736   **/
1737  static inline unsigned int lpfc_next_present_cpu(int n)
1738  {
1739  	unsigned int cpu;
1740  
1741  	cpu = cpumask_next(n, cpu_present_mask);
1742  
1743  	if (cpu >= nr_cpu_ids)
1744  		cpu = cpumask_first(cpu_present_mask);
1745  
1746  	return cpu;
1747  }
1748  
1749  /**
1750   * lpfc_sli4_mod_hba_eq_delay - update EQ delay
1751   * @phba: Pointer to HBA context object.
1752   * @eq: The Event Queue to update.
1753   * @delay: The delay value (in us) to be written.
1754   *
1755   **/
1756  static inline void
1757  lpfc_sli4_mod_hba_eq_delay(struct lpfc_hba *phba, struct lpfc_queue *eq,
1758  			   u32 delay)
1759  {
1760  	struct lpfc_register reg_data;
1761  
1762  	reg_data.word0 = 0;
1763  	bf_set(lpfc_sliport_eqdelay_id, &reg_data, eq->queue_id);
1764  	bf_set(lpfc_sliport_eqdelay_delay, &reg_data, delay);
1765  	writel(reg_data.word0, phba->sli4_hba.u.if_type2.EQDregaddr);
1766  	eq->q_mode = delay;
1767  }
1768  
1769  
1770  /*
1771   * Macro that declares a table and a routine to perform an enum-to-ASCII
1772   * string lookup.
1773   *
1774   * Defines a <key, value> table for an enum, using the enum's xxx_INIT
1775   * defines to populate the table.  The macro also defines a routine (named
1776   * by the caller) that searches the table for the key and returns the name
1777   * string if found, or "Unrecognized" if not; see the sketch after the macro.
1778   */
1779  #define DECLARE_ENUM2STR_LOOKUP(routine, enum_name, enum_init)		\
1780  static struct {								\
1781  	enum enum_name		value;					\
1782  	char			*name;					\
1783  } fc_##enum_name##_e2str_names[] = enum_init;				\
1784  static const char *routine(enum enum_name table_key)			\
1785  {									\
1786  	int i;								\
1787  	char *name = "Unrecognized";					\
1788  									\
1789  	for (i = 0; i < ARRAY_SIZE(fc_##enum_name##_e2str_names); i++) {\
1790  		if (fc_##enum_name##_e2str_names[i].value == table_key) {\
1791  			name = fc_##enum_name##_e2str_names[i].name;	\
1792  			break;						\
1793  		}							\
1794  	}								\
1795  	return name;							\
1796  }
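
/*
 * Illustrative sketch only (the names below are hypothetical, not defined
 * here): given an enum and a matching xxx_INIT initializer, e.g.
 *
 *	enum lport_event { LPORT_EV_UP, LPORT_EV_DOWN };
 *	#define LPORT_EVENT_INIT { \
 *		{ LPORT_EV_UP,   "Up" },   \
 *		{ LPORT_EV_DOWN, "Down" }, \
 *	}
 *	DECLARE_ENUM2STR_LOOKUP(lpfc_lport_event_name, lport_event,
 *				LPORT_EVENT_INIT);
 *
 * the macro expands to a static fc_lport_event_e2str_names[] table and a
 * lpfc_lport_event_name() routine returning "Up", "Down", or "Unrecognized".
 */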
1797  
1798  /**
1799   * lpfc_is_vmid_enabled - returns true if VMID is enabled for either switch type
1800   * @phba: Pointer to HBA context object.
1801   *
1802   * Relationship between the enable flag, target support, and whether a VMID
1803   * tag is required for each combination:
1804   * ---------------------------------------------------
1805   * Switch    Enable Flag  Target Support  VMID Needed
1806   * ---------------------------------------------------
1807   * App Id     0              NA              N
1808   * App Id     1               0              N
1809   * App Id     1               1              Y
1810   * Pr Tag     0              NA              N
1811   * Pr Tag     1               0              N
1812   * Pr Tag     1               1              Y
1813   * Pr Tag     2               *              Y
1814   * ---------------------------------------------------
1815   *
1816   **/
1817  static inline int lpfc_is_vmid_enabled(struct lpfc_hba *phba)
1818  {
1819  	return phba->cfg_vmid_app_header || phba->cfg_vmid_priority_tagging;
1820  }
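
/*
 * Illustrative sketch only (hypothetical helper name): per the table above,
 * lpfc_is_vmid_enabled() covers only the host-side enable; a caller that
 * actually stamps a VMID tag would additionally confirm target support,
 * e.g. something like:
 *
 *	if (lpfc_is_vmid_enabled(phba) &&
 *	    target_supports_vmid(ndlp))	(hypothetical per-target check)
 *		tag_needed = true;
 */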
1821  
1822  static inline
1823  u8 get_job_ulpstatus(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1824  {
1825  	if (phba->sli_rev == LPFC_SLI_REV4)
1826  		return bf_get(lpfc_wcqe_c_status, &iocbq->wcqe_cmpl);
1827  	else
1828  		return iocbq->iocb.ulpStatus;
1829  }
1830  
1831  static inline
1832  u32 get_job_word4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1833  {
1834  	if (phba->sli_rev == LPFC_SLI_REV4)
1835  		return iocbq->wcqe_cmpl.parameter;
1836  	else
1837  		return iocbq->iocb.un.ulpWord[4];
1838  }
1839  
1840  static inline
1841  u8 get_job_cmnd(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1842  {
1843  	if (phba->sli_rev == LPFC_SLI_REV4)
1844  		return bf_get(wqe_cmnd, &iocbq->wqe.generic.wqe_com);
1845  	else
1846  		return iocbq->iocb.ulpCommand;
1847  }
1848  
1849  static inline
1850  u16 get_job_ulpcontext(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1851  {
1852  	if (phba->sli_rev == LPFC_SLI_REV4)
1853  		return bf_get(wqe_ctxt_tag, &iocbq->wqe.generic.wqe_com);
1854  	else
1855  		return iocbq->iocb.ulpContext;
1856  }
1857  
1858  static inline
1859  u16 get_job_rcvoxid(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1860  {
1861  	if (phba->sli_rev == LPFC_SLI_REV4)
1862  		return bf_get(wqe_rcvoxid, &iocbq->wqe.generic.wqe_com);
1863  	else
1864  		return iocbq->iocb.unsli3.rcvsli3.ox_id;
1865  }
1866  
1867  static inline
1868  u32 get_job_data_placed(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1869  {
1870  	if (phba->sli_rev == LPFC_SLI_REV4)
1871  		return iocbq->wcqe_cmpl.total_data_placed;
1872  	else
1873  		return iocbq->iocb.un.genreq64.bdl.bdeSize;
1874  }
1875  
1876  static inline
1877  u32 get_job_abtsiotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1878  {
1879  	if (phba->sli_rev == LPFC_SLI_REV4)
1880  		return iocbq->wqe.abort_cmd.wqe_com.abort_tag;
1881  	else
1882  		return iocbq->iocb.un.acxri.abortIoTag;
1883  }
1884  
1885  static inline
1886  u32 get_job_els_rsp64_did(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1887  {
1888  	if (phba->sli_rev == LPFC_SLI_REV4)
1889  		return bf_get(wqe_els_did, &iocbq->wqe.els_req.wqe_dest);
1890  	else
1891  		return iocbq->iocb.un.elsreq64.remoteID;
1892  }
1893