/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_SCSI_LPFC_DEBUG_FS)
#define CONFIG_SCSI_LPFC_DEBUG_FS
#endif

/* forward declaration for LPFC_IOCB_t's use */
struct lpfc_hba;
struct lpfc_vport;

/* Define the context types that SLI handles for abort and sum operations. */
typedef enum _lpfc_ctx_cmd {
	LPFC_CTX_LUN,
	LPFC_CTX_TGT,
	LPFC_CTX_HOST
} lpfc_ctx_cmd;
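
/*
 * Illustrative sketch (not part of the driver API): the context type selects
 * how broadly an IOCB scan or abort is applied.  A hypothetical caller
 * counting outstanding FCP I/Os might narrow the scope like this
 * (lpfc_sli_sum_iocb() and its arguments are assumed for illustration):
 *
 *	outstanding = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
 *	outstanding = lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT);
 *	outstanding = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
 */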

/* Enumeration to describe the thread lock context. */
enum lpfc_mbox_ctx {
	MBOX_THD_UNLOCKED,
	MBOX_THD_LOCKED
};

union lpfc_vmid_tag {
	uint32_t app_id;
	uint8_t cs_ctl_vmid;
	struct lpfc_vmid_context *vmid_context;	/* UVEM context information */
};

struct lpfc_cq_event {
	struct list_head list;
	uint16_t hdwq;
	union {
		struct lpfc_mcqe		mcqe_cmpl;
		struct lpfc_acqe_link		acqe_link;
		struct lpfc_acqe_fip		acqe_fip;
		struct lpfc_acqe_dcbx		acqe_dcbx;
		struct lpfc_acqe_grp5		acqe_grp5;
		struct lpfc_acqe_fc_la		acqe_fc;
		struct lpfc_acqe_sli		acqe_sli;
		struct lpfc_rcqe		rcqe_cmpl;
		struct sli4_wcqe_xri_aborted	wcqe_axri;
		struct lpfc_wcqe_complete	wcqe_cmpl;
	} cqe;
};

/* This structure is used to handle IOCB requests / responses */
struct lpfc_iocbq {
	/* lpfc_iocbqs are used in double linked lists */
	struct list_head list;
	struct list_head clist;
	struct list_head dlist;
	uint16_t iotag;         /* pre-assigned IO tag */
	uint16_t sli4_lxritag;  /* logical pre-assigned XRI. */
	uint16_t sli4_xritag;   /* pre-assigned XRI, (OXID) tag. */
	uint16_t hba_wqidx;     /* index to HBA work queue */
	struct lpfc_cq_event cq_event;
	uint64_t isr_timestamp;

	union lpfc_wqe128 wqe;	/* SLI-4 */
	IOCB_t iocb;		/* SLI-3 */
	struct lpfc_wcqe_complete wcqe_cmpl;	/* WQE cmpl */

	u32 unsol_rcv_len;	/* Receive len in unsol path */

	/* Pack the u8's together and keep the group a multiple of 4 bytes. */
	u8 num_bdes;	/* Number of BDEs */
	u8 abort_bls;	/* ABTS by initiator or responder */
	u8 abort_rctl;	/* ACC or RJT flag */
	u8 priority;	/* OAS priority */
	u8 retry;	/* retry counter for IOCB cmd - if needed */
	u8 rsvd1;       /* Pad for u32 */
	u8 rsvd2;       /* Pad for u32 */
	u8 rsvd3;	/* Pad for u32 */

	u32 cmd_flag;	/* IO state flags, see LPFC_* bits defined below */
#define LPFC_IO_LIBDFC		1	/* libdfc iocb */
#define LPFC_IO_WAKE		2	/* Synchronous I/O completed */
#define LPFC_IO_WAKE_TMO	LPFC_IO_WAKE /* Synchronous I/O timed out */
#define LPFC_IO_FCP		4	/* FCP command -- iocbq in scsi_buf */
#define LPFC_DRIVER_ABORTED	8	/* driver aborted this request */
#define LPFC_IO_FABRIC		0x10	/* Iocb sent using fabric scheduler */
#define LPFC_DELAY_MEM_FREE	0x20    /* Defer freeing of FC data */
#define LPFC_EXCHANGE_BUSY	0x40    /* SLI4 hba reported XB in response */
#define LPFC_USE_FCPWQIDX	0x80    /* Submit to specified FCPWQ index */
#define DSS_SECURITY_OP		0x100	/* security IO */
#define LPFC_IO_ON_TXCMPLQ	0x200	/* The IO is still on the TXCMPLQ */
#define LPFC_IO_DIF_PASS	0x400	/* T10 DIF IO pass-thru prot */
#define LPFC_IO_DIF_STRIP	0x800	/* T10 DIF IO strip prot */
#define LPFC_IO_DIF_INSERT	0x1000	/* T10 DIF IO insert prot */
#define LPFC_IO_CMD_OUTSTANDING	0x2000 /* timeout handler abort window */

#define LPFC_FIP_ELS_ID_MASK	0xc000	/* ELS_ID range 0-3, non-shifted mask */
#define LPFC_FIP_ELS_ID_SHIFT	14
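
/*
 * Illustrative sketch only: the FIP ELS_ID is carried inside cmd_flag as a
 * two-bit field.  Extracting and setting it follows the usual mask/shift
 * pattern (variable names here are hypothetical):
 *
 *	els_id = (piocbq->cmd_flag & LPFC_FIP_ELS_ID_MASK) >>
 *		 LPFC_FIP_ELS_ID_SHIFT;
 *	piocbq->cmd_flag |= (els_id << LPFC_FIP_ELS_ID_SHIFT) &
 *			    LPFC_FIP_ELS_ID_MASK;
 */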

#define LPFC_IO_OAS		0x10000 /* OAS FCP IO */
#define LPFC_IO_FOF		0x20000 /* FOF FCP IO */
#define LPFC_IO_LOOPBACK	0x40000 /* Loopback IO */
#define LPFC_PRLI_NVME_REQ	0x80000 /* This is an NVME PRLI. */
#define LPFC_PRLI_FCP_REQ	0x100000 /* This is an FCP PRLI. */
#define LPFC_IO_NVME		0x200000 /* NVME FCP command */
#define LPFC_IO_NVME_LS		0x400000 /* NVME LS command */
#define LPFC_IO_NVMET		0x800000 /* NVMET command */
#define LPFC_IO_VMID		0x1000000 /* VMID tagged IO */
#define LPFC_IO_CMF		0x4000000 /* CMF command */

	uint32_t drvrTimeout;	/* driver timeout in seconds */
	struct lpfc_vport *vport;/* virtual port pointer */
	struct lpfc_dmabuf *cmd_dmabuf;
	struct lpfc_dmabuf *rsp_dmabuf;
	struct lpfc_dmabuf *bpl_dmabuf;
	uint32_t event_tag;	/* LA Event tag */
	union {
		wait_queue_head_t    *wait_queue;
		struct lpfcMboxq     *mbox;
		struct lpfc_node_rrq *rrq;
		struct nvmefc_ls_req *nvme_lsreq;
		struct lpfc_async_xchg_ctx *axchg;
		struct bsg_job_data *dd_data;
	} context_un;

	struct lpfc_io_buf *io_buf;
	struct lpfc_iocbq *rsp_iocb;
	struct lpfc_nodelist *ndlp;
	union lpfc_vmid_tag vmid_tag;
	void (*fabric_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
				struct lpfc_iocbq *rsp);
	void (*wait_cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
			      struct lpfc_iocbq *rsp);
	void (*cmd_cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
			 struct lpfc_iocbq *rsp);
};
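
/*
 * Illustrative sketch only: callers typically attach a completion handler to
 * cmd_cmpl before submitting the iocbq; the SLI layer then invokes it with
 * the command and response when the WQE/IOCB completes.  The handler and the
 * submit step below are hypothetical placeholders, not driver API:
 *
 *	static void my_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmd,
 *				struct lpfc_iocbq *rsp)
 *	{
 *		// inspect rsp / cmd->wcqe_cmpl status, then release cmd
 *	}
 *
 *	piocbq->vport = vport;
 *	piocbq->cmd_cmpl = my_els_cmpl;
 *	// hand piocbq to the SLI submit path for the chosen ring/WQ
 */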

#define SLI_IOCB_RET_IOCB      1	/* Return IOCB if cmd ring full */

#define IOCB_SUCCESS        0
#define IOCB_BUSY           1
#define IOCB_ERROR          2
#define IOCB_TIMEDOUT       3
#define IOCB_ABORTED        4
#define IOCB_ABORTING	    5
#define IOCB_NORESOURCE	    6

#define SLI_WQE_RET_WQE    1    /* Return WQE if cmd ring full */

#define WQE_SUCCESS        0
#define WQE_BUSY           1
#define WQE_ERROR          2
#define WQE_TIMEDOUT       3
#define WQE_ABORTED        4
#define WQE_ABORTING	   5
#define WQE_NORESOURCE	   6

#define LPFC_MBX_WAKE		1
#define LPFC_MBX_IMED_UNREG	2

typedef struct lpfcMboxq {
	/* MBOXQs are used in single linked lists */
	struct list_head list;	/* ptr to next mailbox command */
	union {
		MAILBOX_t mb;		/* Mailbox cmd */
		struct lpfc_mqe mqe;
	} u;
	struct lpfc_vport *vport; /* virtual port pointer */
	struct lpfc_nodelist *ctx_ndlp;	/* caller ndlp pointer */
	struct lpfc_dmabuf *ctx_buf;	/* caller buffer information */
	void *ext_buf;			/* extended buffer for extended mbox
					 * cmds.  Not a generic pointer.
					 * Use for storing virtual address.
					 */

	/* Pointers that are seldom used during mbox execution, but require
	 * a saved context.
	 */
	union {
		unsigned long ox_rx_id;		/* Used in els_rsp_rls_acc */
		struct lpfc_rdp_context *rdp;	/* Used in get_rdp_info */
		struct lpfc_lcb_context *lcb;	/* Used in set_beacon */
		struct completion *mbox_wait;	/* Used in issue_mbox_wait */
		struct bsg_job_data *dd_data;	/* Used in bsg_issue_mbox_cmpl
						 * and
						 * bsg_issue_mbox_ext_handle_job
						 */
		struct lpfc_iocbq *save_iocb;	/* Used in defer_plogi_acc and
						 * lpfc_mbx_cmpl_resume_rpi
						 */
	} ctx_u;

	void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
	uint8_t mbox_flag;
	uint16_t in_ext_byte_len;
	uint16_t out_ext_byte_len;
	uint8_t  mbox_offset_word;
	struct lpfc_mcqe mcqe;
	struct lpfc_mbx_nembed_sge_virt *sge_array;
} LPFC_MBOXQ_t;

#define MBX_POLL        1	/* poll mailbox till command done, then
				   return */
#define MBX_NOWAIT      2	/* issue command then return immediately */
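
/*
 * Illustrative sketch only: a mailbox command is built into an LPFC_MBOXQ_t
 * and issued either synchronously (MBX_POLL) or asynchronously (MBX_NOWAIT)
 * with a mbox_cmpl handler.  The allocation/issue helpers and return code
 * named below are assumed for illustration:
 *
 *	LPFC_MBOXQ_t *mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *
 *	// fill in mbox->u.mb / mbox->u.mqe for the desired command
 *	mbox->vport = vport;
 *	mbox->mbox_cmpl = my_mbox_cmpl;		// hypothetical handler
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 *	if (rc == MBX_NOT_FINISHED)
 *		mempool_free(mbox, phba->mbox_mem_pool);
 */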

#define LPFC_MAX_RING_MASK  5	/* max num of rctl/type masks allowed per
				   ring */
#define LPFC_SLI3_MAX_RING  4	/* Max num of SLI3 rings used by driver.
				   For SLI4, an additional ring for each
				   FCP WQ will be allocated.  */

struct lpfc_sli_ring;

struct lpfc_sli_ring_mask {
	uint8_t profile;	/* profile associated with ring */
	uint8_t rctl;	/* rctl / type pair configured for ring */
	uint8_t type;	/* rctl / type pair configured for ring */
	uint8_t rsvd;
	/* rcv'd unsol event */
	void (*lpfc_sli_rcv_unsol_event) (struct lpfc_hba *,
					 struct lpfc_sli_ring *,
					 struct lpfc_iocbq *);
};


/* Structure used to hold SLI statistical counters and info */
struct lpfc_sli_ring_stat {
	uint64_t iocb_event;	 /* IOCB event counters */
	uint64_t iocb_cmd;	 /* IOCB cmd issued */
	uint64_t iocb_rsp;	 /* IOCB rsp received */
	uint64_t iocb_cmd_delay; /* IOCB cmd ring delay */
	uint64_t iocb_cmd_full;	 /* IOCB cmd ring full */
	uint64_t iocb_cmd_empty; /* IOCB cmd ring is now empty */
	uint64_t iocb_rsp_full;	 /* IOCB rsp ring full */
};

struct lpfc_sli3_ring {
	uint32_t local_getidx;  /* last available cmd index (from cmdGetInx) */
	uint32_t next_cmdidx;   /* next_cmd index */
	uint32_t rspidx;	/* current index in response ring */
	uint32_t cmdidx;	/* current index in command ring */
	uint16_t numCiocb;	/* number of command iocb's per ring */
	uint16_t numRiocb;	/* number of rsp iocb's per ring */
	uint16_t sizeCiocb;	/* Size of command iocb's in this ring */
	uint16_t sizeRiocb;	/* Size of response iocb's in this ring */
	uint32_t *cmdringaddr;	/* virtual address for cmd rings */
	uint32_t *rspringaddr;	/* virtual address for rsp rings */
};

struct lpfc_sli4_ring {
	struct lpfc_queue *wqp;	/* Pointer to associated WQ */
};


/* Structure used to hold SLI ring information */
struct lpfc_sli_ring {
	uint16_t flag;		/* ring flags */
#define LPFC_DEFERRED_RING_EVENT 0x001	/* Deferred processing a ring event */
#define LPFC_CALL_RING_AVAILABLE 0x002	/* indicates cmd was full */
#define LPFC_STOP_IOCB_EVENT     0x020	/* Stop processing IOCB cmds event */
	uint16_t abtsiotag;	/* tracks next iotag to use for ABTS */

	uint8_t rsvd;
	uint8_t ringno;		/* ring number */

	spinlock_t ring_lock;	/* lock for issuing commands */

	uint32_t fast_iotag;	/* max fastlookup based iotag           */
	uint32_t iotag_ctr;	/* keeps track of the next iotag to use */
	uint32_t iotag_max;	/* max iotag value to use               */
	struct list_head txq;
	uint16_t txq_cnt;	/* current length of queue */
	uint16_t txq_max;	/* max length */
	struct list_head txcmplq;
	uint16_t txcmplq_cnt;	/* current length of queue */
	uint16_t txcmplq_max;	/* max length */
	uint32_t missbufcnt;	/* keep track of buffers to post */
	struct list_head postbufq;
	uint16_t postbufq_cnt;	/* current length of queue */
	uint16_t postbufq_max;	/* max length */
	struct list_head iocb_continueq;
	uint16_t iocb_continueq_cnt;	/* current length of queue */
	uint16_t iocb_continueq_max;	/* max length */
	struct list_head iocb_continue_saveq;

	struct lpfc_sli_ring_mask prt[LPFC_MAX_RING_MASK];
	uint32_t num_mask;	/* number of mask entries in prt array */
	void (*lpfc_sli_rcv_async_status) (struct lpfc_hba *,
		struct lpfc_sli_ring *, struct lpfc_iocbq *);

	struct lpfc_sli_ring_stat stats;	/* SLI statistical info */

	/* cmd ring available */
	void (*lpfc_sli_cmd_available) (struct lpfc_hba *,
					struct lpfc_sli_ring *);
	union {
		struct lpfc_sli3_ring sli3;
		struct lpfc_sli4_ring sli4;
	} sli;
};

/* Structure used for configuring an HBQ to a specific profile or rctl / type */
struct lpfc_hbq_init {
	uint32_t rn;		/* Receive buffer notification */
	uint32_t entry_count;	/* max # of entries in HBQ */
	uint32_t headerLen;	/* 0 if not profile 4 or 5 */
	uint32_t logEntry;	/* Set to 1 if this HBQ used for LogEntry */
	uint32_t profile;	/* Selection profile 0=all, 7=logentry */
	uint32_t ring_mask;	/* Binds HBQ to a ring e.g. Ring0=b0001,
				 * ring2=b0100 */
	uint32_t hbq_index;	/* index of this hbq in the ring's HBQs[] array */

	uint32_t seqlenoff;
	uint32_t maxlen;
	uint32_t seqlenbcnt;
	uint32_t cmdcodeoff;
	uint32_t cmdmatch[8];
	uint32_t mask_count;	/* number of mask entries in prt array */
	struct hbq_mask hbqMasks[6];

	/* Non-config rings fields to keep track of buffer allocations */
	uint32_t buffer_count;	/* number of buffers allocated */
	uint32_t init_count;	/* number to allocate when initialized */
	uint32_t add_count;	/* number to allocate when starved */
};

/* Structure used to hold SLI statistical counters and info */
struct lpfc_sli_stat {
	uint64_t mbox_stat_err;  /* Mbox cmds completed status error */
	uint64_t mbox_cmd;       /* Mailbox commands issued */
	uint64_t sli_intr;       /* Count of Host Attention interrupts */
	uint64_t sli_prev_intr;  /* Previous cnt of Host Attention interrupts */
	uint64_t sli_ips;        /* Host Attention interrupts per sec */
	uint32_t err_attn_event; /* Error Attn event counters */
	uint32_t link_event;     /* Link event counters */
	uint32_t mbox_event;     /* Mailbox event counters */
	uint32_t mbox_busy;	 /* Mailbox cmd busy */
};

/* Structure to store link status values when port stats are reset */
struct lpfc_lnk_stat {
	uint32_t link_failure_count;
	uint32_t loss_of_sync_count;
	uint32_t loss_of_signal_count;
	uint32_t prim_seq_protocol_err_count;
	uint32_t invalid_tx_word_count;
	uint32_t invalid_crc_count;
	uint32_t error_frames;
	uint32_t link_events;
};

/* Structure used to hold SLI information */
struct lpfc_sli {
	uint32_t num_rings;
	uint32_t sli_flag;

	/* Additional sli_flags */
#define LPFC_SLI_MBOX_ACTIVE      0x100	/* HBA mailbox is currently active */
#define LPFC_SLI_ACTIVE           0x200	/* SLI in firmware is active */
#define LPFC_PROCESS_LA           0x400	/* Able to process link attention */
#define LPFC_BLOCK_MGMT_IO        0x800	/* Don't allow mgmt mbx or iocb cmds */
#define LPFC_SLI_ASYNC_MBX_BLK    0x2000 /* Async mailbox is blocked */
#define LPFC_SLI_SUPPRESS_RSP     0x4000 /* Suppress RSP feature is supported */
#define LPFC_SLI_USE_EQDR         0x8000 /* EQ Delay Register is supported */
#define LPFC_QUEUE_FREE_INIT	  0x10000 /* Queue freeing is in progress */
#define LPFC_QUEUE_FREE_WAIT	  0x20000 /* Hold Queue free as it is being
					   * used outside worker thread
					   */

	struct lpfc_sli_ring *sli3_ring;

	struct lpfc_sli_stat slistat;	/* SLI statistical info */
	struct list_head mboxq;
	uint16_t mboxq_cnt;	/* current length of queue */
	uint16_t mboxq_max;	/* max length */
	LPFC_MBOXQ_t *mbox_active;	/* active mboxq information */
	struct list_head mboxq_cmpl;

	struct timer_list mbox_tmo;	/* Hold clk to timeout active mbox
					   cmd */

#define LPFC_IOCBQ_LOOKUP_INCREMENT  1024
	struct lpfc_iocbq **iocbq_lookup;  /* array to lookup IOCB by IOTAG */
	size_t iocbq_lookup_len;           /* current length of the array */
	uint16_t  last_iotag;              /* last allocated IOTAG */
	time64_t  stats_start;		   /* in seconds */
	struct lpfc_lnk_stat lnk_stat_offsets;
};
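
/*
 * Illustrative sketch only: an outstanding command can be recovered from its
 * IOTAG through the iocbq_lookup array.  This is a minimal, hypothetical
 * lookup; the real driver additionally validates the entry under the
 * appropriate lock before using it:
 *
 *	struct lpfc_sli *psli = &phba->sli;
 *	struct lpfc_iocbq *cmdiocb = NULL;
 *
 *	if (iotag != 0 && iotag <= psli->last_iotag)
 *		cmdiocb = psli->iocbq_lookup[iotag];
 */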

/* Timeout for normal outstanding mbox command (Seconds) */
#define LPFC_MBOX_TMO				30
/* Timeout for non-flash-based outstanding sli_config mbox command (Seconds) */
#define LPFC_MBOX_SLI4_CONFIG_TMO		60
/* Timeout for flash-based outstanding sli_config mbox command (Seconds) */
#define LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO	300
/* Timeout for other flash-based outstanding mbox command (Seconds) */
#define LPFC_MBOX_TMO_FLASH_CMD			300
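
/*
 * Illustrative sketch only: these values are in seconds, so a caller arming
 * the mbox_tmo timer (or waiting on a completion) would convert to jiffies
 * first, e.g. for the normal mailbox timeout:
 *
 *	unsigned long timeout = jiffies + msecs_to_jiffies(LPFC_MBOX_TMO * 1000);
 *
 *	mod_timer(&phba->sli.mbox_tmo, timeout);
 */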

struct lpfc_io_buf {
	/* Common fields */
	struct list_head list;
	void *data;

	dma_addr_t dma_handle;
	dma_addr_t dma_phys_sgl;

	struct sli4_sge *dma_sgl; /* initial segment chunk */

	/* linked list of extra sli4_hybrid_sge */
	struct list_head dma_sgl_xtra_list;

	/* list head for fcp_cmd_rsp buf */
	struct list_head dma_cmd_rsp_list;

	struct lpfc_iocbq cur_iocbq;
	struct lpfc_sli4_hdw_queue *hdwq;
	uint16_t hdwq_no;
	uint16_t cpu;

	struct lpfc_nodelist *ndlp;
	uint32_t timeout;
	uint16_t flags;
#define LPFC_SBUF_XBUSY		0x1	/* SLI4 hba reported XB on WCQE cmpl */
#define LPFC_SBUF_BUMP_QDEPTH	0x2	/* bumped queue depth counter */
					/* External DIF device IO conversions */
#define LPFC_SBUF_NORMAL_DIF	0x4	/* normal mode to insert/strip */
#define LPFC_SBUF_PASS_DIF	0x8	/* insert/strip mode to passthru */
#define LPFC_SBUF_NOT_POSTED    0x10    /* SGL failed post to FW. */
	uint16_t status;	/* From IOCB Word 7- ulpStatus */
	uint32_t result;	/* From IOCB Word 4. */

	uint32_t   seg_cnt;	/* Number of scatter-gather segments returned by
				 * dma_map_sg.  The driver needs this for calls
				 * to dma_unmap_sg.
				 */
	unsigned long start_time;
	spinlock_t buf_lock;	/* lock used in case of simultaneous abort */
	bool expedite;		/* this is an expedite io_buf */

	union {
		/* SCSI specific fields */
		struct {
			struct scsi_cmnd *pCmd;
			struct lpfc_rport_data *rdata;
			uint32_t prot_seg_cnt;  /* seg_cnt's counterpart for
						 * protection data
						 */

			/*
			 * data and dma_handle are the kernel virtual and bus
			 * address of the dma-able buffer containing the
			 * fcp_cmd, fcp_rsp and a scatter gather bde list that
			 * supports the sg_tablesize value.
			 */
			struct fcp_cmnd *fcp_cmnd;
			struct fcp_rsp *fcp_rsp;

			wait_queue_head_t *waitq;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
			/* Used to restore any changes to protection data for
			 * error injection
			 */
			void *prot_data_segment;
			uint32_t prot_data;
			uint32_t prot_data_type;
#define	LPFC_INJERR_REFTAG	1
#define	LPFC_INJERR_APPTAG	2
#define	LPFC_INJERR_GUARD	3
#endif
		};

		/* NVME specific fields */
		struct {
			struct nvmefc_fcp_req *nvmeCmd;
			uint16_t qidx;
		};
	};
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint64_t ts_cmd_start;
	uint64_t ts_last_cmd;
	uint64_t ts_cmd_wqput;
	uint64_t ts_isr_cmpl;
	uint64_t ts_data_io;
#endif
	uint64_t rx_cmd_start;
};
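
/*
 * Illustrative sketch only: the anonymous union above is interpreted by the
 * protocol that owns the buffer, which is recorded in the embedded iocbq's
 * cmd_flag.  A hypothetical consumer might dispatch like this (the completion
 * helpers shown are placeholders, not driver API):
 *
 *	if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_NVME)
 *		complete_nvme_io(lpfc_cmd->nvmeCmd);	// hypothetical helper
 *	else if (lpfc_cmd->cur_iocbq.cmd_flag & LPFC_IO_FCP)
 *		complete_scsi_io(lpfc_cmd->pCmd);	// hypothetical helper
 */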