1  /*******************************************************************
2   * This file is part of the Emulex Linux Device Driver for         *
3   * Fibre Channel Host Bus Adapters.                                *
4   * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
6   * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7   * EMULEX and SLI are trademarks of Emulex.                        *
8   * www.broadcom.com                                                *
9   * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
10   *                                                                 *
11   * This program is free software; you can redistribute it and/or   *
12   * modify it under the terms of version 2 of the GNU General       *
13   * Public License as published by the Free Software Foundation.    *
14   * This program is distributed in the hope that it will be useful. *
15   * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
16   * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
17   * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
18   * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
19   * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
20   * more details, a copy of which can be found in the file COPYING  *
21   * included with this package.                                     *
22   ********************************************************************/
23  
24  #include <linux/nvme.h>
25  #include <linux/nvme-fc-driver.h>
26  #include <linux/nvme-fc.h>
27  
/* Initiator-side NVME tunables */
#define LPFC_NVME_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */

/* Extended RSP buffer length in bytes (0x20 = 32) */
#define LPFC_NVME_ERSP_LEN		0x20

#define LPFC_NVME_WAIT_TMO              10	/* seconds - TODO confirm unit at call sites */
#define LPFC_NVME_EXPEDITE_XRICNT	8	/* XRIs reserved for expedited IO */
#define LPFC_NVME_FB_SHIFT		9	/* first-burst granularity: 1 << 9 = 512B */
#define LPFC_NVME_MAX_FB		(1 << 20)	/* 1M */
36  
/*
 * lpfc_ndlp_get_nrport - return the nvme rport bound to a node, if usable.
 *
 * Evaluates to @ndlp->nrport, or NULL when no rport is attached or the
 * node is waiting on an NVME transport unregister (NVME_XPT_UNREG_WAIT
 * set in fc4_xpt_flags).
 *
 * Note: @ndlp is evaluated more than once - do not pass an expression
 * with side effects.
 */
#define lpfc_ndlp_get_nrport(ndlp)				\
	((!(ndlp)->nrport ||					\
	  ((ndlp)->fc4_xpt_flags & NVME_XPT_UNREG_WAIT))	\
	? NULL : (ndlp)->nrport)
40  
/*
 * Per-queue context recorded at queue-create time; presumably the
 * handle handed back to the nvme-fc transport - confirm in lpfc_nvme.c.
 */
struct lpfc_nvme_qhandle {
	uint32_t index;		/* WQ index to use */
	uint32_t qidx;		/* queue index passed to create */
	uint32_t cpu_id;	/* current cpu id at time of create */
};
46  
/* Declare nvme-based local and remote port definitions. */

/*
 * Local-port private data: the owning vport plus NVME FC4 statistics
 * counters (all atomics; incremented from the I/O paths).
 */
struct lpfc_nvme_lport {
	struct lpfc_vport *vport;		/* owning vport */
	struct completion *lport_unreg_cmp;	/* completed when lport unregister finishes */
	/* Add stats counters here */
	atomic_t fc4NvmeLsRequests;	/* LS requests issued */
	atomic_t fc4NvmeLsCmpls;	/* LS request completions */
	atomic_t xmt_fcp_noxri;		/* FCP IO failed: no XRI available */
	atomic_t xmt_fcp_bad_ndlp;	/* FCP IO failed: bad/unusable node */
	atomic_t xmt_fcp_qdepth;	/* FCP IO failed: queue depth limit */
	atomic_t xmt_fcp_wqerr;		/* FCP IO failed: WQE submit error */
	atomic_t xmt_fcp_err;		/* FCP IO failed: other xmit error */
	atomic_t xmt_fcp_abort;		/* FCP aborts issued */
	atomic_t xmt_ls_abort;		/* LS aborts issued */
	atomic_t xmt_ls_err;		/* LS xmit errors */
	atomic_t cmpl_fcp_xb;		/* FCP completions with XB set */
	atomic_t cmpl_fcp_err;		/* FCP completion errors */
	atomic_t cmpl_ls_xb;		/* LS completions with XB set */
	atomic_t cmpl_ls_err;		/* LS completion errors */
};
67  
/*
 * Remote-port private data: associates an nvme_fc_remote_port with its
 * driver node (ndlp) and owning local port.
 */
struct lpfc_nvme_rport {
	struct lpfc_nvme_lport *lport;		/* owning local port */
	struct nvme_fc_remote_port *remoteport;	/* transport remote-port handle */
	struct lpfc_nodelist *ndlp;		/* FC node this rport is bound to */
	struct completion rport_unreg_done;	/* completed when rport unregister finishes */
};
74  
/*
 * Driver-private area of an NVME FCP request: links the transport
 * request back to the lpfc I/O buffer carrying it.
 */
struct lpfc_nvme_fcpreq_priv {
	struct lpfc_io_buf *nvme_buf;
};
78  
/*
 * set NVME LS request timeouts to 30s. It is larger than the 2*R_A_TOV
 * set by the spec, which appears to have issues with some devices.
 */
#define LPFC_NVME_LS_TIMEOUT		30


/* NVME target-mode (nvmet) sizing and tunables */
#define LPFC_NVMET_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */
#define LPFC_NVMET_RQE_MIN_POST		128	/* min RQEs reposted at once */
#define LPFC_NVMET_RQE_DEF_POST		512	/* default RQE repost batch */
#define LPFC_NVMET_RQE_DEF_COUNT	2048	/* default RQEs per RQ */
#define LPFC_NVMET_SUCCESS_LEN		12	/* good-status RSP length, bytes */

/* cfg_nvmet_mrq bounds; 0 selects automatic MRQ sizing */
#define LPFC_NVMET_MRQ_AUTO		0
#define LPFC_NVMET_MRQ_MAX		16

#define LPFC_NVMET_WAIT_TMO		(5 * MSEC_PER_SEC)	/* milliseconds */

/* Used for NVME Target */
#define LPFC_NVMET_INV_HOST_ACTIVE      1	/* tgtport state: hosthandle invalidation active */
99  
/*
 * Target-port private data for NVME target mode. Aside from the HBA
 * back-pointer and unregister/invalidate bookkeeping, this is all
 * statistics counters, grouped by the routine that increments them
 * (see the per-group comments below).
 */
struct lpfc_nvmet_tgtport {
	struct lpfc_hba *phba;			/* owning HBA */
	struct completion *tport_unreg_cmp;	/* completed when targetport unregister finishes */
	atomic_t state;		/* tracks nvmet hosthandle invalidation */

	/* Stats counters - lpfc_nvmet_unsol_ls_buffer */
	atomic_t rcv_ls_req_in;
	atomic_t rcv_ls_req_out;
	atomic_t rcv_ls_req_drop;
	atomic_t xmt_ls_abort;
	atomic_t xmt_ls_abort_cmpl;

	/* Stats counters - lpfc_nvmet_xmt_ls_rsp */
	atomic_t xmt_ls_rsp;
	atomic_t xmt_ls_drop;

	/* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
	atomic_t xmt_ls_rsp_error;
	atomic_t xmt_ls_rsp_aborted;
	atomic_t xmt_ls_rsp_xb_set;
	atomic_t xmt_ls_rsp_cmpl;

	/* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
	atomic_t rcv_fcp_cmd_in;
	atomic_t rcv_fcp_cmd_out;
	atomic_t rcv_fcp_cmd_drop;
	atomic_t rcv_fcp_cmd_defer;
	atomic_t xmt_fcp_release;

	/* Stats counters - lpfc_nvmet_xmt_fcp_op */
	atomic_t xmt_fcp_drop;
	atomic_t xmt_fcp_read_rsp;
	atomic_t xmt_fcp_read;
	atomic_t xmt_fcp_write;
	atomic_t xmt_fcp_rsp;

	/* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
	atomic_t xmt_fcp_rsp_xb_set;
	atomic_t xmt_fcp_rsp_cmpl;
	atomic_t xmt_fcp_rsp_error;
	atomic_t xmt_fcp_rsp_aborted;
	atomic_t xmt_fcp_rsp_drop;

	/* Stats counters - lpfc_nvmet_xmt_fcp_abort */
	atomic_t xmt_fcp_xri_abort_cqe;
	atomic_t xmt_fcp_abort;
	atomic_t xmt_fcp_abort_cmpl;
	atomic_t xmt_abort_sol;
	atomic_t xmt_abort_unsol;
	atomic_t xmt_abort_rsp;
	atomic_t xmt_abort_rsp_error;

	/* Stats counters - defer IO */
	atomic_t defer_ctx;
	atomic_t defer_fod;
	atomic_t defer_wqfull;
};
157  
/*
 * Per-(cpu, mrq) list of nvmet receive contexts; instances are laid out
 * in a flat array indexed by lpfc_get_ctx_list(). The next/start cpu
 * links presumably let a drained list pull contexts from another CPU's
 * list - confirm against the allocation path in lpfc_nvmet.c.
 */
struct lpfc_nvmet_ctx_info {
	struct list_head nvmet_ctx_list;
	spinlock_t	nvmet_ctx_list_lock; /* lock per CPU */
	struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu;
	struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu;
	uint16_t	nvmet_ctx_list_cnt;	/* entries currently on nvmet_ctx_list */
	char pad[16];  /* pad to a cache-line */
};
166  
/*
 * lpfc_get_ctx_list - context info for the specified cpu / mrq pair.
 *
 * The per-(cpu, mrq) lpfc_nvmet_ctx_info entries form a flat array of
 * phba->cfg_nvmet_mrq entries per CPU, so the element for (cpu, mrq)
 * lives at index (cpu * cfg_nvmet_mrq) + mrq.
 *
 * All arguments are parenthesized so expression arguments (e.g. a
 * computed cpu index) expand correctly; @phba is evaluated twice.
 */
#define lpfc_get_ctx_list(phba, cpu, mrq)  \
	((phba)->sli4_hba.nvmet_ctx_info + \
	 (((cpu) * (phba)->cfg_nvmet_mrq) + (mrq)))
170  
/*
 * Values for state field of struct lpfc_async_xchg_ctx.
 * LS_* states apply to LS exchanges; RCV/DATA/ABORT/DONE to FCP
 * exchanges; FREE marks a context returned to its free list.
 */
#define LPFC_NVME_STE_LS_RCV		1
#define LPFC_NVME_STE_LS_ABORT		2
#define LPFC_NVME_STE_LS_RSP		3
#define LPFC_NVME_STE_RCV		4
#define LPFC_NVME_STE_DATA		5
#define LPFC_NVME_STE_ABORT		6
#define LPFC_NVME_STE_DONE		7
#define LPFC_NVME_STE_FREE		0xff

/* Values for flag field of struct lpfc_async_xchg_ctx (bitmask; may be ORed) */
#define LPFC_NVME_IO_INP		0x1  /* IO is in progress on exchange */
#define LPFC_NVME_ABORT_OP		0x2  /* Abort WQE issued on exchange */
#define LPFC_NVME_XBUSY			0x4  /* XB bit set on IO cmpl */
#define LPFC_NVME_CTX_RLS		0x8  /* ctx free requested */
#define LPFC_NVME_ABTS_RCV		0x10  /* ABTS received on exchange */
#define LPFC_NVME_CTX_REUSE_WQ		0x20  /* ctx reused via WQ */
#define LPFC_NVME_DEFER_WQFULL		0x40  /* Waiting on a free WQE */
#define LPFC_NVME_TNOTIFY		0x80  /* notify transport of abts */
190  
/*
 * Context for one asynchronous (unsolicited) exchange: an NVME LS
 * request/response or a target-mode FCP command. The state and flag
 * fields take the LPFC_NVME_STE_* and LPFC_NVME_* values above.
 */
struct lpfc_async_xchg_ctx {
	union {
		struct nvmefc_tgt_fcp_req fcp_req;	/* transport FCP request (target mode) */
	} hdlrctx;
	struct list_head list;
	struct lpfc_hba *phba;			/* owning HBA */
	struct lpfc_nodelist *ndlp;		/* FC node the exchange is with */
	struct nvmefc_ls_req *ls_req;		/* LS request, when this ctx carries one */
	struct nvmefc_ls_rsp ls_rsp;		/* LS response handed to the transport */
	struct lpfc_iocbq *wqeq;		/* WQE used for the IO */
	struct lpfc_iocbq *abort_wqeq;		/* WQE reserved for abort */
	spinlock_t ctxlock; /* protect flag access */
	uint32_t sid;		/* FC source ID - presumably the originator; confirm at rcv path */
	uint32_t offset;	/* current data offset within the exchange */
	uint16_t oxid;		/* originator exchange id of the unsolicited frame */
	uint16_t size;
	uint16_t entry_cnt;
	uint16_t cpu;
	uint16_t idx;		/* hdwq index - TODO confirm against hdwq assignment */
	uint16_t state;		/* LPFC_NVME_STE_* */
	uint16_t flag;		/* LPFC_NVME_* bits, guarded by ctxlock */
	void *payload;
	struct rqb_dmabuf *rqb_buffer;
	struct lpfc_nvmet_ctxbuf *ctxbuf;
	struct lpfc_sli4_hdw_queue *hdwq;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	/* Latency timestamps captured at each stage of the IO (debugfs stats) */
	uint64_t ts_isr_cmd;
	uint64_t ts_cmd_nvme;
	uint64_t ts_nvme_data;
	uint64_t ts_data_wqput;
	uint64_t ts_isr_data;
	uint64_t ts_data_nvme;
	uint64_t ts_nvme_status;
	uint64_t ts_status_wqput;
	uint64_t ts_isr_status;
	uint64_t ts_status_nvme;
#endif
};
230  
231  
/* routines found in lpfc_nvme.c */

/*
 * Issue an NVME LS request for @pnvme_lsreq to node @ndlp;
 * @gen_req_cmp is invoked when the request WQE completes.
 */
int __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		struct nvmefc_ls_req *pnvme_lsreq,
		void (*gen_req_cmp)(struct lpfc_hba *phba,
				struct lpfc_iocbq *cmdwqe,
				struct lpfc_iocbq *rspwqe));
/* Common completion handling for LS requests issued via __lpfc_nvme_ls_req() */
void __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba,  struct lpfc_vport *vport,
		struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
/* Abort an outstanding LS request previously issued for @pnvme_lsreq */
int __lpfc_nvme_ls_abort(struct lpfc_vport *vport,
		struct lpfc_nodelist *ndlp, struct nvmefc_ls_req *pnvme_lsreq);

/* routines found in lpfc_nvmet.c */

/* Issue an abort for an unsolicited LS exchange identified by sid/xri */
int lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
			struct lpfc_async_xchg_ctx *ctxp, uint32_t sid,
			uint16_t xri);
/*
 * Transmit LS response @ls_rsp on exchange @axchg; @xmt_ls_rsp_cmp is
 * invoked when the response WQE completes.
 */
int __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
			struct nvmefc_ls_rsp *ls_rsp,
			void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
				struct lpfc_iocbq *cmdwqe,
				struct lpfc_iocbq *rspwqe));
/* Common completion handling for LS responses sent via __lpfc_nvme_xmt_ls_rsp() */
void __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba,
		struct lpfc_iocbq *cmdwqe, struct lpfc_iocbq *rspwqe);
254