/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/utsname.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>
#include <linux/unaligned.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_gs.h>
#include <scsi/fc/fc_ms.h>

#include "csio_hw.h"
#include "csio_mb.h"
#include "csio_lnode.h"
#include "csio_rnode.h"

int csio_fcoe_rnodes = 1024;
int csio_fdmi_enable = 1;

#define PORT_ID_PTR(_x)         ((uint8_t *)(&_x) + 1)
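
/*
 * An FC port-id is 24 bits carried in the low three bytes of a 32-bit
 * big-endian word; PORT_ID_PTR() skips the first byte of a __be32 to get
 * at those three bytes. Usage, as in csio_ln_prep_ecwr() below:
 *
 *	__be32 port_id = htonl(sid);
 *	memcpy(wr->l_id, PORT_ID_PTR(port_id), 3);
 */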

/* Lnode SM declarations */
static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev);
static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev);
static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev);
static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev);

static int csio_ln_mgmt_submit_req(struct csio_ioreq *,
		void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
		enum fcoe_cmn_type, struct csio_dma_buf *, uint32_t);

/* LN event mapping */
static enum csio_ln_ev fwevt_to_lnevt[] = {
	CSIO_LNE_NONE,		/* None */
	CSIO_LNE_NONE,		/* PLOGI_ACC_RCVD  */
	CSIO_LNE_NONE,		/* PLOGI_RJT_RCVD  */
	CSIO_LNE_NONE,		/* PLOGI_RCVD	   */
	CSIO_LNE_NONE,		/* PLOGO_RCVD	   */
	CSIO_LNE_NONE,		/* PRLI_ACC_RCVD   */
	CSIO_LNE_NONE,		/* PRLI_RJT_RCVD   */
	CSIO_LNE_NONE,		/* PRLI_RCVD	   */
	CSIO_LNE_NONE,		/* PRLO_RCVD	   */
	CSIO_LNE_NONE,		/* NPORT_ID_CHGD   */
	CSIO_LNE_LOGO,		/* FLOGO_RCVD	   */
	CSIO_LNE_LOGO,		/* CLR_VIRT_LNK_RCVD */
	CSIO_LNE_FAB_INIT_DONE,	/* FLOGI_ACC_RCVD   */
	CSIO_LNE_NONE,		/* FLOGI_RJT_RCVD   */
	CSIO_LNE_FAB_INIT_DONE,	/* FDISC_ACC_RCVD   */
	CSIO_LNE_NONE,		/* FDISC_RJT_RCVD   */
	CSIO_LNE_NONE,		/* FLOGI_TMO_MAX_RETRY */
	CSIO_LNE_NONE,		/* IMPL_LOGO_ADISC_ACC */
	CSIO_LNE_NONE,		/* IMPL_LOGO_ADISC_RJT */
	CSIO_LNE_NONE,		/* IMPL_LOGO_ADISC_CNFLT */
	CSIO_LNE_NONE,		/* PRLI_TMO		*/
	CSIO_LNE_NONE,		/* ADISC_TMO		*/
	CSIO_LNE_NONE,		/* RSCN_DEV_LOST */
	CSIO_LNE_NONE,		/* SCR_ACC_RCVD */
	CSIO_LNE_NONE,		/* ADISC_RJT_RCVD */
	CSIO_LNE_NONE,		/* LOGO_SNT */
	CSIO_LNE_NONE,		/* PROTO_ERR_IMPL_LOGO */
};

#define CSIO_FWE_TO_LNE(_evt)	((_evt > PROTO_ERR_IMPL_LOGO) ?		\
						CSIO_LNE_NONE :	\
						fwevt_to_lnevt[_evt])
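
/*
 * Example mapping, taken from the table above: a FLOGI_ACC_RCVD firmware
 * event translates to a fabric-init-done lnode event, i.e.
 *
 *	CSIO_FWE_TO_LNE(FLOGI_ACC_RCVD) == CSIO_LNE_FAB_INIT_DONE
 *
 * Anything beyond PROTO_ERR_IMPL_LOGO falls outside the table and is
 * clamped to CSIO_LNE_NONE.
 */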

#define csio_ct_rsp(cp)		(((struct fc_ct_hdr *)cp)->ct_cmd)
#define csio_ct_reason(cp)	(((struct fc_ct_hdr *)cp)->ct_reason)
#define csio_ct_expl(cp)	(((struct fc_ct_hdr *)cp)->ct_explan)
#define csio_ct_get_pld(cp)	((void *)(((uint8_t *)cp) + FC_CT_HDR_LEN))
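
/*
 * The csio_ct_*() accessors above treat a raw DMA buffer as a CT IU:
 * a struct fc_ct_hdr followed by the command-specific payload. A typical
 * response check (see the FDMI callbacks below) therefore reads:
 *
 *	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC)
 *		... log csio_ct_reason(cmd) and csio_ct_expl(cmd) ...
 *	pld = csio_ct_get_pld(cmd);	points at cmd + FC_CT_HDR_LEN
 */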
106  
107  /*
108   * csio_ln_match_by_portid - lookup lnode using given portid.
109   * @hw: HW module
110   * @portid: port-id.
111   *
112   * If found, returns lnode matching given portid otherwise returns NULL.
113   */
114  static struct csio_lnode *
csio_ln_lookup_by_portid(struct csio_hw * hw,uint8_t portid)115  csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid)
116  {
117  	struct csio_lnode *ln;
118  	struct list_head *tmp;
119  
120  	/* Match siblings lnode with portid */
121  	list_for_each(tmp, &hw->sln_head) {
122  		ln = (struct csio_lnode *) tmp;
123  		if (ln->portid == portid)
124  			return ln;
125  	}
126  
127  	return NULL;
128  }

/*
 * csio_ln_lookup_by_vnpi - Lookup lnode using given vnp id.
 * @hw - HW module
 * @vnpi - vnp index.
 * Returns - If found, returns lnode matching given vnp id
 * otherwise returns NULL.
 */
static struct csio_lnode *
csio_ln_lookup_by_vnpi(struct csio_hw *hw, uint32_t vnp_id)
{
	struct list_head *tmp1, *tmp2;
	struct csio_lnode *sln = NULL, *cln = NULL;

	if (list_empty(&hw->sln_head)) {
		CSIO_INC_STATS(hw, n_lnlkup_miss);
		return NULL;
	}
	/* Traverse sibling lnodes */
	list_for_each(tmp1, &hw->sln_head) {
		sln = (struct csio_lnode *) tmp1;

		/* Match sibling lnode */
		if (sln->vnp_flowid == vnp_id)
			return sln;

		if (list_empty(&sln->cln_head))
			continue;

		/* Traverse children lnodes */
		list_for_each(tmp2, &sln->cln_head) {
			cln = (struct csio_lnode *) tmp2;

			if (cln->vnp_flowid == vnp_id)
				return cln;
		}
	}
	CSIO_INC_STATS(hw, n_lnlkup_miss);
	return NULL;
}

/**
 * csio_lnode_lookup_by_wwpn - Lookup lnode using given wwpn.
 * @hw:		HW module.
 * @wwpn:	WWPN.
 *
 * If found, returns lnode matching given wwpn, returns NULL otherwise.
 */
struct csio_lnode *
csio_lnode_lookup_by_wwpn(struct csio_hw *hw, uint8_t *wwpn)
{
	struct list_head *tmp1, *tmp2;
	struct csio_lnode *sln = NULL, *cln = NULL;

	if (list_empty(&hw->sln_head)) {
		CSIO_INC_STATS(hw, n_lnlkup_miss);
		return NULL;
	}
	/* Traverse sibling lnodes */
	list_for_each(tmp1, &hw->sln_head) {
		sln = (struct csio_lnode *) tmp1;

		/* Match sibling lnode */
		if (!memcmp(csio_ln_wwpn(sln), wwpn, 8))
			return sln;

		if (list_empty(&sln->cln_head))
			continue;

		/* Traverse children lnodes */
		list_for_each(tmp2, &sln->cln_head) {
			cln = (struct csio_lnode *) tmp2;

			if (!memcmp(csio_ln_wwpn(cln), wwpn, 8))
				return cln;
		}
	}
	return NULL;
}

/* FDMI */
static void
csio_fill_ct_iu(void *buf, uint8_t type, uint8_t sub_type, uint16_t op)
{
	struct fc_ct_hdr *cmd = (struct fc_ct_hdr *)buf;
	cmd->ct_rev = FC_CT_REV;
	cmd->ct_fs_type = type;
	cmd->ct_fs_subtype = sub_type;
	cmd->ct_cmd = htons(op);
}

static int
csio_hostname(uint8_t *buf, size_t buf_len)
{
	if (snprintf(buf, buf_len, "%s", init_utsname()->nodename) > 0)
		return 0;
	return -1;
}

static int
csio_osname(uint8_t *buf, size_t buf_len)
{
	if (snprintf(buf, buf_len, "%s %s %s",
		     init_utsname()->sysname,
		     init_utsname()->release,
		     init_utsname()->version) > 0)
		return 0;

	return -1;
}

static inline void
csio_append_attrib(uint8_t **ptr, uint16_t type, void *val, size_t val_len)
{
	uint16_t len;
	struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr;

	if (WARN_ON(val_len > U16_MAX))
		return;

	len = val_len;

	ae->type = htons(type);
	len += 4;		/* includes attribute type and length */
	len = (len + 3) & ~3;	/* should be multiple of 4 bytes */
	ae->len = htons(len);
	memcpy(ae->value, val, val_len);
	if (len > val_len)
		memset(ae->value + val_len, 0, len - val_len);
	*ptr += len;
}
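
/*
 * Each FDMI attribute appended above is a TLV: a 2-byte type, a 2-byte
 * length that counts the 4-byte header itself, and a value padded out to
 * a 4-byte boundary. Worked example: val_len = 6 gives
 * len = ((6 + 4) + 3) & ~3 = 12, so the entry occupies 12 bytes, the
 * bytes past the value are zeroed, and *ptr advances by 12.
 */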

/*
 * csio_ln_fdmi_done - FDMI registration completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_done(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
	void *cmd;
	struct csio_lnode *ln = fdmi_req->lnode;

	if (fdmi_req->wr_status != FW_SUCCESS) {
		csio_ln_dbg(ln, "WR error:%x in processing fdmi rpa cmd\n",
			    fdmi_req->wr_status);
		CSIO_INC_STATS(ln, n_fdmi_err);
	}

	cmd = fdmi_req->dma_buf.vaddr;
	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
		csio_ln_dbg(ln, "fdmi rpa cmd rejected reason %x expl %x\n",
			    csio_ct_reason(cmd), csio_ct_expl(cmd));
	}
}

/*
 * csio_ln_fdmi_rhba_cbfn - RHBA completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
	void *cmd;
	uint8_t *pld;
	uint32_t len = 0;
	__be32 val;
	__be16 mfs;
	uint32_t numattrs = 0;
	struct csio_lnode *ln = fdmi_req->lnode;
	struct fs_fdmi_attrs *attrib_blk;
	struct fc_fdmi_port_name *port_name;
	uint8_t buf[64];
	uint8_t *fc4_type;
	unsigned long flags;

	if (fdmi_req->wr_status != FW_SUCCESS) {
		csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
			    fdmi_req->wr_status);
		CSIO_INC_STATS(ln, n_fdmi_err);
	}

	cmd = fdmi_req->dma_buf.vaddr;
	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
		csio_ln_dbg(ln, "fdmi rhba cmd rejected reason %x expl %x\n",
			    csio_ct_reason(cmd), csio_ct_expl(cmd));
	}

	if (!csio_is_rnode_ready(fdmi_req->rnode)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		return;
	}

	/* Prepare CT hdr for RPA cmd */
	memset(cmd, 0, FC_CT_HDR_LEN);
	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RPA);

	/* Prepare RPA payload */
	pld = (uint8_t *)csio_ct_get_pld(cmd);
	port_name = (struct fc_fdmi_port_name *)pld;
	memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
	pld += sizeof(*port_name);

	/* Start appending Port attributes */
	attrib_blk = (struct fs_fdmi_attrs *)pld;
	attrib_blk->numattrs = 0;
	len += sizeof(attrib_blk->numattrs);
	pld += sizeof(attrib_blk->numattrs);

	fc4_type = &buf[0];
	memset(fc4_type, 0, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
	fc4_type[2] = 1;
	fc4_type[7] = 1;
	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_FC4TYPES,
			   fc4_type, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
	numattrs++;
	val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
			   &val,
			   FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
	numattrs++;

	if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_1G)
		val = htonl(FC_PORTSPEED_1GBIT);
	else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G)
		val = htonl(FC_PORTSPEED_10GBIT);
	else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_25G)
		val = htonl(FC_PORTSPEED_25GBIT);
	else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_40G)
		val = htonl(FC_PORTSPEED_40GBIT);
	else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_50G)
		val = htonl(FC_PORTSPEED_50GBIT);
	else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP32_SPEED_100G)
		val = htonl(FC_PORTSPEED_100GBIT);
	else
		val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
			   &val, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
	numattrs++;

	mfs = ln->ln_sparm.csp.sp_bb_data;
	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
			   &mfs, sizeof(mfs));
	numattrs++;

	strcpy(buf, "csiostor");
	csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf,
			   strlen(buf));
	numattrs++;

	if (!csio_hostname(buf, sizeof(buf))) {
		csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME,
				   buf, strlen(buf));
		numattrs++;
	}
	attrib_blk->numattrs = htonl(numattrs);
	len = (uint32_t)(pld - (uint8_t *)cmd);

	/* Submit FDMI RPA request */
	spin_lock_irqsave(&hw->lock, flags);
	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
				FCOE_CT, &fdmi_req->dma_buf, len)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
	}
	spin_unlock_irqrestore(&hw->lock, flags);
}

/*
 * csio_ln_fdmi_dprt_cbfn - DPRT completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
	void *cmd;
	uint8_t *pld;
	uint32_t len = 0;
	uint32_t numattrs = 0;
	__be32 maxpayload = htonl(65536);
	struct fc_fdmi_hba_identifier *hbaid;
	struct csio_lnode *ln = fdmi_req->lnode;
	struct fc_fdmi_rpl *reg_pl;
	struct fs_fdmi_attrs *attrib_blk;
	uint8_t buf[64];
	unsigned long flags;

	if (fdmi_req->wr_status != FW_SUCCESS) {
		csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
			    fdmi_req->wr_status);
		CSIO_INC_STATS(ln, n_fdmi_err);
	}

	if (!csio_is_rnode_ready(fdmi_req->rnode)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		return;
	}
	cmd = fdmi_req->dma_buf.vaddr;
	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
		csio_ln_dbg(ln, "fdmi dprt cmd rejected reason %x expl %x\n",
			    csio_ct_reason(cmd), csio_ct_expl(cmd));
	}

	/* Prepare CT hdr for RHBA cmd */
	memset(cmd, 0, FC_CT_HDR_LEN);
	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RHBA);
	len = FC_CT_HDR_LEN;

	/* Prepare RHBA payload */
	pld = (uint8_t *)csio_ct_get_pld(cmd);
	hbaid = (struct fc_fdmi_hba_identifier *)pld;
	memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); /* HBA identifier */
	pld += sizeof(*hbaid);

	/* Register one port per hba */
	reg_pl = (struct fc_fdmi_rpl *)pld;
	reg_pl->numport = htonl(1);
	memcpy(&reg_pl->port[0].portname, csio_ln_wwpn(ln), 8);
	pld += sizeof(*reg_pl);

	/* Start appending HBA attributes */
	attrib_blk = (struct fs_fdmi_attrs *)pld;
	attrib_blk->numattrs = 0;
	len += sizeof(attrib_blk->numattrs);
	pld += sizeof(attrib_blk->numattrs);

	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_NODENAME, csio_ln_wwnn(ln),
			   FC_FDMI_HBA_ATTR_NODENAME_LEN);
	numattrs++;

	memset(buf, 0, sizeof(buf));

	strcpy(buf, "Chelsio Communications");
	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf,
			   strlen(buf));
	numattrs++;
	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER,
			   hw->vpd.sn, sizeof(hw->vpd.sn));
	numattrs++;
	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id,
			   sizeof(hw->vpd.id));
	numattrs++;
	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
			   hw->model_desc, strlen(hw->model_desc));
	numattrs++;
	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION,
			   hw->hw_ver, sizeof(hw->hw_ver));
	numattrs++;
	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
			   hw->fwrev_str, strlen(hw->fwrev_str));
	numattrs++;

	if (!csio_osname(buf, sizeof(buf))) {
		csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION,
				   buf, strlen(buf));
		numattrs++;
	}

	csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
			   &maxpayload, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
	len = (uint32_t)(pld - (uint8_t *)cmd);
	numattrs++;
	attrib_blk->numattrs = htonl(numattrs);

	/* Submit FDMI RHBA request */
	spin_lock_irqsave(&hw->lock, flags);
	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
				FCOE_CT, &fdmi_req->dma_buf, len)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
	}
	spin_unlock_irqrestore(&hw->lock, flags);
}

/*
 * csio_ln_fdmi_dhba_cbfn - DHBA completion
 * @hw: HW context
 * @fdmi_req: fdmi request
 */
static void
csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
{
	struct csio_lnode *ln = fdmi_req->lnode;
	void *cmd;
	struct fc_fdmi_port_name *port_name;
	uint32_t len;
	unsigned long flags;

	if (fdmi_req->wr_status != FW_SUCCESS) {
		csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
			    fdmi_req->wr_status);
		CSIO_INC_STATS(ln, n_fdmi_err);
	}

	if (!csio_is_rnode_ready(fdmi_req->rnode)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		return;
	}
	cmd = fdmi_req->dma_buf.vaddr;
	if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
		csio_ln_dbg(ln, "fdmi dhba cmd rejected reason %x expl %x\n",
			    csio_ct_reason(cmd), csio_ct_expl(cmd));
	}

	/* Send FDMI cmd to de-register any Port attributes if registered
	 * before
	 */

	/* Prepare FDMI DPRT cmd */
	memset(cmd, 0, FC_CT_HDR_LEN);
	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DPRT);
	len = FC_CT_HDR_LEN;
	port_name = (struct fc_fdmi_port_name *)csio_ct_get_pld(cmd);
	memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
	len += sizeof(*port_name);

	/* Submit FDMI request */
	spin_lock_irqsave(&hw->lock, flags);
	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
				FCOE_CT, &fdmi_req->dma_buf, len)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
	}
	spin_unlock_irqrestore(&hw->lock, flags);
}

/**
 * csio_ln_fdmi_start - Start an FDMI request.
 * @ln:		lnode
 * @context:	session context
 *
 * Issued with lock held.
 */
int
csio_ln_fdmi_start(struct csio_lnode *ln, void *context)
{
	struct csio_ioreq *fdmi_req;
	struct csio_rnode *fdmi_rn = (struct csio_rnode *)context;
	void *cmd;
	struct fc_fdmi_hba_identifier *hbaid;
	uint32_t len;

	if (!(ln->flags & CSIO_LNF_FDMI_ENABLE))
		return -EPROTONOSUPPORT;

	if (!csio_is_rnode_ready(fdmi_rn))
		CSIO_INC_STATS(ln, n_fdmi_err);

	/* Send FDMI cmd to de-register any HBA attributes if registered
	 * before
	 */

	fdmi_req = ln->mgmt_req;
	fdmi_req->lnode = ln;
	fdmi_req->rnode = fdmi_rn;

	/* Prepare FDMI DHBA cmd */
	cmd = fdmi_req->dma_buf.vaddr;
	memset(cmd, 0, FC_CT_HDR_LEN);
	csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DHBA);
	len = FC_CT_HDR_LEN;

	hbaid = (struct fc_fdmi_hba_identifier *)csio_ct_get_pld(cmd);
	memcpy(&hbaid->id, csio_ln_wwpn(ln), 8);
	len += sizeof(*hbaid);

	/* Submit FDMI request */
	if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dhba_cbfn,
					FCOE_CT, &fdmi_req->dma_buf, len)) {
		CSIO_INC_STATS(ln, n_fdmi_err);
		csio_ln_dbg(ln, "Failed to issue fdmi dhba req\n");
	}

	return 0;
}
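
/*
 * For reference, the FDMI registration kicked off above is a chain of CT
 * requests, each one issued from the previous request's completion
 * callback:
 *
 *	csio_ln_fdmi_start()     - DHBA: de-register stale HBA attributes
 *	csio_ln_fdmi_dhba_cbfn() - DPRT: de-register stale port attributes
 *	csio_ln_fdmi_dprt_cbfn() - RHBA: register HBA attributes
 *	csio_ln_fdmi_rhba_cbfn() - RPA:  register port attributes
 *	csio_ln_fdmi_done()      - final RPA status check
 */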

/*
 * csio_ln_vnp_read_cbfn - vnp read completion handler.
 * @hw: HW module
 * @mbp: Mailbox carrying the VNP read response.
 *
 * Reads vnp response and updates ln parameters.
 */
static void
csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct csio_lnode *ln = ((struct csio_lnode *)mbp->priv);
	struct fw_fcoe_vnp_cmd *rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
	struct fc_els_csp *csp;
	struct fc_els_cssp *clsp;
	enum fw_retval retval;
	__be32 nport_id = 0;

	retval = FW_CMD_RETVAL_G(ntohl(rsp->alloc_to_len16));
	if (retval != FW_SUCCESS) {
		csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval);
		mempool_free(mbp, hw->mb_mempool);
		return;
	}

	spin_lock_irq(&hw->lock);

	memcpy(ln->mac, rsp->vnport_mac, sizeof(ln->mac));
	memcpy(&nport_id, &rsp->vnport_mac[3], sizeof(uint8_t) * 3);
	ln->nport_id = ntohl(nport_id);
	ln->nport_id = ln->nport_id >> 8;

	/* Update WWNs */
	/*
	 * This may look like a duplication of what csio_fcoe_enable_link()
	 * does, but is absolutely necessary if the vnpi changes between
	 * a FCOE LINK UP and FCOE LINK DOWN.
	 */
	memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
	memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);

	/* Copy common sparam */
	csp = (struct fc_els_csp *)rsp->cmn_srv_parms;
	ln->ln_sparm.csp.sp_hi_ver = csp->sp_hi_ver;
	ln->ln_sparm.csp.sp_lo_ver = csp->sp_lo_ver;
	ln->ln_sparm.csp.sp_bb_cred = csp->sp_bb_cred;
	ln->ln_sparm.csp.sp_features = csp->sp_features;
	ln->ln_sparm.csp.sp_bb_data = csp->sp_bb_data;
	ln->ln_sparm.csp.sp_r_a_tov = csp->sp_r_a_tov;
	ln->ln_sparm.csp.sp_e_d_tov = csp->sp_e_d_tov;

	/* Copy word 0 & word 1 of class sparam */
	clsp = (struct fc_els_cssp *)rsp->clsp_word_0_1;
	ln->ln_sparm.clsp[2].cp_class = clsp->cp_class;
	ln->ln_sparm.clsp[2].cp_init = clsp->cp_init;
	ln->ln_sparm.clsp[2].cp_recip = clsp->cp_recip;
	ln->ln_sparm.clsp[2].cp_rdfs = clsp->cp_rdfs;

	spin_unlock_irq(&hw->lock);

	mempool_free(mbp, hw->mb_mempool);

	/* Send an event to update local attribs */
	csio_lnode_async_event(ln, CSIO_LN_FC_ATTRIB_UPDATE);
}
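
/*
 * Note on the nport_id derivation above: with fabric-provided MAC
 * addresses (FPMA) the low three bytes of the VN-Port MAC carry the
 * 24-bit N_Port ID. Copying vnport_mac[3..5] into the top of a __be32
 * and shifting the host-order value right by 8 leaves just the ID, e.g.
 * a MAC of 0e:fc:00:01:02:03 yields nport_id 0x010203.
 */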

/*
 * csio_ln_vnp_read - Read vnp params.
 * @ln: lnode
 * @cbfn: Completion handler.
 *
 * Issued with lock held.
 */
static int
csio_ln_vnp_read(struct csio_lnode *ln,
		void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_hw *hw = ln->hwp;
	struct csio_mb *mbp;

	/* Allocate Mbox request */
	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Prepare VNP Command */
	csio_fcoe_vnp_read_init_mb(ln, mbp,
				    CSIO_MB_DEFAULT_TMO,
				    ln->fcf_flowid,
				    ln->vnp_flowid,
				    cbfn);

	/* Issue MBOX cmd */
	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "Failed to issue mbox FCoE VNP command\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	return 0;
}

/*
 * csio_fcoe_enable_link - Enable fcoe link.
 * @ln: lnode
 * @enable: enable/disable
 * Issued with lock held.
 * Issues mbox cmd to bring up FCOE link on port associated with given ln.
 */
static int
csio_fcoe_enable_link(struct csio_lnode *ln, bool enable)
{
	struct csio_hw *hw = ln->hwp;
	struct csio_mb *mbp;
	enum fw_retval retval;
	uint8_t portid;
	uint8_t sub_op;
	struct fw_fcoe_link_cmd *lcmd;
	int i;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	portid = ln->portid;
	sub_op = enable ? FCOE_LINK_UP : FCOE_LINK_DOWN;

	csio_dbg(hw, "bringing FCOE LINK %s on Port:%d\n",
		 sub_op ? "UP" : "DOWN", portid);

	csio_write_fcoe_link_cond_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
					  portid, sub_op, 0, 0, 0, NULL);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FCOE LINK cmd on port[%d]\n",
			portid);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	retval = csio_mb_fw_retval(mbp);
	if (retval != FW_SUCCESS) {
		csio_err(hw,
			 "FCOE LINK %s cmd on port[%d] failed with "
			 "ret:x%x\n", sub_op ? "UP" : "DOWN", portid, retval);
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	if (!enable)
		goto out;

	lcmd = (struct fw_fcoe_link_cmd *)mbp->mb;

	memcpy(csio_ln_wwnn(ln), lcmd->vnport_wwnn, 8);
	memcpy(csio_ln_wwpn(ln), lcmd->vnport_wwpn, 8);

	for (i = 0; i < CSIO_MAX_PPORTS; i++)
		if (hw->pport[i].portid == portid)
			memcpy(hw->pport[i].mac, lcmd->phy_mac, 6);

out:
	mempool_free(mbp, hw->mb_mempool);
	return 0;
}

/*
 * csio_ln_read_fcf_cbfn - Read fcf parameters
 * @hw: HW module
 * @mbp: Mailbox carrying the FCF read response.
 *
 * Reads fcf response and updates ln fcf information.
 */
static void
csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
{
	struct csio_lnode *ln = (struct csio_lnode *)mbp->priv;
	struct csio_fcf_info *fcf_info;
	struct fw_fcoe_fcf_cmd *rsp =
				(struct fw_fcoe_fcf_cmd *)(mbp->mb);
	enum fw_retval retval;

	retval = FW_CMD_RETVAL_G(ntohl(rsp->retval_len16));
	if (retval != FW_SUCCESS) {
		csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n",
				retval);
		mempool_free(mbp, hw->mb_mempool);
		return;
	}

	spin_lock_irq(&hw->lock);
	fcf_info = ln->fcfinfo;
	fcf_info->priority = FW_FCOE_FCF_CMD_PRIORITY_GET(
					ntohs(rsp->priority_pkd));
	fcf_info->vf_id = ntohs(rsp->vf_id);
	fcf_info->vlan_id = rsp->vlan_id;
	fcf_info->max_fcoe_size = ntohs(rsp->max_fcoe_size);
	fcf_info->fka_adv = be32_to_cpu(rsp->fka_adv);
	fcf_info->fcfi = FW_FCOE_FCF_CMD_FCFI_GET(ntohl(rsp->op_to_fcfi));
	fcf_info->fpma = FW_FCOE_FCF_CMD_FPMA_GET(rsp->fpma_to_portid);
	fcf_info->spma = FW_FCOE_FCF_CMD_SPMA_GET(rsp->fpma_to_portid);
	fcf_info->login = FW_FCOE_FCF_CMD_LOGIN_GET(rsp->fpma_to_portid);
	fcf_info->portid = FW_FCOE_FCF_CMD_PORTID_GET(rsp->fpma_to_portid);
	memcpy(fcf_info->fc_map, rsp->fc_map, sizeof(fcf_info->fc_map));
	memcpy(fcf_info->mac, rsp->mac, sizeof(fcf_info->mac));
	memcpy(fcf_info->name_id, rsp->name_id, sizeof(fcf_info->name_id));
	memcpy(fcf_info->fabric, rsp->fabric, sizeof(fcf_info->fabric));
	memcpy(fcf_info->spma_mac, rsp->spma_mac, sizeof(fcf_info->spma_mac));

	spin_unlock_irq(&hw->lock);

	mempool_free(mbp, hw->mb_mempool);
}

/*
 * csio_ln_read_fcf_entry - Read fcf entry.
 * @ln: lnode
 * @cbfn: Completion handler.
 *
 * Issued with lock held.
 */
static int
csio_ln_read_fcf_entry(struct csio_lnode *ln,
			void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct csio_hw *hw = ln->hwp;
	struct csio_mb *mbp;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		CSIO_INC_STATS(hw, n_err_nomem);
		return -ENOMEM;
	}

	/* Get FCoE FCF information */
	csio_fcoe_read_fcf_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
				      ln->portid, ln->fcf_flowid, cbfn);

	if (csio_mb_issue(hw, mbp)) {
		csio_err(hw, "failed to issue FCOE FCF cmd\n");
		mempool_free(mbp, hw->mb_mempool);
		return -EINVAL;
	}

	return 0;
}

/*
 * csio_handle_link_up - Logical Linkup event.
 * @hw - HW module.
 * @portid - Physical port number
 * @fcfi - FCF index.
 * @vnpi - VNP index.
 * Returns - none.
 *
 * This event is received from FW when a virtual link is established between
 * the Physical port [ENode] and the FCF. If it is a new vnpi, a local node
 * object is created on this FCF and set to [ONLINE] state.
 * The lnode then waits for a FW_RDEV_CMD event indicating that Fabric login
 * is completed, at which point it moves to [READY] state.
 *
 * This is called with the hw lock held.
 */
static void
csio_handle_link_up(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
		    uint32_t vnpi)
{
	struct csio_lnode *ln = NULL;

	/* Lookup lnode based on vnpi */
	ln = csio_ln_lookup_by_vnpi(hw, vnpi);
	if (!ln) {
		/* Pick lnode based on portid */
		ln = csio_ln_lookup_by_portid(hw, portid);
		if (!ln) {
			csio_err(hw, "failed to lookup fcoe lnode on port:%d\n",
				portid);
			CSIO_DB_ASSERT(0);
			return;
		}

		/* Check if lnode has valid vnp flowid */
		if (ln->vnp_flowid != CSIO_INVALID_IDX) {
			/* New VN-Port */
			spin_unlock_irq(&hw->lock);
			ln = csio_lnode_alloc(hw);
			spin_lock_irq(&hw->lock);
			if (!ln) {
				csio_err(hw,
					 "failed to allocate fcoe lnode "
					 "for port:%d vnpi:x%x\n",
					 portid, vnpi);
				CSIO_DB_ASSERT(0);
				return;
			}
			ln->portid = portid;
		}
		ln->vnp_flowid = vnpi;
		ln->dev_num &= ~0xFFFF;
		ln->dev_num |= vnpi;
	}

	/* Initialize fcfi */
	ln->fcf_flowid = fcfi;

	csio_info(hw, "Port:%d - FCOE LINK UP\n", portid);

	CSIO_INC_STATS(ln, n_link_up);

	/* Send LINKUP event to SM */
	csio_post_event(&ln->sm, CSIO_LNE_LINKUP);
}

/*
 * csio_post_event_rns
 * @ln - FCOE lnode
 * @evt - Given rnode event
 * Returns - none
 *
 * Posts given rnode event to all FCOE rnodes connected with given Lnode.
 * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
 * event.
 *
 * This is called with the hw lock held.
 */
static void
csio_post_event_rns(struct csio_lnode *ln, enum csio_rn_ev evt)
{
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp, *next;
	struct csio_rnode *rn;

	list_for_each_safe(tmp, next, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		csio_post_event(&rn->sm, evt);
	}
}

/*
 * csio_cleanup_rns
 * @ln - FCOE lnode
 * Returns - none
 *
 * Frees all FCOE rnodes connected with given Lnode.
 *
 * This is called with the hw lock held.
 */
static void
csio_cleanup_rns(struct csio_lnode *ln)
{
	struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
	struct list_head *tmp, *next_rn;
	struct csio_rnode *rn;

	list_for_each_safe(tmp, next_rn, &rnhead->sm.sm_list) {
		rn = (struct csio_rnode *) tmp;
		csio_put_rnode(ln, rn);
	}
}

/*
 * csio_post_event_lns
 * @ln - FCOE lnode
 * @evt - Given lnode event
 * Returns - none
 *
 * Posts given lnode event to all FCOE lnodes connected with given Lnode.
 * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
 * event.
 *
 * This is called with the hw lock held.
 */
static void
csio_post_event_lns(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct list_head *tmp;
	struct csio_lnode *cln, *sln;

	/* If NPIV lnode, send evt only to that and return */
	if (csio_is_npiv_ln(ln)) {
		csio_post_event(&ln->sm, evt);
		return;
	}

	sln = ln;
	/* Traverse children lnodes list and send evt */
	list_for_each(tmp, &sln->cln_head) {
		cln = (struct csio_lnode *) tmp;
		csio_post_event(&cln->sm, evt);
	}

	/* Send evt to parent lnode */
	csio_post_event(&ln->sm, evt);
}

/*
 * csio_ln_down - Local nport is down
 * @ln - FCOE Lnode
 * Returns - none
 *
 * Sends LINK_DOWN events to Lnode and its associated NPIV lnodes.
 *
 * This is called with the hw lock held.
 */
static void
csio_ln_down(struct csio_lnode *ln)
{
	csio_post_event_lns(ln, CSIO_LNE_LINK_DOWN);
}

/*
 * csio_handle_link_down - Logical Linkdown event.
 * @hw - HW module.
 * @portid - Physical port number
 * @fcfi - FCF index.
 * @vnpi - VNP index.
 * Returns - none
 *
 * This event is received from FW when the virtual link goes down between
 * the Physical port [ENode] and the FCF. The lnode and its associated NPIV
 * lnodes hosted on this vnpi [VN-Port] will be de-instantiated.
 *
 * This is called with the hw lock held.
 */
static void
csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
		      uint32_t vnpi)
{
	struct csio_fcf_info *fp;
	struct csio_lnode *ln;

	/* Lookup lnode based on vnpi */
	ln = csio_ln_lookup_by_vnpi(hw, vnpi);
	if (ln) {
		fp = ln->fcfinfo;
		CSIO_INC_STATS(ln, n_link_down);

		/* Warn if linkdown received when lnode is not in ready state */
		if (!csio_is_lnode_ready(ln)) {
			csio_ln_warn(ln,
				"warn: FCOE link is already offline. "
				"Ignoring FCoE linkdown event on portid %d\n",
				 portid);
			CSIO_INC_STATS(ln, n_evt_drop);
			return;
		}

		/* Verify portid */
		if (fp->portid != portid) {
			csio_ln_warn(ln,
				"warn: FCOE linkdown recv with "
				"invalid port %d\n", portid);
			CSIO_INC_STATS(ln, n_evt_drop);
			return;
		}

		/* Verify fcfi */
		if (ln->fcf_flowid != fcfi) {
			csio_ln_warn(ln,
				"warn: FCOE linkdown recv with "
				"invalid fcfi x%x\n", fcfi);
			CSIO_INC_STATS(ln, n_evt_drop);
			return;
		}

		csio_info(hw, "Port:%d - FCOE LINK DOWN\n", portid);

		/* Send LINK_DOWN event to lnode s/m */
		csio_ln_down(ln);

		return;
	} else {
		csio_warn(hw,
			  "warn: FCOE linkdown recv with invalid vnpi x%x\n",
			  vnpi);
		CSIO_INC_STATS(hw, n_evt_drop);
	}
}

/*
 * csio_is_lnode_ready - Checks FCOE lnode is in ready state.
 * @ln: Lnode module
 *
 * Returns True if FCOE lnode is in ready state.
 */
int
csio_is_lnode_ready(struct csio_lnode *ln)
{
	return (csio_get_state(ln) == csio_lns_ready);
}

/*****************************************************************************/
/* START: Lnode SM                                                           */
/*****************************************************************************/
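/*
 * State transitions at a glance (informational sketch derived from the
 * handlers below):
 *
 *	uninit  --LINKUP--------------------> online
 *	online  --FAB_INIT_DONE-------------> ready
 *	online  --LINK_DOWN/DOWN_LINK-------> uninit
 *	ready   --LINK_DOWN/DOWN_LINK/LOGO--> offline
 *	ready   --CLOSE---------------------> uninit
 *	offline --LINKUP--------------------> online
 *	offline --CLOSE---------------------> uninit
 */
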
/*
 * csio_lns_uninit - Lnode SM handler for the "uninit" state.
 * @ln - FCOE lnode.
 * @evt - Event to be processed.
 *
 * Processes the given event for an lnode currently in the "uninit" state.
 * Invoked with HW lock held.
 * Return - none.
 */
static void
csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_lnode *rln = hw->rln;
	int rv;

	CSIO_INC_STATS(ln, n_evt_sm[evt]);
	switch (evt) {
	case CSIO_LNE_LINKUP:
		csio_set_state(&ln->sm, csio_lns_online);
		/* Read FCF only for physical lnode */
		if (csio_is_phys_ln(ln)) {
			rv = csio_ln_read_fcf_entry(ln,
					csio_ln_read_fcf_cbfn);
			if (rv != 0) {
				/* TODO: Send HW RESET event */
				CSIO_INC_STATS(ln, n_err);
				break;
			}

			/* Add FCF record */
			list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
		}

		rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
		if (rv != 0) {
			/* TODO: Send HW RESET event */
			CSIO_INC_STATS(ln, n_err);
		}
		break;

	case CSIO_LNE_DOWN_LINK:
		break;

	default:
		csio_ln_dbg(ln,
			    "unexp ln event %d recv from did:x%x in "
			    "ln state[uninit].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_unexp);
		break;
	} /* switch event */
}

/*
 * csio_lns_online - Lnode SM handler for the "online" state.
 * @ln - FCOE lnode.
 * @evt - Event to be processed.
 *
 * Processes the given event for an lnode currently in the "online" state.
 * Invoked with HW lock held.
 * Return - none.
 */
static void
csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	CSIO_INC_STATS(ln, n_evt_sm[evt]);
	switch (evt) {
	case CSIO_LNE_LINKUP:
		csio_ln_warn(ln,
			     "warn: FCOE link is up already. "
			     "Ignoring linkup on port:%d\n", ln->portid);
		CSIO_INC_STATS(ln, n_evt_drop);
		break;

	case CSIO_LNE_FAB_INIT_DONE:
		csio_set_state(&ln->sm, csio_lns_ready);

		spin_unlock_irq(&hw->lock);
		csio_lnode_async_event(ln, CSIO_LN_FC_LINKUP);
		spin_lock_irq(&hw->lock);

		break;

	case CSIO_LNE_LINK_DOWN:
	case CSIO_LNE_DOWN_LINK:
		csio_set_state(&ln->sm, csio_lns_uninit);
		if (csio_is_phys_ln(ln)) {
			/* Remove FCF entry */
			list_del_init(&ln->fcfinfo->list);
		}
		break;

	default:
		csio_ln_dbg(ln,
			    "unexp ln event %d recv from did:x%x in "
			    "ln state[online].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_unexp);

		break;
	} /* switch event */
}

/*
 * csio_lns_ready - Lnode SM handler for the "ready" state.
 * @ln - FCOE lnode.
 * @evt - Event to be processed.
 *
 * Processes the given event for an lnode currently in the "ready" state.
 * Invoked with HW lock held.
 * Return - none.
 */
static void
csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);

	CSIO_INC_STATS(ln, n_evt_sm[evt]);
	switch (evt) {
	case CSIO_LNE_FAB_INIT_DONE:
		csio_ln_dbg(ln,
			    "ignoring event %d recv from did x%x "
			    "in ln state[ready].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_drop);
		break;

	case CSIO_LNE_LINK_DOWN:
		csio_set_state(&ln->sm, csio_lns_offline);
		csio_post_event_rns(ln, CSIO_RNFE_DOWN);

		spin_unlock_irq(&hw->lock);
		csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
		spin_lock_irq(&hw->lock);

		if (csio_is_phys_ln(ln)) {
			/* Remove FCF entry */
			list_del_init(&ln->fcfinfo->list);
		}
		break;

	case CSIO_LNE_DOWN_LINK:
		csio_set_state(&ln->sm, csio_lns_offline);
		csio_post_event_rns(ln, CSIO_RNFE_DOWN);

		/* Host needs to issue aborts in case FW has not returned
		 * WRs with status "ABORTED"
		 */
		spin_unlock_irq(&hw->lock);
		csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
		spin_lock_irq(&hw->lock);

		if (csio_is_phys_ln(ln)) {
			/* Remove FCF entry */
			list_del_init(&ln->fcfinfo->list);
		}
		break;

	case CSIO_LNE_CLOSE:
		csio_set_state(&ln->sm, csio_lns_uninit);
		csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
		break;

	case CSIO_LNE_LOGO:
		csio_set_state(&ln->sm, csio_lns_offline);
		csio_post_event_rns(ln, CSIO_RNFE_DOWN);
		break;

	default:
		csio_ln_dbg(ln,
			    "unexp ln event %d recv from did:x%x in "
			    "ln state[ready].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_unexp);
		CSIO_DB_ASSERT(0);
		break;
	} /* switch event */
}

/*
 * csio_lns_offline - Lnode SM handler for the "offline" state.
 * @ln - FCOE lnode.
 * @evt - Event to be processed.
 *
 * Processes the given event for an lnode currently in the "offline" state.
 * Invoked with HW lock held.
 * Return - none.
 */
static void
csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt)
{
	struct csio_hw *hw = csio_lnode_to_hw(ln);
	struct csio_lnode *rln = hw->rln;
	int rv;

	CSIO_INC_STATS(ln, n_evt_sm[evt]);
	switch (evt) {
	case CSIO_LNE_LINKUP:
		csio_set_state(&ln->sm, csio_lns_online);
		/* Read FCF only for physical lnode */
		if (csio_is_phys_ln(ln)) {
			rv = csio_ln_read_fcf_entry(ln,
					csio_ln_read_fcf_cbfn);
			if (rv != 0) {
				/* TODO: Send HW RESET event */
				CSIO_INC_STATS(ln, n_err);
				break;
			}

			/* Add FCF record */
			list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
		}

		rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
		if (rv != 0) {
			/* TODO: Send HW RESET event */
			CSIO_INC_STATS(ln, n_err);
		}
		break;

	case CSIO_LNE_LINK_DOWN:
	case CSIO_LNE_DOWN_LINK:
	case CSIO_LNE_LOGO:
		csio_ln_dbg(ln,
			    "ignoring event %d recv from did x%x "
			    "in ln state[offline].\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_drop);
		break;

	case CSIO_LNE_CLOSE:
		csio_set_state(&ln->sm, csio_lns_uninit);
		csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
		break;

	default:
		csio_ln_dbg(ln,
			    "unexp ln event %d recv from did:x%x in "
			    "ln state[offline]\n", evt, ln->nport_id);
		CSIO_INC_STATS(ln, n_evt_unexp);
		CSIO_DB_ASSERT(0);
		break;
	} /* switch event */
}

/*****************************************************************************/
/* END: Lnode SM                                                             */
/*****************************************************************************/

static void
csio_free_fcfinfo(struct kref *kref)
{
	struct csio_fcf_info *fcfinfo = container_of(kref,
						struct csio_fcf_info, kref);
	kfree(fcfinfo);
}

/* Helper routines for attributes */
/*
 * csio_lnode_state_to_str - Get current state of FCOE lnode.
 * @ln - lnode
 * @str - state of lnode.
 */
void
csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str)
{
	if (csio_get_state(ln) == csio_lns_uninit) {
		strcpy(str, "UNINIT");
		return;
	}
	if (csio_get_state(ln) == csio_lns_ready) {
		strcpy(str, "READY");
		return;
	}
	if (csio_get_state(ln) == csio_lns_offline) {
		strcpy(str, "OFFLINE");
		return;
	}
	strcpy(str, "UNKNOWN");
} /* csio_lnode_state_to_str */

int
csio_get_phy_port_stats(struct csio_hw *hw, uint8_t portid,
			struct fw_fcoe_port_stats *port_stats)
{
	struct csio_mb *mbp;
	struct fw_fcoe_port_cmd_params portparams;
	enum fw_retval retval;
	int idx;

	mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
	if (!mbp) {
		csio_err(hw, "FCoE PORT PARAMS command out of memory!\n");
		return -ENOMEM;
	}
	portparams.portid = portid;

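	/*
	 * Firmware hands the port stats back in chunks: the loop below
	 * fetches 6 + 6 + 4 = 16 counters over three mailbox commands,
	 * portparams.idx being the 1-based offset of each chunk (1, 7, 13).
	 */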
	for (idx = 1; idx <= 3; idx++) {
		portparams.idx = (idx - 1) * 6 + 1;
		portparams.nstats = 6;
		if (idx == 3)
			portparams.nstats = 4;
		csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO,
							&portparams, NULL);
		if (csio_mb_issue(hw, mbp)) {
			csio_err(hw, "Issue of FCoE port params failed!\n");
			mempool_free(mbp, hw->mb_mempool);
			return -EINVAL;
		}
		csio_mb_process_portparams_rsp(hw, mbp, &retval,
						&portparams, port_stats);
	}

	mempool_free(mbp, hw->mb_mempool);
	return 0;
}

/*
 * csio_ln_mgmt_wr_handler - Mgmt Work Request handler.
 * @hw - HW module.
 * @wr - WR.
 * @len - WR len.
 * This handler is invoked when an outstanding mgmt WR is completed.
 * It is invoked in the context of the FW event worker thread for every
 * mgmt event received.
 * Return - none.
 */
static void
csio_ln_mgmt_wr_handler(struct csio_hw *hw, void *wr, uint32_t len)
{
	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
	struct csio_ioreq *io_req = NULL;
	struct fw_fcoe_els_ct_wr *wr_cmd;

	wr_cmd = (struct fw_fcoe_els_ct_wr *) wr;

	if (len < sizeof(struct fw_fcoe_els_ct_wr)) {
		csio_err(mgmtm->hw,
			 "Invalid ELS CT WR length recvd, len:%x\n", len);
		mgmtm->stats.n_err++;
		return;
	}

	io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie);
	io_req->wr_status = csio_wr_status(wr_cmd);

	/* Look up whether this ioreq exists in our active queue */
	spin_lock_irq(&hw->lock);
	if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) {
		csio_err(mgmtm->hw,
			"Error- Invalid IO handle recv in WR. handle: %p\n",
			io_req);
		mgmtm->stats.n_err++;
		spin_unlock_irq(&hw->lock);
		return;
	}

	mgmtm = csio_hw_to_mgmtm(hw);

	/* Dequeue from active queue */
	list_del_init(&io_req->sm.sm_list);
	mgmtm->stats.n_active--;
	spin_unlock_irq(&hw->lock);

	/* io_req will be freed by completion handler */
	if (io_req->io_cbfn)
		io_req->io_cbfn(hw, io_req);
}

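/*
 * Routing of firmware FCoE notifications handled below, in brief:
 *
 *	CPL_FW6_MSG + FW_FCOE_LINK_CMD  - virtual link up/down events
 *	CPL_FW6_PLD + FW_RDEV_WR        - remote device (fabric) events
 *	CPL_FW6_MSG + FW_FCOE_ELS_CT_WR - mgmt ELS/CT WR completions
 */
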
/**
 * csio_fcoe_fwevt_handler - Event handler for Firmware FCoE events.
 * @hw:		HW module
 * @cpl_op:	CPL opcode
 * @cmd:	FW cmd/WR.
 *
 * Process received FCoE cmd/WR event from FW.
 */
void
csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd)
{
	struct csio_lnode *ln;
	struct csio_rnode *rn;
	uint8_t portid, opcode = *(uint8_t *)cmd;
	struct fw_fcoe_link_cmd *lcmd;
	struct fw_wr_hdr *wr;
	struct fw_rdev_wr *rdev_wr;
	enum fw_fcoe_link_status lstatus;
	uint32_t fcfi, rdev_flowid, vnpi;
	enum csio_ln_ev evt;

	if (cpl_op == CPL_FW6_MSG && opcode == FW_FCOE_LINK_CMD) {
		lcmd = (struct fw_fcoe_link_cmd *)cmd;
		lstatus = lcmd->lstatus;
		portid = FW_FCOE_LINK_CMD_PORTID_GET(
					ntohl(lcmd->op_to_portid));
		fcfi = FW_FCOE_LINK_CMD_FCFI_GET(ntohl(lcmd->sub_opcode_fcfi));
		vnpi = FW_FCOE_LINK_CMD_VNPI_GET(ntohl(lcmd->vnpi_pkd));

		if (lstatus == FCOE_LINKUP) {
			/* HW lock here */
			spin_lock_irq(&hw->lock);
			csio_handle_link_up(hw, portid, fcfi, vnpi);
			spin_unlock_irq(&hw->lock);
			/* HW unlock here */
		} else if (lstatus == FCOE_LINKDOWN) {
			/* HW lock here */
			spin_lock_irq(&hw->lock);
			csio_handle_link_down(hw, portid, fcfi, vnpi);
			spin_unlock_irq(&hw->lock);
			/* HW unlock here */
		} else {
			csio_warn(hw, "Unexpected FCOE LINK status:0x%x\n",
				  lcmd->lstatus);
			CSIO_INC_STATS(hw, n_cpl_unexp);
		}
	} else if (cpl_op == CPL_FW6_PLD) {
		wr = (struct fw_wr_hdr *) (cmd + 4);
		if (FW_WR_OP_G(be32_to_cpu(wr->hi))
			== FW_RDEV_WR) {
			rdev_wr = (struct fw_rdev_wr *) (cmd + 4);

			rdev_flowid = FW_RDEV_WR_FLOWID_GET(
					ntohl(rdev_wr->alloc_to_len16));
			vnpi = FW_RDEV_WR_ASSOC_FLOWID_GET(
				    ntohl(rdev_wr->flags_to_assoc_flowid));

			csio_dbg(hw,
				"FW_RDEV_WR: flowid:x%x ev_cause:x%x "
				"vnpi:0x%x\n", rdev_flowid,
				rdev_wr->event_cause, vnpi);

			if (rdev_wr->protocol != PROT_FCOE) {
				csio_err(hw,
					"FW_RDEV_WR: invalid proto:x%x "
					"received with flowid:x%x\n",
					rdev_wr->protocol,
					rdev_flowid);
				CSIO_INC_STATS(hw, n_evt_drop);
				return;
			}

			/* HW lock here */
			spin_lock_irq(&hw->lock);
			ln = csio_ln_lookup_by_vnpi(hw, vnpi);
			if (!ln) {
				csio_err(hw,
					"FW_RDEV_WR: invalid vnpi:x%x received "
					"with flowid:x%x\n", vnpi, rdev_flowid);
				CSIO_INC_STATS(hw, n_evt_drop);
				goto out_pld;
			}

			rn = csio_confirm_rnode(ln, rdev_flowid,
					&rdev_wr->u.fcoe_rdev);
			if (!rn) {
				csio_ln_dbg(ln,
					"Failed to confirm rnode "
					"for flowid:x%x\n", rdev_flowid);
				CSIO_INC_STATS(hw, n_evt_drop);
				goto out_pld;
			}

			/* save previous event for debugging */
			ln->prev_evt = ln->cur_evt;
			ln->cur_evt = rdev_wr->event_cause;
			CSIO_INC_STATS(ln, n_evt_fw[rdev_wr->event_cause]);

			/* Translate all the fabric events to lnode SM events */
			evt = CSIO_FWE_TO_LNE(rdev_wr->event_cause);
			if (evt) {
				csio_ln_dbg(ln,
					"Posting event to lnode event:%d "
					"cause:%d flowid:x%x\n", evt,
					rdev_wr->event_cause, rdev_flowid);
				csio_post_event(&ln->sm, evt);
			}

			/* Handover event to rn SM here. */
			csio_rnode_fwevt_handler(rn, rdev_wr->event_cause);
out_pld:
			spin_unlock_irq(&hw->lock);
			return;
		} else {
			csio_warn(hw, "unexpected WR op(0x%x) recv\n",
				  FW_WR_OP_G(be32_to_cpu((wr->hi))));
			CSIO_INC_STATS(hw, n_cpl_unexp);
		}
	} else if (cpl_op == CPL_FW6_MSG) {
		wr = (struct fw_wr_hdr *) (cmd);
		if (FW_WR_OP_G(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) {
			csio_ln_mgmt_wr_handler(hw, wr,
					sizeof(struct fw_fcoe_els_ct_wr));
		} else {
			csio_warn(hw, "unexpected WR op(0x%x) recv\n",
				  FW_WR_OP_G(be32_to_cpu((wr->hi))));
			CSIO_INC_STATS(hw, n_cpl_unexp);
		}
	} else {
		csio_warn(hw, "unexpected CPL op(0x%x) recv\n", opcode);
		CSIO_INC_STATS(hw, n_cpl_unexp);
	}
}

/**
 * csio_lnode_start - Kickstart lnode discovery.
 * @ln:		lnode
 *
 * This routine kickstarts the discovery by issuing an FCOE_LINK (up) command.
 */
int
csio_lnode_start(struct csio_lnode *ln)
{
	int rv = 0;

	if (csio_is_phys_ln(ln) && !(ln->flags & CSIO_LNF_LINK_ENABLE)) {
		rv = csio_fcoe_enable_link(ln, 1);
		ln->flags |= CSIO_LNF_LINK_ENABLE;
	}

	return rv;
}

/**
 * csio_lnode_stop - Stop the lnode.
 * @ln:		lnode
 *
 * This routine is invoked by HW module to stop lnode and its associated NPIV
 * lnodes.
 */
void
csio_lnode_stop(struct csio_lnode *ln)
{
	csio_post_event_lns(ln, CSIO_LNE_DOWN_LINK);
	if (csio_is_phys_ln(ln) && (ln->flags & CSIO_LNF_LINK_ENABLE)) {
		csio_fcoe_enable_link(ln, 0);
		ln->flags &= ~CSIO_LNF_LINK_ENABLE;
	}
	csio_ln_dbg(ln, "stopping ln :%p\n", ln);
}

/**
 * csio_lnode_close - Close an lnode.
 * @ln:		lnode
 *
 * This routine is invoked by HW module to close an lnode and its
 * associated NPIV lnodes. Lnode and its associated NPIV lnodes are
 * set to uninitialized state.
 */
void
csio_lnode_close(struct csio_lnode *ln)
{
	csio_post_event_lns(ln, CSIO_LNE_CLOSE);
	if (csio_is_phys_ln(ln))
		ln->vnp_flowid = CSIO_INVALID_IDX;

	csio_ln_dbg(ln, "closed ln :%p\n", ln);
}

/*
 * csio_ln_prep_ecwr - Prepare ELS/CT WR.
 * @io_req - IO request.
 * @wr_len - WR len
 * @immd_len - WR immediate data len
 * @sub_op - Sub opcode
 * @sid - source portid.
 * @did - destination portid
 * @flow_id - flowid
 * @fw_wr - ELS/CT WR to be prepared.
 * Returns: 0 - on success
 */
static int
csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len,
		      uint32_t immd_len, uint8_t sub_op, uint32_t sid,
		      uint32_t did, uint32_t flow_id, uint8_t *fw_wr)
{
	struct fw_fcoe_els_ct_wr *wr;
	__be32 port_id;

	wr = (struct fw_fcoe_els_ct_wr *)fw_wr;
	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_FCOE_ELS_CT_WR) |
				     FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len));

	wr_len = DIV_ROUND_UP(wr_len, 16);
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID_V(flow_id) |
				       FW_WR_LEN16_V(wr_len));
	wr->els_ct_type = sub_op;
	wr->ctl_pri = 0;
	wr->cp_en_class = 0;
	wr->cookie = io_req->fw_handle;
	wr->iqid = cpu_to_be16(csio_q_physiqid(
					io_req->lnode->hwp, io_req->iq_idx));
	wr->fl_to_sp = FW_FCOE_ELS_CT_WR_SP(1);
	wr->tmo_val = (uint8_t) io_req->tmo;
	port_id = htonl(sid);
	memcpy(wr->l_id, PORT_ID_PTR(port_id), 3);
	port_id = htonl(did);
	memcpy(wr->r_id, PORT_ID_PTR(port_id), 3);

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(io_req->dma_buf.len);
	wr->rsp_dmaaddr = cpu_to_be64(io_req->dma_buf.paddr);
	return 0;
}
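
/*
 * LEN16 arithmetic above, with illustrative numbers: if the base WR plus
 * aligned immediate payload comes to 96 bytes, DIV_ROUND_UP(96, 16) = 6
 * is what lands in FW_WR_LEN16_V(); the hardware always counts WR length
 * in 16-byte units.
 */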
1712  
1713  /*
1714   * csio_ln_mgmt_submit_wr - Post an ELS/CT work request.
1715   * @mgmtm: mgmt module
1716   * @io_req: I/O request
1717   * @sub_op: ELS or CT request type
1718   * @pld: DMA payload buffer
1719   * @pld_len: Payload length
1720   * Prepares an ELS/CT work request and sends it to the firmware.
1721   * Returns: 0 - on success
1722   */
1723  static int
1724  csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
1725  		uint8_t sub_op, struct csio_dma_buf *pld,
1726  		uint32_t pld_len)
1727  {
1728  	struct csio_wr_pair wrp;
1729  	struct csio_lnode *ln = io_req->lnode;
1730  	struct csio_rnode *rn = io_req->rnode;
1731  	struct	csio_hw	*hw = mgmtm->hw;
1732  	uint8_t fw_wr[64];
1733  	struct ulptx_sgl dsgl;
1734  	uint32_t wr_size = 0;
1735  	uint8_t im_len = 0;
1736  	uint32_t wr_off = 0;
1737  
1738  	int ret = 0;
1739  
1740  	/* Calculate WR Size for this ELS REQ */
1741  	wr_size = sizeof(struct fw_fcoe_els_ct_wr);
1742  
1743  	/* Send as immediate data if pld < 256 */
1744  	if (pld_len < 256) {
1745  		wr_size += ALIGN(pld_len, 8);
1746  		im_len = (uint8_t)pld_len;
1747  	} else
1748  		wr_size += sizeof(struct ulptx_sgl);
1749  
1750  	/* Roundup WR size in units of 16 bytes */
1751  	wr_size = ALIGN(wr_size, 16);
1752  
1753  	/* Get WR to send ELS REQ */
1754  	ret = csio_wr_get(hw, mgmtm->eq_idx, wr_size, &wrp);
1755  	if (ret != 0) {
1756  		csio_err(hw, "Failed to get WR for ec_req %p ret:%d\n",
1757  			io_req, ret);
1758  		return ret;
1759  	}
1760  
1761  	/* Prepare Generic WR used by all ELS/CT cmd */
1762  	csio_ln_prep_ecwr(io_req, wr_size, im_len, sub_op,
1763  				ln->nport_id, rn->nport_id,
1764  				csio_rn_flowid(rn),
1765  				&fw_wr[0]);
1766  
1767  	/* Copy ELS/CT WR CMD */
1768  	csio_wr_copy_to_wrp(&fw_wr[0], &wrp, wr_off,
1769  			sizeof(struct fw_fcoe_els_ct_wr));
1770  	wr_off += sizeof(struct fw_fcoe_els_ct_wr);
1771  
1772  	/* Copy payload to Immediate section of WR */
1773  	if (im_len)
1774  		csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len);
1775  	else {
1776  		/* Program DSGL to dma payload */
1777  		dsgl.cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
1778  					ULPTX_MORE_F | ULPTX_NSGE_V(1));
1779  		dsgl.len0 = cpu_to_be32(pld_len);
1780  		dsgl.addr0 = cpu_to_be64(pld->paddr);
1781  		csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8),
1782  				   sizeof(struct ulptx_sgl));
1783  	}
1784  
1785  	/* Issue work request to xmit ELS/CT req to FW */
1786  	csio_wr_issue(mgmtm->hw, mgmtm->eq_idx, false);
1787  	return ret;
1788  }
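
/*
 * Sizing sketch for the WR built above (illustrative numbers): with a
 * 64-byte payload the data travels as immediate data, so wr_size is
 * sizeof(struct fw_fcoe_els_ct_wr) + ALIGN(64, 8), rounded up to a
 * 16-byte multiple. With a 512-byte payload only a struct ulptx_sgl
 * is appended instead, and the hardware fetches the payload through
 * the single-entry DSGL programmed above.
 */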
1789  
1790  /*
1791   * csio_ln_mgmt_submit_req - Submit a FCoE management request.
1792   * @io_req: I/O request
1793   * @io_cbfn: Completion handler
1794   * @req_type: ELS or CT request type
1795   * @pld: DMA payload buffer
1796   * @pld_len: Payload length
1797   *
1798   * This API is used to submit a management ELS/CT request.
1799   * It must be called with the hw lock held.
1800   *
1801   * Returns: 0 - on success
1802   *	    -ENOMEM - on error.
1803   */
1804  static int
1805  csio_ln_mgmt_submit_req(struct csio_ioreq *io_req,
1806  		void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
1807  		enum fcoe_cmn_type req_type, struct csio_dma_buf *pld,
1808  		uint32_t pld_len)
1809  {
1810  	struct csio_hw *hw = csio_lnode_to_hw(io_req->lnode);
1811  	struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
1812  	int rv;
1813  
1814  	BUG_ON(pld_len > pld->len);
1815  
1816  	io_req->io_cbfn = io_cbfn;	/* Upper layer callback handler */
1817  	io_req->fw_handle = (uintptr_t) (io_req);
1818  	io_req->eq_idx = mgmtm->eq_idx;
1819  	io_req->iq_idx = mgmtm->iq_idx;
1820  
1821  	rv = csio_ln_mgmt_submit_wr(mgmtm, io_req, req_type, pld, pld_len);
1822  	if (rv == 0) {
1823  		list_add_tail(&io_req->sm.sm_list, &mgmtm->active_q);
1824  		mgmtm->stats.n_active++;
1825  	}
1826  	return rv;
1827  }
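
/*
 * Minimal usage sketch (illustrative: example_cbfn, pld and pld_len
 * are hypothetical, and the request type shown is just one valid
 * choice). io_req->dma_buf must already describe the response buffer,
 * and the hw lock must be held:
 *
 *	io_req->lnode = ln;
 *	io_req->rnode = rn;
 *	io_req->tmo   = 30;	// example timeout value
 *	if (csio_ln_mgmt_submit_req(io_req, example_cbfn, FCOE_CT,
 *				    pld, pld_len))
 *		...;		// not queued on mgmtm->active_q
 */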
1828  
1829  /*
1830   * csio_ln_fdmi_init - FDMI Init entry point.
1831   * @ln: lnode
1832   */
1833  static int
1834  csio_ln_fdmi_init(struct csio_lnode *ln)
1835  {
1836  	struct csio_hw *hw = csio_lnode_to_hw(ln);
1837  	struct csio_dma_buf	*dma_buf;
1838  
1839  	/* Allocate MGMT request required for FDMI */
1840  	ln->mgmt_req = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
1841  	if (!ln->mgmt_req) {
1842  		csio_ln_err(ln, "Failed to alloc ioreq for FDMI\n");
1843  		CSIO_INC_STATS(hw, n_err_nomem);
1844  		return -ENOMEM;
1845  	}
1846  
1847  	/* Allocate Dma buffers for FDMI response Payload */
1848  	dma_buf = &ln->mgmt_req->dma_buf;
1849  	dma_buf->len = 2048;
1850  	dma_buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, dma_buf->len,
1851  						&dma_buf->paddr, GFP_KERNEL);
1852  	if (!dma_buf->vaddr) {
1853  		csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n");
1854  		kfree(ln->mgmt_req);
1855  		ln->mgmt_req = NULL;
1856  		return -ENOMEM;
1857  	}
1858  
1859  	ln->flags |= CSIO_LNF_FDMI_ENABLE;
1860  	return 0;
1861  }
1862  
1863  /*
1864   * csio_ln_fdmi_exit - FDMI exit entry point.
1865   * @ln: lnode
1866   */
1867  static int
1868  csio_ln_fdmi_exit(struct csio_lnode *ln)
1869  {
1870  	struct csio_dma_buf *dma_buf;
1871  	struct csio_hw *hw = csio_lnode_to_hw(ln);
1872  
1873  	if (!ln->mgmt_req)
1874  		return 0;
1875  
1876  	dma_buf = &ln->mgmt_req->dma_buf;
1877  	if (dma_buf->vaddr)
1878  		dma_free_coherent(&hw->pdev->dev, dma_buf->len, dma_buf->vaddr,
1879  				    dma_buf->paddr);
1880  
1881  	kfree(ln->mgmt_req);
1882  	return 0;
1883  }
1884  
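/*
 * csio_scan_done - Check whether the SCSI target scan can be declared done.
 * @ln: lnode
 * @ticks: current tick count
 * @time: time elapsed since the scan started
 * @max_scan_ticks: hard upper bound on the scan duration
 * @delta_scan_ticks: settle window between target-count samples
 * Returns: 1 if the scan is done, 0 otherwise.
 */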
1885  int
1886  csio_scan_done(struct csio_lnode *ln, unsigned long ticks,
1887  		unsigned long time, unsigned long max_scan_ticks,
1888  		unsigned long delta_scan_ticks)
1889  {
1890  	int rv = 0;
1891  
1892  	if (time >= max_scan_ticks)
1893  		return 1;
1894  
1895  	if (!ln->tgt_scan_tick)
1896  		ln->tgt_scan_tick = ticks;
1897  
1898  	if ((ticks - ln->tgt_scan_tick) >= delta_scan_ticks) {
1899  		if (!ln->last_scan_ntgts)
1900  			ln->last_scan_ntgts = ln->n_scsi_tgts;
1901  		else {
1902  			if (ln->last_scan_ntgts == ln->n_scsi_tgts)
1903  				return 1;
1904  
1905  			ln->last_scan_ntgts = ln->n_scsi_tgts;
1906  		}
1907  		ln->tgt_scan_tick = ticks;
1908  	}
1909  	return rv;
1910  }
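
/*
 * Example of the heuristic above (illustrative numbers): once a delta
 * window of, say, 5 seconds worth of ticks elapses, the current
 * n_scsi_tgts is recorded; if the count is unchanged when the next
 * window elapses, discovery has settled and 1 is returned. The hard
 * cap applies regardless: once time >= max_scan_ticks the scan is
 * declared done even if targets are still being discovered.
 */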
1911  
1912  /*
1913   * csio_notify_lnodes:
1914   * @hw: HW module
1915   * @note: Notification
1916   *
1917   * Called from the HW SM to fan out notifications to the
1918   * Lnode SM. Since the HW SM is entered with lock held,
1919   * there is no need to hold locks here.
1920   *
1921   */
1922  void
1923  csio_notify_lnodes(struct csio_hw *hw, enum csio_ln_notify note)
1924  {
1925  	struct list_head *tmp;
1926  	struct csio_lnode *ln;
1927  
1928  	csio_dbg(hw, "Notifying all nodes of event %d\n", note);
1929  
1930  	/* Traverse the sibling lnodes list and send the event */
1931  	list_for_each(tmp, &hw->sln_head) {
1932  		ln = (struct csio_lnode *) tmp;
1933  
1934  		switch (note) {
1935  		case CSIO_LN_NOTIFY_HWREADY:
1936  			csio_lnode_start(ln);
1937  			break;
1938  
1939  		case CSIO_LN_NOTIFY_HWRESET:
1940  		case CSIO_LN_NOTIFY_HWREMOVE:
1941  			csio_lnode_close(ln);
1942  			break;
1943  
1944  		case CSIO_LN_NOTIFY_HWSTOP:
1945  			csio_lnode_stop(ln);
1946  			break;
1947  
1948  		default:
1949  			break;
1950  
1951  		}
1952  	}
1953  }
1954  
1955  /*
1956   * csio_disable_lnodes:
1957   * @hw: HW module
1958   * @portid: Port ID
1959   * @disable: disable/enable flag
1960   * If disable=1, disables all lnodes hosted on the given physical port;
1961   * otherwise enables all lnodes on the given physical port.
1962   * This routine must be called with the hw lock held.
1963   */
1964  void
1965  csio_disable_lnodes(struct csio_hw *hw, uint8_t portid, bool disable)
1966  {
1967  	struct list_head *tmp;
1968  	struct csio_lnode *ln;
1969  
1970  	csio_dbg(hw, "Notifying event to all nodes of port:%d\n", portid);
1971  
1972  	/* Traverse sibling lnodes list and send evt */
1973  	list_for_each(tmp, &hw->sln_head) {
1974  		ln = (struct csio_lnode *) tmp;
1975  		if (ln->portid != portid)
1976  			continue;
1977  
1978  		if (disable)
1979  			csio_lnode_stop(ln);
1980  		else
1981  			csio_lnode_start(ln);
1982  	}
1983  }
1984  
1985  /*
1986   * csio_ln_init - Initialize an lnode.
1987   * @ln:		lnode
1988   *
1989   */
1990  static int
1991  csio_ln_init(struct csio_lnode *ln)
1992  {
1993  	int rv = -EINVAL;
1994  	struct csio_lnode *pln;
1995  	struct csio_hw *hw = csio_lnode_to_hw(ln);
1996  
1997  	csio_init_state(&ln->sm, csio_lns_uninit);
1998  	ln->vnp_flowid = CSIO_INVALID_IDX;
1999  	ln->fcf_flowid = CSIO_INVALID_IDX;
2000  
2001  	if (csio_is_root_ln(ln)) {
2002  
2003  		/* This is the lnode used during initialization */
2004  
2005  		ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), GFP_KERNEL);
2006  		if (!ln->fcfinfo) {
2007  			csio_ln_err(ln, "Failed to alloc FCF record\n");
2008  			CSIO_INC_STATS(hw, n_err_nomem);
2009  			goto err;
2010  		}
2011  
2012  		INIT_LIST_HEAD(&ln->fcf_lsthead);
2013  		kref_init(&ln->fcfinfo->kref);
2014  
2015  		if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
2016  			goto err;
2017  
2018  	} else { /* Either a non-root physical or a virtual lnode */
2019  
2020  		/*
2021  		 * The rest is common for non-root physical and NPIV lnodes.
2022  		 * Just get references to all other modules.
2023  		 */
2024  
2025  		if (csio_is_npiv_ln(ln)) {
2026  			/* NPIV */
2027  			pln = csio_parent_lnode(ln);
2028  			kref_get(&pln->fcfinfo->kref);
2029  			ln->fcfinfo = pln->fcfinfo;
2030  		} else {
2031  			/* Another non-root physical lnode (FCF) */
2032  			ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info),
2033  								GFP_KERNEL);
2034  			if (!ln->fcfinfo) {
2035  				csio_ln_err(ln, "Failed to alloc FCF info\n");
2036  				CSIO_INC_STATS(hw, n_err_nomem);
2037  				goto err;
2038  			}
2039  
2040  			kref_init(&ln->fcfinfo->kref);
2041  
2042  			if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
2043  				goto err;
2044  		}
2045  
2046  	} /* if (!csio_is_root_ln(ln)) */
2047  
2048  	return 0;
2049  err:
2050  	return rv;
2051  }
2052  
2053  static void
2054  csio_ln_exit(struct csio_lnode *ln)
2055  {
2056  	struct csio_lnode *pln;
2057  
2058  	csio_cleanup_rns(ln);
2059  	if (csio_is_npiv_ln(ln)) {
2060  		pln = csio_parent_lnode(ln);
2061  		kref_put(&pln->fcfinfo->kref, csio_free_fcfinfo);
2062  	} else {
2063  		kref_put(&ln->fcfinfo->kref, csio_free_fcfinfo);
2064  		if (csio_fdmi_enable)
2065  			csio_ln_fdmi_exit(ln);
2066  	}
2067  	ln->fcfinfo = NULL;
2068  }
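
/*
 * Reference-counting note: an NPIV lnode does not own an FCF record.
 * csio_ln_init() takes a kref on the parent's fcfinfo and
 * csio_ln_exit() drops it, so the record is freed (via
 * csio_free_fcfinfo) only after the physical lnode and every NPIV
 * child have released their references.
 */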
2069  
2070  /*
2071   * csio_lnode_init - Initialize the members of an lnode.
2072   * @ln:		lnode
2073   */
2074  int
2075  csio_lnode_init(struct csio_lnode *ln, struct csio_hw *hw,
2076  		struct csio_lnode *pln)
2077  {
2078  	int rv = -EINVAL;
2079  
2080  	/* Link this lnode to hw */
2081  	csio_lnode_to_hw(ln)	= hw;
2082  
2083  	/* Link child to parent if child lnode */
2084  	if (pln)
2085  		ln->pln = pln;
2086  	else
2087  		ln->pln = NULL;
2088  
2089  	/* Initialize scsi_tgt and timers to zero */
2090  	ln->n_scsi_tgts = 0;
2091  	ln->last_scan_ntgts = 0;
2092  	ln->tgt_scan_tick = 0;
2093  
2094  	/* Initialize rnode list */
2095  	INIT_LIST_HEAD(&ln->rnhead);
2096  	INIT_LIST_HEAD(&ln->cln_head);
2097  
2098  	/* Initialize log level for debug */
2099  	ln->params.log_level	= hw->params.log_level;
2100  
2101  	if (csio_ln_init(ln))
2102  		goto err;
2103  
2104  	/* Add lnode to list of sibling or children lnodes */
2105  	spin_lock_irq(&hw->lock);
2106  	list_add_tail(&ln->sm.sm_list, pln ? &pln->cln_head : &hw->sln_head);
2107  	if (pln)
2108  		pln->num_vports++;
2109  	spin_unlock_irq(&hw->lock);
2110  
2111  	hw->num_lns++;
2112  
2113  	return 0;
2114  err:
2115  	csio_lnode_to_hw(ln) = NULL;
2116  	return rv;
2117  }
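
/*
 * Minimal instantiation sketch (illustrative; assumes the caller has
 * already allocated ln, e.g. embedded in its Scsi_Host private data):
 *
 *	if (csio_lnode_init(ln, hw, NULL))	// NULL pln: physical lnode
 *		goto err_free_ln;		// hypothetical error label
 *	...
 *	csio_lnode_exit(ln);	// unlink from hw and release FCF refs
 *
 * Passing a parent lnode instead links the new lnode as an NPIV child
 * on pln->cln_head.
 */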
2118  
2119  /**
2120   * csio_lnode_exit - De-instantiate an lnode.
2121   * @ln:		lnode
2122   *
2123   */
2124  void
2125  csio_lnode_exit(struct csio_lnode *ln)
2126  {
2127  	struct csio_hw *hw = csio_lnode_to_hw(ln);
2128  
2129  	csio_ln_exit(ln);
2130  
2131  	/* Remove this lnode from hw->sln_head */
2132  	spin_lock_irq(&hw->lock);
2133  
2134  	list_del_init(&ln->sm.sm_list);
2135  
2136  	/* If it is a child lnode, decrement the
2137  	 * vport counter in its parent lnode
2138  	 */
2139  	if (ln->pln)
2140  		ln->pln->num_vports--;
2141  
2142  	/* Update root lnode pointer */
2143  	if (list_empty(&hw->sln_head))
2144  		hw->rln = NULL;
2145  	else
2146  		hw->rln = (struct csio_lnode *)csio_list_next(&hw->sln_head);
2147  
2148  	spin_unlock_irq(&hw->lock);
2149  
2150  	csio_lnode_to_hw(ln)	= NULL;
2151  	hw->num_lns--;
2152  }
2153