// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */
#include "qla_def.h"

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *qla2x00_dfs_root;
static atomic_t qla2x00_dfs_root_count;

#define QLA_DFS_RPORT_DEVLOSS_TMO	1

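/*
 * Read one rport attribute for the per-rport debugfs files defined
 * below. dev_loss_tmo is only valid once the port has registered with
 * the FC-NVMe transport.
 */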
static int
qla_dfs_rport_get(struct fc_port *fp, int attr_id, u64 *val)
{
	switch (attr_id) {
	case QLA_DFS_RPORT_DEVLOSS_TMO:
		/* Only supported for FC-NVMe devices that are registered. */
		if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
			return -EIO;
		*val = fp->nvme_remote_port->dev_loss_tmo;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int
qla_dfs_rport_set(struct fc_port *fp, int attr_id, u64 val)
{
	switch (attr_id) {
	case QLA_DFS_RPORT_DEVLOSS_TMO:
		/* Only supported for FC-NVMe devices that are registered. */
		if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
			return -EIO;
#if (IS_ENABLED(CONFIG_NVME_FC))
		return nvme_fc_set_remoteport_devloss(fp->nvme_remote_port,
						      val);
#else /* CONFIG_NVME_FC */
		return -EINVAL;
#endif /* CONFIG_NVME_FC */
	default:
		return -EINVAL;
	}
	return 0;
}

#define DEFINE_QLA_DFS_RPORT_RW_ATTR(_attr_id, _attr)		\
static int qla_dfs_rport_##_attr##_get(void *data, u64 *val)	\
{								\
	struct fc_port *fp = data;				\
	return qla_dfs_rport_get(fp, _attr_id, val);		\
}								\
static int qla_dfs_rport_##_attr##_set(void *data, u64 val)	\
{								\
	struct fc_port *fp = data;				\
	return qla_dfs_rport_set(fp, _attr_id, val);		\
}								\
DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_##_attr##_fops,		\
		qla_dfs_rport_##_attr##_get,			\
		qla_dfs_rport_##_attr##_set, "%llu\n")

/*
 * Wrapper for getting fc_port fields.
 *
 * _attr    : Attribute name.
 * _get_val : Accessor macro to retrieve the value.
 */
#define DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val)			\
static int qla_dfs_rport_field_##_attr##_get(void *data, u64 *val)	\
{									\
	struct fc_port *fp = data;					\
	*val = _get_val;						\
	return 0;							\
}									\
DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_field_##_attr##_fops,		\
		qla_dfs_rport_field_##_attr##_get,			\
		NULL, "%llu\n")

#define DEFINE_QLA_DFS_RPORT_ACCESS(_attr, _get_val) \
	DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val)

#define DEFINE_QLA_DFS_RPORT_FIELD(_attr) \
	DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, fp->_attr)

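/*
 * Instantiate the fops used by qla2x00_dfs_create_rport() below. As an
 * illustration, DEFINE_QLA_DFS_RPORT_FIELD(loop_id) expands to a
 * qla_dfs_rport_field_loop_id_get() helper that copies fp->loop_id into
 * *val, plus the read-only qla_dfs_rport_field_loop_id_fops.
 */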
DEFINE_QLA_DFS_RPORT_RW_ATTR(QLA_DFS_RPORT_DEVLOSS_TMO, dev_loss_tmo);

DEFINE_QLA_DFS_RPORT_FIELD(disc_state);
DEFINE_QLA_DFS_RPORT_FIELD(scan_state);
DEFINE_QLA_DFS_RPORT_FIELD(fw_login_state);
DEFINE_QLA_DFS_RPORT_FIELD(login_pause);
DEFINE_QLA_DFS_RPORT_FIELD(flags);
DEFINE_QLA_DFS_RPORT_FIELD(nvme_flag);
DEFINE_QLA_DFS_RPORT_FIELD(last_rscn_gen);
DEFINE_QLA_DFS_RPORT_FIELD(rscn_gen);
DEFINE_QLA_DFS_RPORT_FIELD(login_gen);
DEFINE_QLA_DFS_RPORT_FIELD(loop_id);
DEFINE_QLA_DFS_RPORT_FIELD_GET(port_id, fp->d_id.b24);
DEFINE_QLA_DFS_RPORT_FIELD_GET(sess_kref, kref_read(&fp->sess_kref));

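/*
 * Create the per-rport debugfs directory "pn-<wwpn>" under the host's
 * "rports" directory and populate it with the read-only field
 * attributes above, plus a writable dev_loss_tmo for FC-NVMe targets.
 */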
void
qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp)
{
	char wwn[32];

#define QLA_CREATE_RPORT_FIELD_ATTR(_attr)			\
	debugfs_create_file(#_attr, 0400, fp->dfs_rport_dir,	\
		fp, &qla_dfs_rport_field_##_attr##_fops)

	if (!vha->dfs_rport_root || fp->dfs_rport_dir)
		return;

	sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
	fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
	if (IS_ERR(fp->dfs_rport_dir))
		return;
	if (NVME_TARGET(vha->hw, fp))
		debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
				    fp, &qla_dfs_rport_dev_loss_tmo_fops);

	QLA_CREATE_RPORT_FIELD_ATTR(disc_state);
	QLA_CREATE_RPORT_FIELD_ATTR(scan_state);
	QLA_CREATE_RPORT_FIELD_ATTR(fw_login_state);
	QLA_CREATE_RPORT_FIELD_ATTR(login_pause);
	QLA_CREATE_RPORT_FIELD_ATTR(flags);
	QLA_CREATE_RPORT_FIELD_ATTR(nvme_flag);
	QLA_CREATE_RPORT_FIELD_ATTR(last_rscn_gen);
	QLA_CREATE_RPORT_FIELD_ATTR(rscn_gen);
	QLA_CREATE_RPORT_FIELD_ATTR(login_gen);
	QLA_CREATE_RPORT_FIELD_ATTR(loop_id);
	QLA_CREATE_RPORT_FIELD_ATTR(port_id);
	QLA_CREATE_RPORT_FIELD_ATTR(sess_kref);
}

void
qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp)
{
	if (!vha->dfs_rport_root || !fp->dfs_rport_dir)
		return;
	debugfs_remove_recursive(fp->dfs_rport_dir);
	fp->dfs_rport_dir = NULL;
}

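/*
 * "tgt_sess": dump the host's fcport list (port ID, port name, handle)
 * when target mode is enabled for this vha.
 */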
static int
qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct fc_port *sess = NULL;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	seq_printf(s, "%s\n", vha->host_str);
	if (tgt) {
		seq_puts(s, "Port ID   Port Name                Handle\n");

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		list_for_each_entry(sess, &vha->vp_fcports, list)
			seq_printf(s, "%02x:%02x:%02x  %8phC  %d\n",
			    sess->d_id.b.domain, sess->d_id.b.area,
			    sess->d_id.b.al_pa, sess->port_name,
			    sess->loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_sess);

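/*
 * "tgt_port_database": fetch the firmware's GID list via a mailbox
 * command and query the port database entry for each loop ID found.
 */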
static int
qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;
	struct qla_hw_data *ha = vha->hw;
	struct gid_list_info *gid_list;
	dma_addr_t gid_list_dma;
	fc_port_t fc_port;
	char *id_iter;
	int rc, i;
	uint16_t entries, loop_id;

	seq_printf(s, "%s\n", vha->host_str);
	gid_list = dma_alloc_coherent(&ha->pdev->dev,
				      qla2x00_gid_list_size(ha),
				      &gid_list_dma, GFP_KERNEL);
	if (!gid_list) {
		ql_dbg(ql_dbg_user, vha, 0x7018,
		       "DMA allocation failed for %u\n",
		       qla2x00_gid_list_size(ha));
		return 0;
	}

	rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
				  &entries);
	if (rc != QLA_SUCCESS)
		goto out_free_id_list;

	id_iter = (char *)gid_list;

	seq_puts(s, "Port Name	Port ID		Loop ID\n");

	for (i = 0; i < entries; i++) {
		struct gid_list_info *gid =
			(struct gid_list_info *)id_iter;
		loop_id = le16_to_cpu(gid->loop_id);
		memset(&fc_port, 0, sizeof(fc_port_t));

		fc_port.loop_id = loop_id;

		rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
		seq_printf(s, "%8phC  %02x%02x%02x  %d\n",
			   fc_port.port_name, fc_port.d_id.b.domain,
			   fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
			   fc_port.loop_id);
		id_iter += ha->gid_list_info_size;
	}
out_free_id_list:
	dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
			  gid_list, gid_list_dma);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_port_database);

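/*
 * "fw_resource_count": report exchange/IOCB counts obtained from the
 * firmware and, when ql2xenforce_iocb_limit is set, the driver's own
 * per-queue-pair usage estimates.
 */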
static int
qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
{
	struct scsi_qla_host *vha = s->private;
	uint16_t mb[MAX_IOCB_MB_REG];
	int rc;
	struct qla_hw_data *ha = vha->hw;
	u16 iocbs_used, i, exch_used;

	rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
	if (rc != QLA_SUCCESS) {
		seq_printf(s, "Mailbox Command failed %d, mb %#x", rc, mb[0]);
	} else {
		seq_puts(s, "FW Resource count\n\n");
		seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]);
		seq_printf(s, "Current TGT exchg count[%d]\n", mb[2]);
		seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[3]);
		seq_printf(s, "Original Initiator Exchange count[%d]\n", mb[6]);
		seq_printf(s, "Current IOCB count[%d]\n", mb[7]);
		seq_printf(s, "Original IOCB count[%d]\n", mb[10]);
		seq_printf(s, "MAX VP count[%d]\n", mb[11]);
		seq_printf(s, "MAX FCF count[%d]\n", mb[12]);
		seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n",
		    mb[20]);
		seq_printf(s, "Original Initiator fast XCB buffer cnt[%d]\n",
		    mb[21]);
		seq_printf(s, "Current free Initiator fast XCB buffer cnt[%d]\n",
		    mb[22]);
		seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n",
		    mb[23]);
	}

	if (ql2xenforce_iocb_limit) {
		/* Lock is not required; this is only an estimate. */
		iocbs_used = ha->base_qpair->fwres.iocbs_used;
		exch_used = ha->base_qpair->fwres.exch_used;
		for (i = 0; i < ha->max_qpairs; i++) {
			if (ha->queue_pair_map[i]) {
				iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
				exch_used += ha->queue_pair_map[i]->fwres.exch_used;
			}
		}

		seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
			   iocbs_used, ha->base_qpair->fwres.iocbs_limit);

		seq_printf(s, "estimate exchange used[%d] high water limit [%d]\n",
			   exch_used, ha->base_qpair->fwres.exch_limit);

		if (ql2xenforce_iocb_limit == 2) {
			iocbs_used = atomic_read(&ha->fwres.iocb_used);
			exch_used  = atomic_read(&ha->fwres.exch_used);
			seq_printf(s, "        estimate iocb2 used [%d] high water limit [%d]\n",
					iocbs_used, ha->fwres.iocb_limit);

			seq_printf(s, "        estimate exchange2 used[%d] high water limit [%d]\n",
					exch_used, ha->fwres.exch_limit);
		}
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qla_dfs_fw_resource_cnt);

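/*
 * "tgt_counters": sum the per-queue-pair target-mode counters, then
 * append DIF statistics and initiator error counters.
 */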
static int
qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
{
	struct scsi_qla_host *vha = s->private;
	struct qla_qpair *qpair = vha->hw->base_qpair;
	uint64_t qla_core_sbt_cmd, core_qla_que_buf, qla_core_ret_ctio,
		core_qla_snd_status, qla_core_ret_sta_ctio, core_qla_free_cmd,
		num_q_full_sent, num_alloc_iocb_failed, num_term_xchg_sent;
	u16 i;
	fc_port_t *fcport = NULL;

	if (qla2x00_chip_is_down(vha))
		return 0;

	qla_core_sbt_cmd = qpair->tgt_counters.qla_core_sbt_cmd;
	core_qla_que_buf = qpair->tgt_counters.core_qla_que_buf;
	qla_core_ret_ctio = qpair->tgt_counters.qla_core_ret_ctio;
	core_qla_snd_status = qpair->tgt_counters.core_qla_snd_status;
	qla_core_ret_sta_ctio = qpair->tgt_counters.qla_core_ret_sta_ctio;
	core_qla_free_cmd = qpair->tgt_counters.core_qla_free_cmd;
	num_q_full_sent = qpair->tgt_counters.num_q_full_sent;
	num_alloc_iocb_failed = qpair->tgt_counters.num_alloc_iocb_failed;
	num_term_xchg_sent = qpair->tgt_counters.num_term_xchg_sent;

	for (i = 0; i < vha->hw->max_qpairs; i++) {
		qpair = vha->hw->queue_pair_map[i];
		if (!qpair)
			continue;
		qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd;
		core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf;
		qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio;
		core_qla_snd_status += qpair->tgt_counters.core_qla_snd_status;
		qla_core_ret_sta_ctio +=
		    qpair->tgt_counters.qla_core_ret_sta_ctio;
		core_qla_free_cmd += qpair->tgt_counters.core_qla_free_cmd;
		num_q_full_sent += qpair->tgt_counters.num_q_full_sent;
		num_alloc_iocb_failed +=
		    qpair->tgt_counters.num_alloc_iocb_failed;
		num_term_xchg_sent += qpair->tgt_counters.num_term_xchg_sent;
	}

	seq_puts(s, "Target Counters\n");
	seq_printf(s, "qla_core_sbt_cmd = %lld\n",
		qla_core_sbt_cmd);
	seq_printf(s, "qla_core_ret_sta_ctio = %lld\n",
		qla_core_ret_sta_ctio);
	seq_printf(s, "qla_core_ret_ctio = %lld\n",
		qla_core_ret_ctio);
	seq_printf(s, "core_qla_que_buf = %lld\n",
		core_qla_que_buf);
	seq_printf(s, "core_qla_snd_status = %lld\n",
		core_qla_snd_status);
	seq_printf(s, "core_qla_free_cmd = %lld\n",
		core_qla_free_cmd);
	seq_printf(s, "num alloc iocb failed = %lld\n",
		num_alloc_iocb_failed);
	seq_printf(s, "num term exchange sent = %lld\n",
		num_term_xchg_sent);
	seq_printf(s, "num Q full sent = %lld\n",
		num_q_full_sent);

	/* DIF stats */
	seq_printf(s, "DIF Inp Bytes = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_input_bytes);
	seq_printf(s, "DIF Outp Bytes = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_output_bytes);
	seq_printf(s, "DIF Inp Req = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_input_requests);
	seq_printf(s, "DIF Outp Req = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_output_requests);
	seq_printf(s, "DIF Guard err = %d\n",
		vha->qla_stats.qla_dif_stats.dif_guard_err);
	seq_printf(s, "DIF Ref tag err = %d\n",
		vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
	seq_printf(s, "DIF App tag err = %d\n",
		vha->qla_stats.qla_dif_stats.dif_app_tag_err);

	seq_puts(s, "\n");
	seq_puts(s, "Initiator Error Counters\n");
	seq_printf(s, "HW Error Count =		%14lld\n",
		   vha->hw_err_cnt);
	seq_printf(s, "Link Down Count =	%14lld\n",
		   vha->short_link_down_cnt);
	seq_printf(s, "Interface Err Count =	%14lld\n",
		   vha->interface_err_cnt);
	seq_printf(s, "Cmd Timeout Count =	%14lld\n",
		   vha->cmd_timeout_cnt);
	seq_printf(s, "Reset Count =		%14lld\n",
		   vha->reset_cmd_err_cnt);
	seq_puts(s, "\n");

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (!fcport->rport)
			continue;

		seq_printf(s, "Target Num = %7d Link Down Count = %14lld\n",
			   fcport->rport->number, fcport->tgt_short_link_down_cnt);
	}
	seq_puts(s, "\n");

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qla_dfs_tgt_counters);

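/*
 * "fce": dump the FCE trace buffer. Tracing is paused in open() and
 * re-armed in release() so the buffer is stable while it is read.
 */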
static int
qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;
	uint32_t cnt;
	uint32_t *fce;
	uint64_t fce_start;
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&ha->fce_mutex);

	seq_puts(s, "FCE Trace Buffer\n");
	seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
	seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
	seq_puts(s, "FCE Enable Registers\n");
	seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
	    ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
	    ha->fce_mb[5], ha->fce_mb[6]);

	fce = (uint32_t *) ha->fce;
	fce_start = (unsigned long long) ha->fce_dma;
	for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
		if (cnt % 8 == 0)
			seq_printf(s, "\n%llx: ",
			    (unsigned long long)((cnt * 4) + fce_start));
		else
			seq_putc(s, ' ');
		seq_printf(s, "%08x", *fce++);
	}

	seq_puts(s, "\nEnd\n");

	mutex_unlock(&ha->fce_mutex);

	return 0;
}

static int
qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
{
	scsi_qla_host_t *vha = inode->i_private;
	struct qla_hw_data *ha = vha->hw;
	int rval;

	if (!ha->flags.fce_enabled)
		goto out;

	mutex_lock(&ha->fce_mutex);

	/* Pause tracing to flush FCE buffers. */
	rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
	if (rval)
		ql_dbg(ql_dbg_user, vha, 0x705c,
		    "DebugFS: Unable to disable FCE (%d).\n", rval);

	ha->flags.fce_enabled = 0;

	mutex_unlock(&ha->fce_mutex);
out:
	return single_open(file, qla2x00_dfs_fce_show, vha);
}

static int
qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
{
	scsi_qla_host_t *vha = inode->i_private;
	struct qla_hw_data *ha = vha->hw;
	int rval;

	if (ha->flags.fce_enabled)
		goto out;

	mutex_lock(&ha->fce_mutex);

	/* Re-enable FCE tracing. */
	ha->flags.fce_enabled = 1;
	memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
	rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
	    ha->fce_mb, &ha->fce_bufs);
	if (rval) {
		ql_dbg(ql_dbg_user, vha, 0x700d,
		    "DebugFS: Unable to reinitialize FCE (%d).\n", rval);
		ha->flags.fce_enabled = 0;
	}

	mutex_unlock(&ha->fce_mutex);
out:
	return single_release(inode, file);
}

static const struct file_operations dfs_fce_ops = {
	.open		= qla2x00_dfs_fce_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= qla2x00_dfs_fce_release,
};

static int
qla_dfs_naqp_show(struct seq_file *s, void *unused)
{
	struct scsi_qla_host *vha = s->private;
	struct qla_hw_data *ha = vha->hw;

	seq_printf(s, "%d\n", ha->tgt.num_act_qpairs);
	return 0;
}

/*
 * Helper macros for setting up debugfs entries.
 * _name: The name of the debugfs entry
 * _ctx_struct: The context that was passed when creating the debugfs file
 *
 * QLA_DFS_SETUP_RD can be used when there is only a show function.
 * - the show function takes the name qla_dfs_<debugfs-name>_show
 *
 * QLA_DFS_SETUP_RW can be used when there are both show and write functions.
 * - the show function takes the name  qla_dfs_<debugfs-name>_show
 * - the write function takes the name qla_dfs_<debugfs-name>_write
 *
 * To add a new debugfs entry:
 * 1. Create a "struct dentry *" in the appropriate structure in the format
 * dfs_<debugfs-name>
 * 2. Set up the entry using QLA_DFS_SETUP_RD / QLA_DFS_SETUP_RW
 * 3. Create the debugfs file in qla2x00_dfs_setup() using QLA_DFS_CREATE_FILE
 * or QLA_DFS_ROOT_CREATE_FILE
 * 4. Remove the debugfs file in qla2x00_dfs_remove() using QLA_DFS_REMOVE_FILE
 * or QLA_DFS_ROOT_REMOVE_FILE
 *
 * Example for creating a "TEST" debugfs file:
 * 1. struct qla_hw_data { ... struct dentry *dfs_TEST; }
 * 2. QLA_DFS_SETUP_RD(TEST);
 * 3. In qla2x00_dfs_setup():
 * QLA_DFS_CREATE_FILE(ha, TEST, 0600, ha->dfs_dir, vha);
 * 4. In qla2x00_dfs_remove():
 * QLA_DFS_REMOVE_FILE(ha, TEST);
 */
#define QLA_DFS_SETUP_RD(_name)	DEFINE_SHOW_ATTRIBUTE(qla_dfs_##_name)

#define QLA_DFS_SETUP_RW(_name)	DEFINE_SHOW_STORE_ATTRIBUTE(qla_dfs_##_name)

#define QLA_DFS_ROOT_CREATE_FILE(_name, _perm, _ctx)			\
	do {								\
		if (!qla_dfs_##_name)					\
			qla_dfs_##_name = debugfs_create_file(#_name,	\
					_perm, qla2x00_dfs_root, _ctx,	\
					&qla_dfs_##_name##_fops);	\
	} while (0)

#define QLA_DFS_ROOT_REMOVE_FILE(_name)					\
	do {								\
		if (qla_dfs_##_name) {					\
			debugfs_remove(qla_dfs_##_name);		\
			qla_dfs_##_name = NULL;				\
		}							\
	} while (0)

#define QLA_DFS_CREATE_FILE(_struct, _name, _perm, _parent, _ctx)	\
	do {								\
		(_struct)->dfs_##_name = debugfs_create_file(#_name,	\
					_perm, _parent, _ctx,		\
					&qla_dfs_##_name##_fops);	\
	} while (0)

#define QLA_DFS_REMOVE_FILE(_struct, _name)				\
	do {								\
		if ((_struct)->dfs_##_name) {				\
			debugfs_remove((_struct)->dfs_##_name);		\
			(_struct)->dfs_##_name = NULL;			\
		}							\
	} while (0)

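/*
 * "naqp": writing a value below max_qpairs changes the number of queue
 * pairs used for target mode and clears the existing qpair mapping via
 * qlt_clr_qp_table().
 */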
static ssize_t
qla_dfs_naqp_write(struct file *file, const char __user *buffer,
    size_t count, loff_t *pos)
{
	struct seq_file *s = file->private_data;
	struct scsi_qla_host *vha = s->private;
	struct qla_hw_data *ha = vha->hw;
	char *buf;
	int rc = 0;
	unsigned long num_act_qp;

	if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))) {
		pr_err("host%ld: this adapter does not support Multi Q.",
		    vha->host_no);
		return -EINVAL;
	}

	if (!vha->flags.qpairs_available) {
		pr_err("host%ld: Driver is not setup with Multi Q.",
		    vha->host_no);
		return -EINVAL;
	}
	buf = memdup_user_nul(buffer, count);
	if (IS_ERR(buf)) {
		pr_err("host%ld: fail to copy user buffer.",
		    vha->host_no);
		return PTR_ERR(buf);
	}

	num_act_qp = simple_strtoul(buf, NULL, 0);

	if (num_act_qp >= vha->hw->max_qpairs) {
		pr_err("User set invalid number of qpairs %lu. Max = %d",
		    num_act_qp, vha->hw->max_qpairs);
		rc = -EINVAL;
		goto out_free;
	}

	if (num_act_qp != ha->tgt.num_act_qpairs) {
		ha->tgt.num_act_qpairs = num_act_qp;
		qlt_clr_qp_table(vha);
	}
	rc = count;
out_free:
	kfree(buf);
	return rc;
}
QLA_DFS_SETUP_RW(naqp);

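/*
 * Build the debugfs hierarchy for this host: the shared qla2xxx root
 * directory, one directory per host, and the nodes defined above. This
 * is only done for ISPs that support FCE; the function bails out early
 * otherwise.
 */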
int
qla2x00_dfs_setup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto out;
	if (!ha->fce)
		goto out;

	if (qla2x00_dfs_root)
		goto create_dir;

	atomic_set(&qla2x00_dfs_root_count, 0);
	qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);

create_dir:
	if (ha->dfs_dir)
		goto create_nodes;

	mutex_init(&ha->fce_mutex);
	ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);

	atomic_inc(&qla2x00_dfs_root_count);

create_nodes:
	ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count",
	    S_IRUSR, ha->dfs_dir, vha, &qla_dfs_fw_resource_cnt_fops);

	ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR,
	    ha->dfs_dir, vha, &qla_dfs_tgt_counters_fops);

	ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
	    S_IRUSR,  ha->dfs_dir, vha, &qla2x00_dfs_tgt_port_database_fops);

	ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
	    &dfs_fce_ops);

	ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
		S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_sess_fops);

	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
		ha->tgt.dfs_naqp = debugfs_create_file("naqp",
		    0400, ha->dfs_dir, vha, &qla_dfs_naqp_fops);
		if (IS_ERR(ha->tgt.dfs_naqp)) {
			ql_log(ql_log_warn, vha, 0xd011,
			       "Unable to create debugFS naqp node.\n");
			goto out;
		}
	}
	vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
	if (IS_ERR(vha->dfs_rport_root)) {
		ql_log(ql_log_warn, vha, 0xd012,
		       "Unable to create debugFS rports node.\n");
		goto out;
	}
out:
	return 0;
}

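/*
 * Tear down in the reverse order of creation. The shared root directory
 * is removed once the last host drops its reference.
 */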
int
qla2x00_dfs_remove(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->tgt.dfs_naqp) {
		debugfs_remove(ha->tgt.dfs_naqp);
		ha->tgt.dfs_naqp = NULL;
	}

	if (ha->tgt.dfs_tgt_sess) {
		debugfs_remove(ha->tgt.dfs_tgt_sess);
		ha->tgt.dfs_tgt_sess = NULL;
	}

	if (ha->tgt.dfs_tgt_port_database) {
		debugfs_remove(ha->tgt.dfs_tgt_port_database);
		ha->tgt.dfs_tgt_port_database = NULL;
	}

	if (ha->dfs_fw_resource_cnt) {
		debugfs_remove(ha->dfs_fw_resource_cnt);
		ha->dfs_fw_resource_cnt = NULL;
	}

	if (ha->dfs_tgt_counters) {
		debugfs_remove(ha->dfs_tgt_counters);
		ha->dfs_tgt_counters = NULL;
	}

	if (ha->dfs_fce) {
		debugfs_remove(ha->dfs_fce);
		ha->dfs_fce = NULL;
	}

	if (vha->dfs_rport_root) {
		debugfs_remove_recursive(vha->dfs_rport_root);
		vha->dfs_rport_root = NULL;
	}

	if (ha->dfs_dir) {
		debugfs_remove(ha->dfs_dir);
		ha->dfs_dir = NULL;
		atomic_dec(&qla2x00_dfs_root_count);
	}

	if (atomic_read(&qla2x00_dfs_root_count) == 0 &&
	    qla2x00_dfs_root) {
		debugfs_remove(qla2x00_dfs_root);
		qla2x00_dfs_root = NULL;
	}

	return 0;
}